diff --git "a/6212.jsonl" "b/6212.jsonl" new file mode 100644--- /dev/null +++ "b/6212.jsonl" @@ -0,0 +1,697 @@ +{"seq_id":"541465639","text":"import sys\n\n\nhits = 0\nhits_50 = 0\nsegments = 0\nwith open(sys.argv[1]) as f:\n line = next(f)[:-1].split(\"\\t\")\n sys.stderr.write(\"Getting summary for field: %s\\n\" % line[5])\n for line in f:\n line = line[:-1].split(\"\\t\")\n variantId = line[5]\n \n if variantId:\n variantId = variantId.split(\";\")\n \n hits += 1\n segments += 1\n\nsys.stderr.write(\"SUMMARY:\\n\")\nsys.stderr.write(\"Regions that overlap a DGV entry: %d/%d (%2.2f %%)\\n\" % (hits, segments, float(hits)*100/float(segments)))\n\n\n","sub_path":"genome/cnvnator/listtest/dgv/dgv_stats.py","file_name":"dgv_stats.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631879517","text":"#! /usr/bin/env python\n# Copyright (C) 2011 OpenStack, LLC.\n# Copyright (c) 2012 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# manage_projects.py reads a config file called projects.ini\n# It should look like:\n\n# [projects]\n# homepage=http://openstack.org\n# gerrit-host=review.openstack.org\n# local-git-dir=/var/lib/git\n# gerrit-key=/home/gerrit2/review_site/etc/ssh_host_rsa_key\n# gerrit-committer=Project Creator \n# gerrit-replicate=True\n# has-github=True\n# has-wiki=False\n# has-issues=False\n# has-downloads=False\n# acl-dir=/home/gerrit2/acls\n# acl-base=/home/gerrit2/acls/project.config\n#\n# manage_projects.py reads a project listing file called projects.yaml\n# It should look like:\n# - project: PROJECT_NAME\n# options:\n# - has-wiki\n# - has-issues\n# - has-downloads\n# - has-pull-requests\n# - track-upstream\n# homepage: Some homepage that isn't http://openstack.org\n# description: This is a great project\n# upstream: https://gerrit.googlesource.com/gerrit\n# upstream-prefix: upstream\n# acl-config: /path/to/gerrit/project.config\n# acl-append:\n# - /path/to/gerrit/project.config\n# acl-parameters:\n# project: OTHER_PROJECT_NAME\n\nimport argparse\nimport ConfigParser\nimport logging\nimport os\nimport re\nimport shlex\nimport subprocess\nimport tempfile\nimport time\nfrom gitlab import Gitlab\n\nimport gerritlib.gerrit\nimport github\n\nimport jeepyb.gerritdb\nimport jeepyb.log as l\nimport jeepyb.utils as u\n\nregistry = u.ProjectsRegistry()\n\nlog = logging.getLogger(\"manage_projects\")\n\n\nclass FetchConfigException(Exception):\n pass\n\n\nclass CopyACLException(Exception):\n pass\n\n\nclass CreateGroupException(Exception):\n pass\n\n\ndef run_command(cmd, status=False, env=None):\n env = env or {}\n cmd_list = shlex.split(str(cmd))\n newenv = os.environ\n newenv.update(env)\n log.info(\"Executing command: %s\" % \" \".join(cmd_list))\n p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, env=newenv)\n (out, nothing) = p.communicate()\n log.debug(\"Return code: %s\" % p.returncode)\n 
log.debug(\"Command said: %s\" % out.strip())\n if status:\n return (p.returncode, out.strip())\n return out.strip()\n\n\ndef run_command_status(cmd, env=None):\n env = env or {}\n return run_command(cmd, True, env)\n\n\ndef git_command(repo_dir, sub_cmd, env=None):\n env = env or {}\n git_dir = os.path.join(repo_dir, '.git')\n cmd = \"git --git-dir=%s --work-tree=%s %s\" % (git_dir, repo_dir, sub_cmd)\n status, _ = run_command(cmd, True, env)\n return status\n\n\ndef git_command_output(repo_dir, sub_cmd, env=None):\n env = env or {}\n git_dir = os.path.join(repo_dir, '.git')\n cmd = \"git --git-dir=%s --work-tree=%s %s\" % (git_dir, repo_dir, sub_cmd)\n status, out = run_command(cmd, True, env)\n return (status, out)\n\n\ndef fetch_config(project, remote_url, repo_path, env=None):\n env = env or {}\n # Poll for refs/meta/config as gerrit may not have written it out for\n # us yet.\n for x in range(1):\n status = git_command(repo_path, \"fetch %s +refs/meta/config:\"\n \"refs/remotes/gerrit-meta/config\" %\n remote_url, env)\n if status == 0:\n break\n else:\n log.debug(\"Failed to fetch refs/meta/config for project: %s\" %\n project)\n time.sleep(2)\n if status != 0:\n log.error(\"Failed to fetch refs/meta/config for project: %s\" % project)\n raise FetchConfigException()\n\n # Poll for project.config as gerrit may not have committed an empty\n # one yet.\n output = \"\"\n for x in range(10):\n status = git_command(repo_path, \"remote update --prune\", env)\n if status != 0:\n log.error(\"Failed to update remote: %s\" % remote_url)\n time.sleep(2)\n continue\n else:\n status, output = git_command_output(\n repo_path, \"ls-files --with-tree=remotes/gerrit-meta/config \"\n \"project.config\", env)\n if output.strip() != \"project.config\" or status != 0:\n log.debug(\"Failed to find project.config for project: %s\" %\n project)\n time.sleep(2)\n else:\n break\n if output.strip() != \"project.config\" or status != 0:\n log.error(\"Failed to find project.config for project: %s\" % project)\n # raise FetchConfigException()\n\n # Because the following fails if executed more than once you should only\n # run fetch_config once in each repo.\n status = git_command(repo_path, \"checkout -B config \"\n \"remotes/gerrit-meta/config\")\n if status != 0:\n log.error(\"Failed to checkout config for project: %s\" % project)\n raise FetchConfigException()\n\n\ndef copy_acl_config(project, repo_path, acl_config):\n if not os.path.exists(acl_config):\n raise CopyACLException()\n\n acl_dest = os.path.join(repo_path, \"project.config\")\n run_command(\"touch project.config\")\n git_command(repo_path, \"add project.config\")\n status, _ = run_command(\"cp %s %s\" %\n (acl_config, acl_dest), status=True)\n if status != 0:\n raise CopyACLException()\n\n status = git_command(repo_path, \"diff --quiet\")\n return status != 0\n\n\ndef push_acl_config(project, remote_url, repo_path, gitid, env=None):\n env = env or {}\n cmd = \"commit -a -m'Update project config.' 
\"\n status = git_command(repo_path, cmd)\n if status != 0:\n log.error(\"Failed to commit config for project: %s\" % project)\n return False\n status, out = git_command_output(repo_path,\n \"push %s HEAD:refs/meta/config\" %\n remote_url, env)\n if status != 0:\n log.error(\"Failed to push config for project: %s\" % project)\n return False\n return True\n\n\ndef _get_group_uuid(group):\n \"\"\"Wait for up to 10 seconds for the group to be created in the DB.\"\"\"\n query = \"SELECT group_uuid FROM account_groups WHERE name = %s\"\n con = jeepyb.gerritdb.connect()\n for x in range(10):\n cursor = con.cursor()\n cursor.execute(query, (group,))\n data = cursor.fetchone()\n cursor.close()\n con.commit()\n if data:\n return data[0]\n time.sleep(1)\n return None\n\n\ndef get_group_uuid(gerrit, group):\n uuid = _get_group_uuid(group)\n if uuid:\n return uuid\n gerrit.createGroup(group)\n uuid = _get_group_uuid(group)\n if uuid:\n return uuid\n return None\n\n\ndef create_groups_file(project, gerrit, repo_path):\n acl_config = os.path.join(repo_path, \"project.config\")\n group_file = os.path.join(repo_path, \"groups\")\n uuids = {}\n for line in open(acl_config, 'r'):\n r = re.match(r'^.*\\sgroup\\s+(.*)$', line)\n if r:\n group = r.group(1)\n if group in uuids.keys():\n continue\n uuid = get_group_uuid(gerrit, group)\n if uuid:\n uuids[group] = uuid\n else:\n log.error(\"Unable to get UUID for group %s.\" % group)\n raise CreateGroupException()\n if uuids:\n with open(group_file, 'w') as fp:\n for group, uuid in uuids.items():\n fp.write(\"%s\\t%s\\n\" % (uuid, group))\n status = git_command(repo_path, \"add groups\")\n if status != 0:\n log.error(\"Failed to add groups file for project: %s\" % project)\n raise CreateGroupException()\n\n\ndef make_ssh_wrapper(gerrit_user, gerrit_key):\n (fd, name) = tempfile.mkstemp(text=True)\n os.write(fd, '#!/bin/bash\\n')\n os.write(fd,\n 'ssh -i %s -l %s -o \"StrictHostKeyChecking no\" $@\\n' %\n (gerrit_key, gerrit_user))\n os.close(fd)\n os.chmod(name, 0o755)\n return dict(GIT_SSH=name)\n\n\ndef create_gitlab_project(\n default_has_issues, default_has_wiki, gitlab_secure_config,\n options, project, description, homepage):\n\n created = False\n has_issues = 'has-issues' in options or default_has_issues\n has_wiki = 'has-wiki' in options or default_has_wiki\n\n secure_config = ConfigParser.ConfigParser()\n secure_config.read(gitlab_secure_config)\n\n # Project creation doesn't work via oauth\n glab = Gitlab(secure_config.get(\"gitlab\", \"url\"),\n secure_config.get(\"gitlab\", \"key\"))\n glab.auth()\n orgs = glab.Group()\n orgs_dict = dict(zip([o.name.lower() for o in orgs], orgs))\n\n # Find the project's repo\n project_split = project.split('/', 1)\n org_name = project_split[0]\n if len(project_split) > 1:\n repo_name = project_split[1]\n else:\n repo_name = project\n org_name = 'ustack'\n\n try:\n org = orgs_dict[org_name.lower()]\n except Exception:\n # we do not have control of this github org ignore the project.\n return False\n if glab.search_projects(repo_name):\n return created\n\n project_info = {'name': repo_name, 'namespace_id': org.id,\n 'wiki_enabled': has_wiki, 'description': description,\n 'issues_enabled': has_issues}\n glab.Project(project_info).save()\n created = True\n return created\n\n\ndef create_github_project(\n default_has_issues, default_has_downloads, default_has_wiki,\n github_secure_config, options, project, description, homepage):\n created = False\n has_issues = 'has-issues' in options or default_has_issues\n has_downloads 
= 'has-downloads' in options or default_has_downloads\n has_wiki = 'has-wiki' in options or default_has_wiki\n\n secure_config = ConfigParser.ConfigParser()\n secure_config.read(github_secure_config)\n\n # Project creation doesn't work via oauth\n ghub = github.Github(secure_config.get(\"github\", \"username\"),\n secure_config.get(\"github\", \"password\"))\n orgs = ghub.get_user().get_orgs()\n orgs_dict = dict(zip([o.login.lower() for o in orgs], orgs))\n\n # Find the project's repo\n project_split = project.split('/', 1)\n org_name = project_split[0]\n if len(project_split) > 1:\n repo_name = project_split[1]\n else:\n repo_name = project\n\n try:\n org = orgs_dict[org_name.lower()]\n except Exception:\n # we do not have control of this github org ignore the project.\n return False\n try:\n repo = org.get_repo(repo_name)\n except github.GithubException:\n repo = org.create_repo(repo_name,\n homepage=homepage,\n has_issues=has_issues,\n has_downloads=has_downloads,\n has_wiki=has_wiki)\n if description:\n repo.edit(repo_name, description=description)\n if homepage:\n repo.edit(repo_name, homepage=homepage)\n repo.edit(repo_name, has_issues=has_issues,\n has_downloads=has_downloads,\n has_wiki=has_wiki)\n\n if 'gerrit' not in [team.name for team in repo.get_teams()]:\n teams = org.get_teams()\n teams_dict = dict(zip([t.name.lower() for t in teams], teams))\n teams_dict['gerrit'].add_to_repos(repo)\n created = True\n\n return created\n\n\n# TODO(mordred): Inspect repo_dir:master for a description\n# override\ndef find_description_override(repo_path):\n return None\n\n\ndef make_local_copy(repo_path, project, project_list,\n git_opts, ssh_env, upstream, GERRIT_HOST, GERRIT_PORT,\n project_git, GERRIT_GITID):\n\n # Ensure that the base location exists\n if not os.path.exists(os.path.dirname(repo_path)):\n os.makedirs(os.path.dirname(repo_path))\n\n # Three choices\n # - If gerrit has it, get from gerrit\n # - If gerrit doesn't have it:\n # - If it has an upstream, clone that\n # - If it doesn't, create it\n\n # Gerrit knows about the project, clone it\n # TODO(mordred): there is a possible failure condition here\n # we should consider 'gerrit has it' to be\n # 'gerrit repo has a master branch'\n if project in project_list:\n run_command(\n \"git clone %(remote_url)s %(repo_path)s\" % git_opts,\n env=ssh_env)\n if upstream:\n git_command(\n repo_path,\n \"remote add -f upstream %(upstream)s\" % git_opts)\n return None\n\n # Gerrit doesn't have it, but it has an upstream configured\n # We're probably importing it for the first time, clone\n # upstream, but then ongoing we want gerrit to ge origin\n # and upstream to be only there for ongoing tracking\n # purposes, so rename origin to upstream and add a new\n # origin remote that points at gerrit\n elif upstream:\n run_command(\n \"git clone %(upstream)s %(repo_path)s\" % git_opts,\n env=ssh_env)\n git_command(\n repo_path,\n \"fetch origin +refs/heads/*:refs/copy/heads/*\",\n env=ssh_env)\n git_command(repo_path, \"remote rename origin upstream\")\n git_command(\n repo_path,\n \"remote add origin %(remote_url)s\" % git_opts)\n return \"push %s +refs/copy/heads/*:refs/heads/*\"\n\n # Neither gerrit has it, nor does it have an upstream,\n # just create a whole new one\n else:\n run_command(\"git init %s\" % repo_path)\n git_command(\n repo_path,\n \"remote add origin %(remote_url)s\" % git_opts)\n with open(os.path.join(repo_path,\n \".gitreview\"),\n 'w') as gitreview:\n gitreview.write(\"\"\"[gerrit]\nhost=%s\nport=%s\nproject=%s\n\"\"\" % 
(GERRIT_HOST, GERRIT_PORT, project_git))\n git_command(repo_path, \"add .gitreview\")\n cmd = (\"commit -a -m'Added .gitreview' \")\n git_command(repo_path, cmd)\n return \"push %s HEAD:refs/heads/master\"\n\n\ndef update_local_copy(repo_path, track_upstream, git_opts, ssh_env):\n # first do a clean of the branch to prevent possible\n # problems due to previous runs\n git_command(repo_path, \"clean -fdx\")\n\n has_upstream_remote = (\n 'upstream' in git_command_output(repo_path, 'remote')[1])\n if track_upstream:\n # If we're configured to track upstream but the repo\n # does not have an upstream remote, add one\n if not has_upstream_remote:\n git_command(\n repo_path,\n \"remote add upstream %(upstream)s\" % git_opts)\n\n # If we're configured to track upstream, make sure that\n # the upstream URL matches the config\n else:\n git_command(\n repo_path,\n \"remote set-url upstream %(upstream)s\" % git_opts)\n\n # Now that we have any upstreams configured, fetch all of the refs\n # we might need, pruning remote branches that no longer exist\n git_command(\n repo_path, \"remote update --prune\", env=ssh_env)\n else:\n # If we are not tracking upstream, then we do not need\n # an upstream remote configured\n if has_upstream_remote:\n git_command(repo_path, \"remote rm upstream\")\n\n # TODO(mordred): This is here so that later we can\n # inspect the master branch for meta-info\n # Checkout master and reset to the state of origin/master\n git_command(repo_path, \"checkout -B master\")\n git_command(repo_path, \"pull origin master\")\n\n\ndef push_to_gerrit(repo_path, project, push_string, remote_url, ssh_env):\n try:\n git_command(repo_path, push_string % remote_url, env=ssh_env)\n git_command(repo_path, \"push --tags %s\" % remote_url, env=ssh_env)\n except Exception:\n log.exception(\n \"Error pushing %s to Gerrit.\" % project)\n\n\ndef sync_upstream(repo_path, project, ssh_env, upstream_prefix):\n git_command(\n repo_path,\n \"remote update upstream --prune\", env=ssh_env)\n # Any branch that exists in the upstream remote, we want\n # a local branch of, optionally prefixed with the\n # upstream prefix value\n for branch in git_command_output(\n repo_path, \"branch -a\")[1].split('\\n'):\n if not branch.strip().startswith(\"remotes/upstream\"):\n continue\n if \"->\" in branch:\n continue\n local_branch = branch.split()[0][len('remotes/upstream/'):]\n if upstream_prefix:\n local_branch = \"%s/%s\" % (\n upstream_prefix, local_branch)\n\n # Check out an up to date copy of the branch, so that\n # we can push it and it will get picked up below\n git_command(repo_path, \"checkout -B %s %s\" % (\n local_branch, branch))\n\n try:\n # Push all of the local branches to similarly named\n # Branches on gerrit. 
Also, push all of the tags\n git_command(\n repo_path,\n \"push origin refs/heads/*:refs/heads/*\",\n env=ssh_env)\n git_command(repo_path, 'push origin --tags', env=ssh_env)\n except Exception:\n log.exception(\n \"Error pushing %s to Gerrit.\" % project)\n\n\ndef process_acls(acl_config, project, ACL_DIR, section,\n remote_url, repo_path, ssh_env, gerrit, GERRIT_GITID):\n if not os.path.isfile(acl_config):\n return\n try:\n fetch_config(project, remote_url, repo_path, ssh_env)\n if not copy_acl_config(project, repo_path, acl_config):\n # nothing was copied, so we're done\n return\n create_groups_file(project, gerrit, repo_path)\n push_acl_config(project, remote_url, repo_path,\n GERRIT_GITID, ssh_env)\n except Exception:\n log.exception(\n \"Exception processing ACLS for %s.\" % project)\n finally:\n git_command(repo_path, 'reset --hard')\n git_command(repo_path, 'checkout master')\n git_command(repo_path, 'branch -D config')\n\n\ndef create_gerrit_project(project, project_list, gerrit):\n if project not in project_list:\n try:\n gerrit.createProject(project, empty_repo=True)\n return True\n except Exception:\n log.exception(\n \"Exception creating %s in Gerrit.\" % project)\n raise\n return False\n\n\ndef create_local_mirror(local_git_dir, project_git,\n gerrit_system_user, gerrit_system_group):\n\n git_mirror_path = os.path.join(local_git_dir, project_git)\n if not os.path.exists(git_mirror_path):\n (ret, output) = run_command_status(\n \"git --bare init %s\" % git_mirror_path)\n if ret:\n run_command(\"rm -rf git_mirror_path\")\n raise Exception(output)\n run_command(\"chown -R %s:%s %s\"\n % (gerrit_system_user, gerrit_system_group,\n git_mirror_path))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Manage projects')\n l.setup_logging_arguments(parser)\n parser.add_argument('--nocleanup', action='store_true',\n help='do not remove temp directories')\n parser.add_argument('projects', metavar='project', nargs='*',\n help='name of project(s) to process')\n args = parser.parse_args()\n l.configure_logging(args)\n\n default_has_github = registry.get_defaults('has-github', True)\n default_has_gitlab = registry.get_defaults('has-gitlab', True)\n\n LOCAL_GIT_DIR = registry.get_defaults('local-git-dir', '/var/lib/git')\n JEEPYB_CACHE_DIR = registry.get_defaults('jeepyb-cache-dir',\n '/var/lib/jeepyb')\n ACL_DIR = registry.get_defaults('acl-dir')\n GERRIT_HOST = registry.get_defaults('gerrit-host')\n GERRIT_PORT = int(registry.get_defaults('gerrit-port', '29418'))\n GERRIT_USER = registry.get_defaults('gerrit-user')\n GERRIT_KEY = registry.get_defaults('gerrit-key')\n GERRIT_GITID = registry.get_defaults('gerrit-committer')\n GERRIT_REPLICATE = registry.get_defaults('gerrit-replicate', True)\n GERRIT_SYSTEM_USER = registry.get_defaults('gerrit-system-user', 'gerrit2')\n GERRIT_SYSTEM_GROUP = registry.get_defaults('gerrit-system-group',\n 'gerrit2')\n DEFAULT_HOMEPAGE = registry.get_defaults('homepage')\n DEFAULT_HAS_ISSUES = registry.get_defaults('has-issues', False)\n DEFAULT_HAS_DOWNLOADS = registry.get_defaults('has-downloads', False)\n DEFAULT_HAS_WIKI = registry.get_defaults('has-wiki', False)\n GITHUB_SECURE_CONFIG = registry.get_defaults(\n 'github-config',\n '/etc/github/github-projects.secure.config')\n GITLAB_SECURE_CONFIG = registry.get_defaults(\n 'gitlab-config',\n '/etc/gitlab/gitlab-projects.secure.config')\n gerrit = gerritlib.gerrit.Gerrit(GERRIT_HOST,\n GERRIT_USER,\n GERRIT_PORT,\n GERRIT_KEY)\n project_list = gerrit.listProjects()\n ssh_env = 
make_ssh_wrapper(GERRIT_USER, GERRIT_KEY)\n try:\n\n for section in registry.configs_list:\n project = section['project']\n if args.projects and project not in args.projects:\n continue\n\n try:\n log.info(\"Processing project: %s\" % project)\n\n # Figure out all of the options\n options = section.get('options', dict())\n description = section.get('description', None)\n homepage = section.get('homepage', DEFAULT_HOMEPAGE)\n upstream = section.get('upstream', None)\n upstream_prefix = section.get('upstream-prefix', None)\n track_upstream = 'track-upstream' in options\n repo_path = os.path.join(JEEPYB_CACHE_DIR, project)\n\n # If this project doesn't want to use gerrit, exit cleanly.\n if 'no-gerrit' in options:\n continue\n\n project_git = \"%s.git\" % project\n remote_url = \"ssh://devops@%s:%s/%s\" % (\n GERRIT_HOST,\n GERRIT_PORT,\n project)\n git_opts = dict(upstream=upstream,\n repo_path=repo_path,\n remote_url=remote_url)\n acl_config = section.get(\n 'acl-config',\n '%s.config' % os.path.join(ACL_DIR, project))\n\n # Create the project in Gerrit first, since it will fail\n # spectacularly if its project directory or local replica\n # already exist on disk\n project_created = create_gerrit_project(\n project, project_list, gerrit)\n\n # Create the repo for the local git mirror\n create_local_mirror(\n LOCAL_GIT_DIR, project_git,\n GERRIT_SYSTEM_USER, GERRIT_SYSTEM_GROUP)\n\n if not os.path.exists(repo_path) or project_created:\n # We don't have a local copy already, get one\n\n # Make Local repo\n push_string = make_local_copy(\n repo_path, project, project_list,\n git_opts, ssh_env, upstream, GERRIT_HOST, GERRIT_PORT,\n project_git, GERRIT_GITID)\n else:\n # We do have a local copy of it already, make sure it's\n # in shape to have work done.\n update_local_copy(\n repo_path, track_upstream, git_opts, ssh_env)\n\n description = (\n find_description_override(repo_path) or description)\n\n if project_created:\n push_to_gerrit(\n repo_path, project, push_string, remote_url, ssh_env)\n if GERRIT_REPLICATE:\n gerrit.replicate(project)\n\n # If we're configured to track upstream, make sure we have\n # upstream's refs, and then push them to the appropriate\n # branches in gerrit\n if track_upstream:\n sync_upstream(repo_path, project, ssh_env, upstream_prefix)\n\n if acl_config:\n process_acls(\n acl_config, project, ACL_DIR, section,\n remote_url, repo_path, ssh_env, gerrit, GERRIT_GITID)\n\n if 'has-github' in options or default_has_github:\n created = create_github_project(\n DEFAULT_HAS_ISSUES, DEFAULT_HAS_DOWNLOADS,\n DEFAULT_HAS_WIKI, GITHUB_SECURE_CONFIG,\n options, project, description, homepage)\n if created and GERRIT_REPLICATE:\n gerrit.replicate(project)\n\n if 'has-gitlab' in options or default_has_gitlab:\n created = create_gitlab_project(\n DEFAULT_HAS_ISSUES,\n DEFAULT_HAS_WIKI, GITLAB_SECURE_CONFIG,\n options, project, description, homepage)\n if created and GERRIT_REPLICATE:\n gerrit.replicate(project)\n except Exception:\n log.exception(\n \"Problems creating %s, moving on.\" % project)\n continue\n finally:\n os.unlink(ssh_env['GIT_SSH'])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python_examples/work_instance/jeepyb/manage_projects.py","file_name":"manage_projects.py","file_ext":"py","file_size_in_byte":26156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"517704320","text":"from requests_oauthlib import OAuth1\nimport json\nimport sys\nimport requests\nimport secret_data1 # file that contains OAuth 
credentials\n# import nltk # uncomment line after you install nltk\n\n## SI 206 - HW\n## COMMENT WITH:\n#SECTION: Tuesdays 5:30 - 7\n## Any names of people you worked with on this assignment:\n\n#usage should be python3 hw5_twitter.py \nusername = sys.argv[1]\nnum_tweets = sys.argv[2]\n\nconsumer_key = secret_data.CONSUMER_KEY\nconsumer_secret = secret_data.CONSUMER_SECRET\naccess_token = secret_data.ACCESS_KEY\naccess_secret = secret_data.ACCESS_SECRET\n\n#Code for OAuth starts\nurl = 'https://api.twitter.com/1.1/account/verify_credentials.json'\nauth = OAuth1(consumer_key, consumer_secret, access_token, access_secret)\nrequests.get(url, auth=auth)\n#Code for OAuth ends\n\n\n#Write your code below:\n#Code for Part 3:Caching\n\n# on startup, try to load the cache from file\nCACHE_FNAME = 'twitter_cache.json'\ntry:\n cache_file = open(CACHE_FNAME, 'r')\n cache_contents = cache_file.read()\n CACHE_DICTION = json.loads(cache_contents)\n cache_file.close()\n\n\nexcept:\n CACHE_DICTION = {}\n\n\ndef params_unique_combination(baseurl, params):\n alphabetized_keys = sorted(params.keys())\n res = []\n for k in alphabetized_keys:\n res.append(\"{}-{}\".format(k, params[k]))\n return baseurl + \"_\".join(res)\n\ndef make_request_using_cache(baseurl, params, auth):\n unique_ident = params_unique_combination(baseurl,params)\n\n\n\n if unique_ident in CACHE_DICTION:\n print(\"Fetching cached data...\")\n return CACHE_DICTION[unique_ident]\n\n\n else:\n print(\"Making a request for new data...\")\n\n resp = requests.get(baseurl, params, auth=auth)\n CACHE_DICTION[unique_ident] = json.loads(resp.text)\n dumped_json_cache = json.dumps(CACHE_DICTION)\n fw = open(CACHE_FNAME,\"w\")\n fw.write(dumped_json_cache)\n fw.close() # Close the open file\n return CACHE_DICTION[unique_ident]\n\n\ndef get_twitter_data_caching(username, num_tweets):\n params_diction = {'screen_name': username, 'count' : num_tweets}\n return make_request_using_cache(baseurl, params_diction)\n\n\nbaseurl = \"https://api.twitter.com/1.1/statuses/user_timeline.json\"\nresponse = make_request_using_cache(baseurl, params={'screen_name': username, 'count': num_tweets}, auth=auth)\n\n\n\n\n\n\n#Finish parts 1 and 2 and then come back to this\n\n#Code for Part 1:Get Tweets\n\nbaseurl = \"https://api.twitter.com/1.1/statuses/user_timeline.json\"\nparams = {'screen_name':username,'count': num_tweets}\nr = requests.get(baseurl, params=params, auth=auth)\nobj = json.loads(r.text)\n#return obj\n\n#Code for Part 2:Analyze Tweets\nimport nltk\n\nlist_of_text = []\nfor x in obj:\n list_of_text.append(x['text'])\n\nbig_string = \" \".join(list_of_text)\n\n#Creates list of tokens\n\ntokens = nltk.word_tokenize(big_string)\n\n#Creates frequency distribution from list\n\nbad_word =[\"www.\", 'http', 'https', 'RT']\nfreqDist = nltk.FreqDist(token for token in tokens if token.isalpha() and 'http' not in token and 'RT' not in token)\n\n#Loop through and print the words and frequencies for the most common 5 words\n\nfor word, frequency in freqDist.most_common(5):\n print(word + \" \" + str(frequency))\n\n\n\nif __name__ == \"__main__\":\n if not consumer_key or not consumer_secret:\n print(\"You need to fill in client_key and client_secret in the secret_data.py file.\")\n exit()\n if not access_token or not access_secret:\n print(\"You need to fill in this API's specific OAuth URLs in this file.\")\n 
exit()\n","sub_path":"hw5_twitter1.py","file_name":"hw5_twitter1.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105208079","text":"class RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n # Begin appending index at 0\n self.ind = 0\n # Create empty list to store items\n self.items = []\n\n def append(self, item):\n # First check if index needs to be reset to not exceed capacity\n if self.ind >= self.capacity:\n self.ind = 0\n # If list is not yet full, append item to end of list\n if len(self.items) < self.capacity:\n self.items.append(item)\n # If list is full, pop item at the given index & replace with new item\n else:\n # Remove current item at given index\n self.items.pop(self.ind)\n # Insert item at given index\n self.items.insert(self.ind, item)\n # Update index\n self.ind += 1\n\n def get(self):\n # Return list of items\n return self.items\n","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326016949","text":"import datetime\n\n#Minimum duration of the interview\nMIN_INTERVIEW_TIME = 1800 #minutes\n\n# computes matches of timeslots for candidate as list of timeslots and interviewers as timeslots\n# with given date range\n# 4 situations:\n\n# First Case:\n# [ ] - first timerange\n# [ ] - second timerange\n\n# Second Case:\n# [ ] - first timerange\n# [ ] - second timerange\n\n# Third Case:\n# [ ] - firt timerange\n# [ ] - second timerange\n# *with condition that intersection is more than MIN_INTERVIEW_TIME\n\n# Fourth Case:\n# [ ] - firt timerange\n# [ ] - second timerange\n# *with condition that intersection is more than MIN_INTERVIEW_TIME\n\nclass Timeslot:\n def __init__(self, start_date = None, end_date = None, interviewer = None):\n self.start_date = start_date\n self.end_date = end_date\n self.interviewer = interviewer\n\nclass Timeslot_sm:\n def __init__(self, start_date = None, end_date = None):\n self.start_date = start_date\n self.end_date = end_date\n\ndef timeslots_computation(interviewers, candidate):\n\n timeslots = []\n\n for candidate_element in candidate:\n for interviewer_element in interviewers:\n if interviewer_element.start_date.date() == candidate_element.start_date.date():\n if candidate_element.end_date > interviewer_element.start_date:\n\n first_timedate_start = candidate_element.start_date\n first_timedate_end = candidate_element.end_date\n second_timedate_start = interviewer_element.start_date\n second_timedate_end = interviewer_element.end_date\n\n #First case\n if (first_timedate_start <= second_timedate_start and first_timedate_end > second_timedate_end):\n timeslots.append(Timeslot(second_timedate_start, second_timedate_end, interviewer_element.interviewer))\n\n #Second case\n if (first_timedate_start >= second_timedate_start and first_timedate_end <= second_timedate_end):\n timeslots.append(Timeslot(first_timedate_start, first_timedate_end, interviewer_element.interviewer))\n\n #Third case\n if (first_timedate_start > second_timedate_start and first_timedate_start < second_timedate_end and\n first_timedate_end > second_timedate_end and\n (second_timedate_end - first_timedate_start).seconds >= MIN_INTERVIEW_TIME):\n timeslots.append(Timeslot(first_timedate_start, second_timedate_end, interviewer_element.interviewer))\n\n #Fourth case\n if (first_timedate_end > 
second_timedate_start and first_timedate_end < second_timedate_end and\n first_timedate_start < second_timedate_start and\n (first_timedate_end - second_timedate_start).seconds >= MIN_INTERVIEW_TIME):\n timeslots.append(Timeslot(second_timedate_start, first_timedate_end, interviewer_element.interviewer))\n \n return timeslots\n\ndef timeslots_computation_sm(interviewers, candidate):\n timeslots_sm = []\n timeslots = []\n timeslots = timeslots_computation(interviewers, candidate)\n \n for first_element in enumerate(timeslots):\n for second_element in enumerate(timeslots):\n if(first_element.interviewer.id != second_element.interviewer.id ):\n\n first_timedate_start = first_element.start_date\n first_timedate_end = first_element.end_date\n second_timedate_start = second_element.start_date\n second_timedate_end = second_element.end_date\n\n if first_element.start_date.date() == second_element.start_date.date(): \n if first_timedate_end > second_timedate_start:\n\n #First case\n if (first_timedate_start <= second_timedate_start and first_timedate_end > second_timedate_end):\n timeslots_sm.append(Timeslot_sm(second_timedate_start, second_timedate_end))\n\n #Second case\n if (first_timedate_start >= second_timedate_start and first_timedate_end <= second_timedate_end):\n timeslots_sm.append(Timeslot_sm(first_timedate_start, first_timedate_end))\n\n #Third case\n if (first_timedate_start > second_timedate_start and first_timedate_start < second_timedate_end and\n first_timedate_end > second_timedate_end and\n (second_timedate_end - first_timedate_start).seconds >= MIN_INTERVIEW_TIME):\n timeslots_sm.append(Timeslot_sm(first_timedate_start, second_timedate_end))\n\n #Fourth case\n if (first_timedate_end > second_timedate_start and first_timedate_end < second_timedate_end and\n first_timedate_start < second_timedate_start and\n (first_timedate_end - second_timedate_start).seconds >= MIN_INTERVIEW_TIME):\n timeslots_sm.append(Timeslot_sm(second_timedate_start, first_timedate_end))\n \n #returns only timeslots\n return timeslots_sm\n","sub_path":"timeslots_app/tscompute.py","file_name":"tscompute.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515847568","text":"'''\nCreated on Apr 25, 2015\n\n@author: eytan\n'''\n\nfrom django.contrib.auth.models import User\nfrom django.test.testcases import TestCase\n\n\nclass AbstractModelTest(TestCase):\n def setUp(self):\n TestCase.setUp(self)\n self.pk = self.model.objects.create(**self.data).pk\n \n def _assert_good_instance(self, instnance, data=None):\n if not data:\n data = self.data\n for k in data.keys():\n self.assertEqual(data[k], getattr(instnance, k), \"<%s expected='%s'>Actual<'%s'>\" % (k, str(data[k]), str(getattr(instnance, k))))\n \n def _update_instance(self, instance, data=None):\n if not data:\n data = self.update_data\n for k in data.keys():\n setattr(instance, k, data[k])\n instance.save()\n \n def _test(self):\n from_db = self.model.objects.get(pk=self.pk)\n self._assert_good_instance(from_db)\n self._update_instance(from_db)\n from_db = self.model.objects.get(pk=self.pk)\n self._assert_good_instance(from_db, data=self.update_data)\n\nclass ABstractDBEnumerationTestCase(AbstractModelTest):\n \n def setUp(self):\n self.data = {\"value\":5, \"name\":\"Test name\"}\n self.update_data = {\"name\":\"New Test name\"}\n AbstractModelTest.setUp(self) \n \n def _enum_test(self):\n self._test()\n self.assertEqual(0, self.model.objects.default.pk)\n 
self.assertEqual(self.model.default_name, self.model.objects.default.name)\n \nclass LoginTestCase(TestCase):\n def setUp(self):\n TestCase.setUp(self)\n self.user = User.objects.create_superuser('eytan', 'a@x.com', 'test')\n login_successful = self.client.login(username=\"eytan\",password=\"test\")\n self.assertTrue(login_successful)\n\n\n","sub_path":"service_calls/utils/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42283840","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport storm\nfrom couchbase import Couchbase\nimport json, os, uuid, logging\n\n\n\nclass CouchBaseBolt(storm.BasicBolt):\n global couchbase\n couchbase = Couchbase('127.0.0.1:8091', username='YOUR_USERNAME', password='YOUR_PASSWORD')\n global bucket\n bucket = couchbase['default']\n\n def process(self, tup):\n handler = logging.FileHandler(\"logfile.txt\", \"w\", encoding = \"UTF-8\")\n\n formatter = logging.Formatter(\"%(message)s\")\n handler.setFormatter(formatter)\n root_logger = logging.getLogger()\n root_logger.addHandler(handler)\n root_logger.setLevel(logging.INFO)\n\n try:\n key = str(uuid.uuid4())\n myjson = json.loads(tup.values[0])\n bucket[\"SPOUT_%s\"%(key)] = json.dumps(myjson, sort_keys=True)\n root_logger.info(myjson)\n \n except Exception as inst:\n root_logger.info(\"EXCEPTION!\")\n root_logger.info(myjson)\n root_logger.info(inst)\n root_logger.info(inst.args)\n \n\n\nCouchBaseBolt().run()","sub_path":"target/classes/couchbaseconnector.py","file_name":"couchbaseconnector.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36643327","text":"from selenium import webdriver\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom time import sleep\r\n\r\n# crawl and fetch data from Leumi Band Account\r\ndef crawl4html(user_data):\r\n\r\n # login\r\n driver = webdriver.Chrome()\r\n driver.get(\"https://hb2.bankleumi.co.il/\")\r\n\r\n uid_field = driver.find_element_by_id(\"uid\")\r\n pw_field = driver.find_element_by_id(\"password\")\r\n enter_btn = driver.find_element_by_id(\"enter\")\r\n\r\n uid_field.clear()\r\n uid_field.send_keys(user_data[1])\r\n\r\n pw_field.clear()\r\n pw_field.send_keys(user_data[2])\r\n\r\n enter_btn.click()\r\n\r\n timeout = 20\r\n try:\r\n element_present = EC.presence_of_element_located((By.XPATH, '/html/body/app-root/div/div/div[1]/div/div/div/div/div/div/div[3]/div/div[2]/div/section/dynamic-panel/nischecking-directive/div/div/div/div/div[1]/h3/span/a'))\r\n WebDriverWait(driver, timeout).until(element_present)\r\n except TimeoutException:\r\n print(\"Timed out waiting for page to load\")\r\n\r\n driver.maximize_window()\r\n\r\n # navigate to \"over vashav\"\r\n overvashav = driver.find_element_by_xpath(\"/html/body/app-root/div/div/div[1]/div/div/div/div/div/div/div[3]/div/div[2]/div/section/dynamic-panel/nischecking-directive/div/div/div/div/div[1]/h3/span/a\")\r\n overvashav.click()\r\n\r\n try:\r\n element_present = EC.presence_of_element_located((By.ID, 'ddlTransactionPeriod'))\r\n WebDriverWait(driver, timeout).until(element_present)\r\n except TimeoutException:\r\n print(\"Timed out waiting for 
page to load\")\r\n\r\n # select dates\r\n era_drop_down = Select(driver.find_element_by_id(\"ddlTransactionPeriod\"))\r\n era_drop_down.select_by_value(\"004\")\r\n\r\n DTFROM = user_data[3]\r\n DTTO = user_data[4]\r\n\r\n dt_from = driver.find_element_by_id(\"dtFromDate_textBox\")\r\n dt_from.clear()\r\n dt_from.send_keys(DTFROM)\r\n\r\n dt_to = driver.find_element_by_id(\"dtToDate_textBox\")\r\n dt_to.clear()\r\n dt_to.send_keys(DTTO)\r\n\r\n display_dates_btn = driver.find_element_by_id(\"btnDisplayDates\")\r\n display_dates_btn.click()\r\n\r\n # download html file\r\n def save_html():\r\n\r\n export_html = driver.find_element_by_id(\"BTNSAVE\")\r\n export_html.click()\r\n driver.switch_to.window(\"SaveAsOptions\")\r\n sleep(2)\r\n continue_html_btn = driver.find_element_by_id(\"ImgContinue\")\r\n continue_html_btn.click()\r\n driver.switch_to.window(\"\")\r\n\r\n save_html()\r\n\r\n # navigate to credit card\r\n CREDIT1_PATH = '// table[@id = \"ctlActivityTable\"] / tbody / tr[2] / td[1] / a'\r\n\r\n def goto_creditcard(path):\r\n\r\n sleep(2)\r\n credit_link = driver.find_element_by_xpath(\"/html/body/div[5]/div[2]/table/tbody/tr[2]/td/div/div/footer-directive/div/div/section/div/placeholder-directive[2]/ul/li[3]/a\")\r\n credit_link.click()\r\n sleep(2)\r\n card_num = driver.find_element_by_xpath(path)\r\n card_num.click()\r\n card_era = Select(driver.find_element_by_id(\"ddlDatePayment\"))\r\n card_era.select_by_value(\"02\")\r\n driver.find_element_by_id(\"btnDisplay\").click()\r\n\r\n goto_creditcard(CREDIT1_PATH)\r\n\r\n # download html file\r\n save_html()\r\n\r\n # if user has more than 1 credit card - use this method.\r\n def next_cc(ccval):\r\n cc_drop_down = Select(driver.find_element_by_id(\"ddlCard\"))\r\n cc_drop_down.select_by_value(ccval)\r\n card_era = Select(driver.find_element_by_id(\"ddlDatePayment\"))\r\n card_era.select_by_value(\"02\")\r\n display_btn = driver.find_element_by_id(\"btnDisplay\")\r\n display_btn.click()\r\n\r\n if user_data[1] == \"\": # insert Leumi Account User ID (\"zihui mishtamaesh\") for account with 3 credit cards.\r\n next_cc('4') # 2nd card from dropdown menu\r\n try:\r\n save_html()\r\n except Exception as e:\r\n print(e)\r\n next_cc('5') # 3rd card from dropdown menu\r\n try:\r\n save_html()\r\n except Exception as e:\r\n print(e)\r\n","sub_path":"NYFinancial/dl_html.py","file_name":"dl_html.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505394479","text":"import torch\n\ndef wasserstein_loss(y_pred):\n return y_pred.mean()\n\n\ndef compute_gradient_penalty(discriminator, real_samples, \n fake_samples, alpha, gradient_penalty_weight=10):\n \"\"\"Calculates the gradient penalty loss for WGAN GP\"\"\"\n # Random weight term for interpolation between real and fake samples\n random_uniform = torch.rand(real_samples.shape[0], 1, 1, 1, 1).to(real_samples.device)\n # Get random interpolation between real and fake samples\n interpolates = (random_uniform * real_samples + ((1 - random_uniform) * fake_samples)).requires_grad_(True)\n d_interpolates = discriminator(interpolates, alpha)\n fake = torch.autograd.Variable(torch.Tensor(real_samples.shape[0], 1).fill_(1.0).to(real_samples.device), requires_grad=False)\n # Get gradient w.r.t. 
interpolates\n gradients = torch.autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.size(0), -1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n return gradient_penalty * gradient_penalty_weight\n","sub_path":"pgan_pytorch/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362453614","text":"\n\nclass macroCaloriesEstimator:\n \"\"\"The class calculates various body indexes like LBM, BMR & TDEE.\n Also computes macros as the share of the total daily kcals.\n \n :param weight: body weight in pounds\n :type weight: int\n :param height: height in feet\n :type height: int\n :param body_fat: user's body fat as percentage, e.i. if body fat is 15% of total weight enter 15\n :type body_fat: float, optional\n :param age: user's age in years\n :type age: int\n :param gender: gender of the user. Can take both args, \"Male/Female\" of \"Man/Women\"\n :type gender: string\n \"\"\"\n \n PROTEIN_KCAL = 4\n CARBS_KCAL = 4\n FATS_KCAL = 9\n \n def __init__(self, weight, height, body_fat, age, gender):\n self.weight = weight\n self.height = height\n self.body_fat = body_fat\n self.age = age\n self.gender = gender\n\n def __str__(self):\n return f'Weight: {self.weight} lbs\\nBody fat: {self.body_fat} %\\nHeight: {self.height}'\n\n def lean_body_mass(self):\n \"\"\"\n Lean body mass (LBM) is a part of body composition that is defined\n as the difference between total body weight and body fat weight.\n \"\"\"\n return self.weight * (1 - (self.body_fat / 100))\n\n def _basal_metabolic_rate(self):\n \"\"\"BMR is the number of calories required to keep your body functioning at rest.\n \n BMR is also known as your body's metabolism; therefore,\n any increase to your metabolic weight, such as exercise, will increase your BMR.\n \"\"\"\n if self.gender.lower() == 'male':\n return 66 + (6.23 * self.weight) + (12.7 * self.height * 12) - (6.8 * self.age)\n elif self.gender.lower() == 'female':\n return 665 + (4.35 * self.weight) + (4.7 * self.height * 12) - (4.7 * self.age)\n\n def total_daily_energy_expenditure(self, exercise_frequency, active_job):\n \"\"\"\n TDEE is an estimation of how calories burned per day when exercise is taken into account.\n\n :param exercise_frequency: Number of days you exercise per week.\n :type exercise_frequency: int\n ...\n :return: BMR adjusted for the exercise amount.\n :rtype: int\n \"\"\"\n tdee = 0\n if exercise_frequency == 'Occasionally':\n tdee = self._basal_metabolic_rate() * 1.2\n elif exercise_frequency == '1 to 2 Day':\n tdee = self._basal_metabolic_rate() * 1.375\n elif exercise_frequency == '3 to 4 days':\n tdee = self._basal_metabolic_rate() * 1.55\n elif exercise_frequency == '5 to 7 days':\n tdee = self._basal_metabolic_rate() * 1.725\n # Additional multiplier if the user has a physically active job.\n if active_job == 'Yes':\n return tdee * 1.15\n elif active_job == 'No':\n return tdee\n\n def protein_requirement(self):\n \"\"\"Minimum protein amount (in grams) needed for your body weight\"\"\"\n return self.lean_body_mass() / 2.20462 * 2.25\n\n def diet_macros(self, diet_type, exercise_frequency, active_job):\n \"\"\"Calculates macros (Proteins, Carbs, Fats) for a chosen diet.\n\n :param diet_type: Three options of diet 'gain', 'lose', 'maintain'\n :type diet_type: string\n ...\n 
:return: protein, carbs, fats, totals: Returns macros as Kcal.\n :rtype: int\n \"\"\"\n if diet_type.lower() == 'gain':\n protein = self.weight * self.PROTEIN_KCAL\n carbs = self.weight * 2 * self.CARBS_KCAL\n fats = self.weight * 0.45 * self.FATS_KCAL\n total = sum([protein, carbs, fats])\n tdee = self.total_daily_energy_expenditure(exercise_frequency, active_job)\n if tdee > total:\n diff = tdee - total\n while total <= tdee + 500:\n protein += diff * (protein/total)\n carbs += diff * (carbs/total)\n fats += diff * (fats/total)\n total = sum([protein, carbs, fats])\n return protein, carbs, fats, sum([protein, carbs, fats])\n\n elif diet_type.lower() == 'lose':\n protein = self.weight * 1.4 * self.PROTEIN_KCAL\n carbs = self.weight * self.CARBS_KCAL\n fats = self.weight * 0.25 * self.FATS_KCAL\n total = sum([protein, carbs, fats])\n tdee = self.total_daily_energy_expenditure(exercise_frequency, active_job)\n if tdee - total < 350:\n diff = 350 - (tdee - total)\n while total >= tdee - 350:\n protein -= diff * (protein/total)\n carbs -= diff * (carbs/total)\n fats -= diff * (fats/total)\n total = sum([protein, carbs, fats])\n return protein, carbs, fats, sum([protein, carbs, fats])\n \n elif diet_type.lower()== 'maintain':\n protein = self.weight * self.PROTEIN_KCAL\n carbs = self.weight * 1.6 * self.CARBS_KCAL\n fats = self.weight * 0.35 * self.FATS_KCAL\n total = sum([protein, carbs, fats])\n tdee = self.total_daily_energy_expenditure(exercise_frequency, active_job)\n if tdee > total:\n diff = tdee - total\n while total < tdee:\n protein += 1\n carbs += 1.6\n fats += 0.35\n total = sum([protein, carbs, fats])\n elif tdee < total:\n diff = total - tdee\n while total > tdee:\n protein += 1\n carbs += 1.6\n fats += 0.35\n total = sum([protein, carbs, fats])\n return protein, carbs, fats, sum([protein, carbs, fats])\n\n def print_macros(self, diet_type, exercise_frequency, active_job):\n \"\"\"Prints the chosen diet with macros as grams & kcal, and totals as kcal.\"\"\"\n if diet_type.lower() == 'gain':\n protein, carbs, fats, total = self.diet_macros(diet_type, exercise_frequency, active_job)\n elif diet_type.lower() == 'lose':\n protein, carbs, fats, total = self.diet_macros(diet_type, exercise_frequency, active_job)\n elif diet_type.lower() == 'maintain':\n protein, carbs, fats, total = self.diet_macros(diet_type, exercise_frequency, active_job)\n return f'Protein: \\t{round(protein/self.PROTEIN_KCAL, 1)} g. \\t{int(protein)} kcal.\\\n \\nCarbs: \\t{round(carbs/self.CARBS_KCAL, 1)} g. \\t{int(carbs)} kcal.\\\n \\nFats: \\t{round(fats/self.FATS_KCAL, 1)} g. 
\\t{int(fats)} kcal.\\\n \\nTotal: \\t\\t{int(total)} kcal.'\n","sub_path":"MacroEstimator.py","file_name":"MacroEstimator.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483775253","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/starwater/blowdrycss_venv/lib/python3.6/site-packages/blowdrycss/fontparser.py\n# Compiled at: 2018-03-07 18:42:13\n# Size of source mod 2**32: 3019 bytes\nfrom __future__ import absolute_import\n__author__ = 'chad nelson'\n__project__ = 'blowdrycss'\n\nclass FontParser(object):\n __doc__ = \" **Features:**\\n\\n - Parses unquoted font families.\\n\\n Unquoted Font-Family References:\\n | http://www.cssfontstack.com/\\n | https://mathiasbynens.be/notes/unquoted-font-family\\n\\n - Holds a basic ``font_families_dict`` (could be extended as desired):\\n | Keys: ``font-family`` category names\\n | Values: ``font-family`` member names\\n\\n - Can generate web safe fallback fonts.\\n\\n Assumes that the property_name is ``font-family``. It does not handle the shorthand property_name ``font``\\n\\n **Examples:**\\n\\n >>> font_parser = FontParser('papyrus')\\n >>> font_parser.generate_fallback_fonts()\\n 'papyrus, fantasy'\\n\\n \"\n\n def __init__(self, font_value=''):\n self.font_value = font_value\n self.font_families_dict = {'serif':{\n 'georgia', 'palatino', 'times', 'cambria', 'didot', 'garamond', 'perpetua', 'rockwell', 'baskerville'}, \n 'sans-serif':{\n 'arial', 'helvetica', 'gadget', 'cursive', 'impact', 'charcoal', 'tahoma', 'geneva', 'verdana',\n 'calibri', 'candara', 'futura', 'optima'}, \n 'monospace':{\n 'courier', 'monaco', 'consolas'}, \n 'fantasy':{\n 'copperplate', 'papyrus'}}\n\n def generate_fallback_fonts(self):\n \"\"\" Generates web safe fallback fonts\n\n Reference: http://www.w3schools.com/cssref/css_websafe_fonts.asp\n\n :return: (str) -- Returns a web safe fallback font string.\n\n **Examples:**\n\n >>> font_parser = FontParser('arial')\n >>> font_parser.generate_fallback_fonts()\n 'arial, sans-serif'\n >>> font_parser.font_value = 'monospace'\n 'monospace'\n >>> font_parser.font_value = 'invalid'\n ''\n\n \"\"\"\n fallback = ''\n if self.font_value in self.font_families_dict:\n fallback = self.font_value\n else:\n for family, fonts in self.font_families_dict.items():\n if self.font_value in fonts:\n fallback = self.font_value + ', ' + family\n\n return fallback","sub_path":"pycfiles/blowdrycss-1.0.3.linux-x86_64.tar/fontparser.cpython-36.py","file_name":"fontparser.cpython-36.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549446578","text":"#!/usr/bin/env python3\n\nimport sys\nimport numpy as np\n\nfor line in sys.stdin:\n\tfields = line.strip().split()\n\tm1, m2 = fields[:2]\n\tobserved = list(map(int, fields[2:11]))\n\tpval = float(fields[11])\n\tmatrix = np.array(observed, dtype=float).reshape((3,3))\n\n\tmarginal_totals_0 = matrix.sum(axis=0)\n\tmarginal_totals_1 = matrix.sum(axis=1)\n\texpected = np.array([[a*b for b in marginal_totals_0] for a in marginal_totals_1])\n\texpected *= matrix.sum()/float(expected.sum())\n\n\tif pval > 1e-4 or expected.min() < 20:\n\t\tcontinue\t\t\t\t\n\n\t# if pval < 1e-5:\n\t# \tsys.stderr.write(line + '\\n')\n\t# 
\tsys.stderr.flush()\n\n\tprint(line.strip())\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189355247","text":"import random\r\nfrom IPython.display import clear_output as cls\r\n\r\nchoice = ''\r\nmessage = ''\r\ncls_choice = ''\r\n\r\nascii_characters = [chr(x) for x in range(32,127)]\r\n\r\nwhile str(choice) != '0':\r\n shuffled_list = [chr(x) for x in range(32,127)]\r\n result = ''\r\n \r\n choice = input(\"\\nDo you want to Encrypt or Decrypt the Message?\\n Enter 1 to Encrypt, 2 to Decrypt or 0 to Exit Program: \")\r\n\r\n if str(choice) == '1':\r\n message = input('\\nEnter Message for Encryption: ')\r\n \r\n seed_val = input('Enter an Integer to use as a Seed: ')\r\n random.seed(seed_val)\r\n random.shuffle(shuffled_list)\r\n\r\n for index in range(0, len(message)):\r\n result += shuffled_list[ascii_characters.index(message[index])]\r\n\r\n print(f'\\nEncoded Message: {result} \\n\\n')\r\n \r\n cls_choice = input(\"Clear Screen? Answer (Y/N): \").upper()\r\n if cls_choice == 'Y':\r\n cls()\r\n \r\n elif str(choice) == '2':\r\n message = input('\\nEnter Message to Decrypt: ')\r\n\r\n seed_val = input('Enter an Integer to use as a Seed (should be the same one used to encrypt): ')\r\n random.seed(seed_val)\r\n random.shuffle(shuffled_list)\r\n\r\n for index in range(0, len(message)):\r\n result += ascii_characters[shuffled_list.index(message[index])]\r\n\r\n print(f'\\nDecoded Message: {result} \\n\\n')\r\n \r\n cls_choice = input(\"Clear Screen? Answer (Y/N): \").upper()\r\n if cls_choice == 'Y':\r\n cls()\r\n\r\n elif str(choice) != '0':\r\n print('Invalid Input, please try again. \\n\\n')\r\n \r\nelse:\r\n print(\"\\n\\nThanks for using the Algorithm.\")","sub_path":"Text_Encryption_Decryption_Software.py","file_name":"Text_Encryption_Decryption_Software.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206739329","text":"def get_odd_list():\n\todds = []\n\tn = int(input())\n\twhile n != -1:\n\t\tif n % 2 != 0:\n\t\t\todds.append(n)\n\t\tn = int(input())\n\treturn odds\n\ndef main():\n\todds = get_odd_list()\n\tprint(odds)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Sequences_sets_maps/get_odd_list.py","file_name":"get_odd_list.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"434709506","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 19 16:47:35 2021\n\n@author: Mohammad Asif Zaman\n\"\"\"\n\n\n# Parameters (All SI units)\n# =============================================================================\nk_B = 1.38e-23 # Boltzmann constant\nT = 300 # Temperature (K)\neta = 8.9e-4 # Dynamic viscosity of water (PaS) \n\n\nframe_rate = 30 # Animation frame rate in fps\nNt =frame_rate*40 # 300 #1501 # Number of time steps\n\n\nrho = 1055 # density of polystyrene beads in kg/m3\ndamping_factor = 0 # Fraction of the velocity that is lost during each collision\n\n\n# Np = number of particles, defined inside the force_() functions\n# ro = (Np,1) raidus vector, defined inside the force_() functions\n# tfinal = final simulation time, defined inside the force_() functions","sub_path":"Codes/Version 
1.7.7/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151179028","text":"# import matplotlib.pyplot as plt\nimport re\nimport copy\n\ndef load(filename: str) -> list:\n \"\"\"Open a text file & return a list of lowercase strings.\"\"\"\n try:\n with open(filename) as in_file:\n loaded_txt = in_file.read().strip().split(\"\\n\")\n loaded_txt = [x.lower() for x in loaded_txt]\n return loaded_txt\n except IOError as e:\n print(\"{}\\nError opening {}. Terminating program.\".format(e, filename))\n\ndef parse_input(input_list: list) -> list:\n rules = dict()\n my_ticket = []\n nearby = []\n rerules = re.compile(r'(?P.*?): (?P\\d+)-(?P\\d+) or (?P\\d+)-(?P\\d+)')\n rows = 0\n while input_list[rows] != \"\":\n scanrule = rerules.match(input_list[rows])\n if scanrule == None: break\n rules[scanrule.group('rule_name')] = {\n 1: scanrule.group('rule_1a'),\n 2: scanrule.group('rule_1b'),\n 3: scanrule.group('rule_2a'),\n 4: scanrule.group('rule_2b')\n }\n rows += 1\n rows += 2\n my_ticket = input_list[rows].split(\",\")\n rows += 3\n for ticket in input_list[rows:]:\n nearby.append(ticket.split(\",\"))\n return rules, my_ticket, nearby\n\ndef validate_ticket(rules: dict(), ticket: list) -> int:\n errors = 0\n invalids = set()\n for number in ticket:\n number = int(number)\n any_valid = False\n for rule in rules:\n a = int(rules[rule][1])\n b = int(rules[rule][2])\n c = int(rules[rule][3])\n d = int(rules[rule][4])\n if ((a <= number <= b) or (c <= number <= d)):\n any_valid = True\n if not(any_valid):\n invalids.add(number)\n if len(invalids) > 0:\n errors = sum(invalids)\n return errors\n\ninput_list = load(\"Day16A-input.txt\")\nrules, my_ticket, nearby = parse_input(input_list)\n\nerror_rate = 0\nfor ticket in nearby:\n error_rate += validate_ticket(rules, ticket)\n\nprint(error_rate)\n","sub_path":"Day16A.py","file_name":"Day16A.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647379093","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 16 16:52:16 2016\n\n@author: trader2\n\"\"\"\n\n\nimport subprocess\nimport sys,os\nfrom multiprocessing import Pool\n\ndef long_time_task(name):\n cmdline=name\n print(\"running \",cmdline)\n os.system(cmdline) \n\njoblist=list()\nfor line in sys.stdin:\n print(line.strip())\n joblist.append(line.strip())\n\nif __name__=='__main__':\n p = Pool(int(sys.argv[1]))\n for i in range(len(joblist)):\n p.apply_async(long_time_task, args=(joblist[i],))\n print('Waiting for all subprocesses done...')\n p.close()\n p.join()\n print('All subprocesses done.')\n","sub_path":"multi_run.py","file_name":"multi_run.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"282373013","text":"#Author: Rishab Kanwal\t\n#Checks unknown DNA sample to determine if it is from a human or mouse\n#CSCI 1300\n#Recitation 4\n#TA: Sina Aghil\ndef main(): \n h = open(\"humanDNA.txt\", \"r\")\n human = h.readline()\n h.close()\n m = open(\"mouseDNA.txt\", \"r\")\n mouse = m.readline()\n m.close()\n u = open(\"unknownDNA.txt\", \"r\")\n unknown = u.readline()\n u.close()\n um = compareDNA(unknown, mouse)\n uh = compareDNA(unknown, human)\n print(\"the similarity score for the mouse is\", um)\n print(\"the similarity score for the human is\", uh)\n if um > uh:\n 
print(\"the DNA is from a mouse\")\n elif um == uh:\n print(\"identity cannot be determined\")\n else:\n print(\"the DNA is from a human\")\n \n#compareDNA\ndef compareDNA(DNA1, DNA2):\n hd = 0\n ss = 0\n l = len(DNA1)\n for x in range(0,l):\n if DNA1[x] != DNA2[x]:\n hd = hd + 1\n ss = float(l - hd) / l\n return(round(ss, 2))\n\n\nmain()\n","sub_path":"Recitation4/DNA.py","file_name":"DNA.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"625039216","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/nutch/test_nutch.py\n# Compiled at: 2015-10-04 15:23:08\nimport nutch, pytest, glob\nfrom time import sleep\nslow = pytest.mark.slow\n\ndef get_nutch():\n return nutch.Nutch()\n\n\ndef test_nutch_constructor():\n nt = get_nutch()\n assert nt\n\n\ndef get_config_client():\n return get_nutch().Configs()\n\n\ndef test_config_client_constructor():\n cc = get_config_client()\n assert cc\n\n\ndef test_config_access():\n cc = get_config_client()\n default_config = cc['default']\n assert default_config.info()\n\n\ndef test_config_create():\n cc = get_config_client()\n cc['defaultcopy'] = {}\n assert cc['defaultcopy'].info()['db.fetch.interval.max']\n\n\ndef test_config_copy():\n cc = get_config_client()\n default_config = cc['default']\n default_config_data = default_config.info()\n cc['defaultcopy'] = default_config_data\n assert cc['defaultcopy'].info()['db.fetch.interval.max']\n\n\ndef get_seed_client():\n return get_nutch().Seeds()\n\n\ndef test_seed_client_constructor():\n sc = get_seed_client()\n assert sc\n\n\ndef get_seed(seed_urls=('http://aron.ahmadia.net', 'http://www.google.com')):\n sc = get_seed_client()\n return sc.create('test_seed', seed_urls)\n\n\ndef test_seed_create():\n seed_urls = ('http://aron.ahmadia.net', 'http://www.google.com')\n seed = get_seed(seed_urls)\n seed_path = seed.seedPath\n with open(glob.glob(seed_path + '/*.txt')[0]) as (f):\n seed_data = f.read()\n assert seed_data.split() == list(seed_urls)\n\n\ndef get_job_client():\n return get_nutch().Jobs()\n\n\ndef get_inject_job(jc=None):\n seed = get_seed()\n if jc is None:\n jc = get_job_client()\n return jc.inject(seed)\n\n\ndef test_job_client_constructor():\n jc = get_job_client()\n assert jc\n\n\ndef test_job_start():\n jc = get_job_client()\n old_jobs = jc.list()\n inject_job = get_inject_job(jc)\n updated_jobs = jc.list()\n assert len(updated_jobs) == len(old_jobs) + 1\n assert inject_job not in old_jobs\n assert inject_job in updated_jobs\n\n\ndef test_job_client_lists():\n jc1 = get_job_client()\n jc2 = get_job_client()\n jc1_job = get_inject_job(jc1)\n assert jc1_job in jc1.list()\n assert jc1_job not in jc2.list()\n assert jc1_job in jc2.list(allJobs=True)\n\n\ndef test_job_inject():\n nt = get_nutch()\n inject_job = get_inject_job()\n job_info = inject_job.info()\n assert job_info['type'] == 'INJECT'\n assert job_info['msg'] == 'OK'\n assert job_info['confId'] == nt.confId\n\n\ndef test_job_generate():\n nt = get_nutch()\n jc = get_job_client()\n inject = get_inject_job(jc)\n for wait in range(10):\n if inject.info()['state'] != 'FINISHED':\n sleep(1)\n continue\n else:\n break\n else:\n raise Exception('took too long to inject')\n\n assert inject.info()['state'] == 'FINISHED'\n generate = jc.generate()\n job_info = generate.info()\n assert job_info['type'] 
== 'GENERATE'\n assert job_info['msg'] == 'OK'\n assert job_info['confId'] == nt.confId\n\n\ndef test_job_stop():\n inject_job = get_inject_job()\n inject_job.stop()\n assert inject_job.info()['state'] == 'STOPPING'\n\n\ndef test_job_abort():\n inject_job = get_inject_job()\n inject_job.abort()\n assert inject_job.info()['state'] == 'KILLED'\n\n\ndef get_crawl_client():\n seed = get_seed()\n return get_nutch().Crawl(seed)\n\n\n@slow\ndef test_crawl_client():\n cc = get_crawl_client()\n assert cc.currentJob.info()['type'] == 'INJECT'\n rounds = cc.waitAll()\n assert len(rounds) == 1\n assert cc.currentJob is None\n jobs = rounds[0]\n assert all([ j.info()['state'] == 'FINISHED' for j in jobs ])\n return","sub_path":"pycfiles/nutch-1.10.3-py2.7/test_nutch.py","file_name":"test_nutch.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335777586","text":"# encoding: utf-8\n\nfrom twisted.web import xmlrpc\n\nfrom trompet.listeners import registry\n\n\nclass XMLRPCInterface(xmlrpc.XMLRPC):\n def __init__(self, project, observer, *args, **kwargs):\n xmlrpc.XMLRPC.__init__(self, *args, **kwargs)\n self.project = project\n self.observer = observer\n\n def xmlrpc_notify(self, message):\n self.observer.notify(self.project, message)\n return True\n\nclass ListenerFactory(object):\n name = u\"xmlrpc\"\n\n def create(self, service, project, config, observer):\n if config:\n resource = service.get_resource_for_project(project)\n resource.putChild(\"xmlrpc\", XMLRPCInterface(project, observer))\n\nlistener_factory = ListenerFactory()\nregistry.register(listener_factory)\n","sub_path":"trompet/listeners/xmlrpc.py","file_name":"xmlrpc.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"88814068","text":"import discord\nfrom discord.ext import commands\n\nclass Info(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n \n @commands.command(help='Shows info about Dolphin', aliases=['link', 'l'])\n async def links(self, ctx):\n '''\n Download links\n '''\n await ctx.send(embed=discord.Embed(title='Links:', description=\n \"Dolphin site: \\n\"\n \"Downloads: \\n\"\n \"FAQ: \\n\"\n \"Wiki: \\n\"\n \"Forums: \\n\"\n \"Source code: \\n\"\n \"Bug tracker: \\n\"\n \"Translation: \\n\"\n \"TODO list: \\n\"\n \"Developer wiki: \\n\"\n \"Reddit: \\n\"\n \"Twitter: \"\n ))\n \ndef setup(bot):\n bot.add_cog(Info(bot))\n","sub_path":"categories/Info.py","file_name":"Info.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"43708548","text":"import numpy as np\nimport pandas as pd\nfrom ftplib import FTP_TLS\nfrom urllib.request import urlopen\nimport shutil\nimport urllib.request as request\nfrom contextlib import closing\nimport ftplib\nimport os, datetime, io , csv , re , itertools\n\ndef valor_linha(linha):\n cor=[]\n txt=''+ linha+ ' '\n x=[m.start() for m in re.finditer('=', txt)]\n x.append(len(txt)-1)\n valor=''\n peso='0'\n peso1='1'\n if 'STOCK' in txt:\n cor.append(peso1)\n else:\n cor.append(peso)\n \n for i in range(len(x)):\n if (i==len(x)-1):\n break;\n for j in range(x[i],x[i+1]):\n if (txt[j].isdigit()==True or txt[j]=='.'):\n valor+=txt[j]\n if(txt[j+1].isalpha()==True or txt[j+1]==' ' or txt[j+1]=='\\n'):\n break \n cor.append(valor)\n valor=''\n return cor\n \n path='C:/Users/valer/OneDrive/Ambiente de 
Trabalho/estagio/dados'\npath1='C:/Users/valer/OneDrive/Ambiente de Trabalho/estagio/raios'\nos.listdir(path)\ncoo=[]\nfor moldes in os.listdir(path):\n print('#molde',moldes)\n for pecas in os.listdir(path+'/'+moldes):\n print('*peça',pecas)\n coordenadas=[]\n for operacoes in os.listdir(path+'/'+moldes+'/'+pecas):\n print(operacoes)\n if('.txt' not in operacoes):\n linha=''\n coo=[]\n with open(path+'/'+moldes+'/'+pecas+'/'+operacoes) as ft: \n\n linha=''\n linha_r=''\n linha_stock=''\n for line in ft:\n if (line.lstrip().startswith('%')==False and 'MSG' not in line):\n\n if ('DIAM' in line or 'RAIO TOPO' in line or 'RT' in line or 'DIÂMETRO' in line or 'RAIO' in line or 'FR' in line ) :#or ('DIÂMETRO' in line and 'RT' in line)or ('RAIO' in line and 'RT' in line)\n linha_r+= line\n \n \n if('STOCK' in line):\n linha_stock+=line\n linha=linha_r+ \" \" +linha_stock\n\n\n coo=valor_linha(linha)\n split_str=[]\n for t in coo:\n n=5\n if len(t)>5:\n for index in range(0, len(t), n):\n split_str.append(t[index : index + n])\n\n else:\n split_str.append(t)\n if(split_str[0]=='0' and len(split_str)>4):\n split_str=split_str[:-2]\n \n coordenadas.append(split_str)\n\n print('coordinates done')\n out = list(itertools.chain(*coordenadas))\n \n\n con_num = []\n for item in out:\n con_num.append(float(item))\n \n print('num',con_num)\n \n ficheiro= open(path1+'/{}.txt'.format(pecas),'w')\n out = csv.writer(ficheiro)\n out.writerows(map(lambda x: [x], con_num))\n ficheiro.close()\n \n","sub_path":"organização dos dados/Inserção de dados e juntamento de todas as operações por peças-apenas os raios e raios topos.py","file_name":"Inserção de dados e juntamento de todas as operações por peças-apenas os raios e raios topos.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269208950","text":"import os\nimport re\n\nfrom collections import defaultdict\n\ntag_pattern = re.compile(r\"<([^>]+)>([^<]+)\")\n\nCORPORA_FOLDER = \"corpora/\"\n\nBEGIN = END = object()\n\ndef read_corpus(file):\n \"\"\"Process the annotations in a corpus.\"\"\"\n with open(file) as corpus:\n for sent in corpus:\n tag_ = None\n for tag, mention in tag_pattern.findall(sent):\n bigrams[tag_ if tag_ else BEGIN][tag] += 1\n mentions[mention][tag] += 1\n tag_ = tag\n bigrams[tag][END] += 1\n\ndef get_model():\n \"\"\"Return a frequency dictionary.\"\"\"\n global mentions\n global bigrams\n \n mentions = defaultdict(lambda: defaultdict(int))\n bigrams = defaultdict(lambda: defaultdict(lambda: 1))\n for file in os.listdir(CORPORA_FOLDER):\n read_corpus(CORPORA_FOLDER+file)\n \n return mentions, bigrams\n","sub_path":"learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572789636","text":"#Default Imports\nfrom greyatomlib.numpy_advanced.q01_get_total_deliveries_players.build import ipl_matches_array\n\nimport numpy as np\ndef get_toss_win_count(team=\"Mumbai Indians\"):\n a=[]\n for i in ipl_matches_array:\n if i[5]==\"Mumbai Indians\":\n a.append(i[0])\n\n\n unique_matches = np.unique(a)\n\n count = len(unique_matches)\n return count\nget_toss_win_count()\n","sub_path":"q03_get_toss_win_count/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609614194","text":"def ii():return int(input())\ndef 
iim():return map(int,input().split())\ndef iil():return list(map(int,input().split()))\n\ndef factorization(n):\n    arr = []\n    temp = n\n    for i in range(2, int(-(-n**0.5//1))+1):\n        if temp%i==0:\n            cnt=0\n            while temp%i==0:\n                cnt+=1\n                temp //= i\n            arr.append([i, cnt])\n\n    if temp!=1:\n        arr.append([temp, 1])\n\n    if arr==[]:\n        arr.append([n, 1])\n\n    return arr\n\nn = ii()\nA = factorization(n)\n#print(A)\nans = 0\nif n == 1:\n    print(0)\nelse:\n    for i in A:\n        num = i[1]\n        cnt = 0\n        while num-cnt > 0:\n            cnt += 1\n            num -= cnt\n        ans += cnt\n    print(ans)","sub_path":"ABC/ABC169/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300240621","text":"from pathlib import Path\nimport json\nimport time\n\nfrom database_updater import *\n\ndef load_all_json_results(path):\n    data = []\n    with open(path) as json_file:\n        obj = \"\"\n        for line in json_file.readlines():\n            #print(\"line = \", line)\n            clean = line.strip()\n            if \"}{\" in clean:\n                #terminate the obj and convert to json\n                obj += \"}\"\n                data.append(json.loads(obj))\n                obj = \"{\"\n                continue \n            obj += str(clean)\n            #print(\"len obj = \", len(obj))\n        if len(obj) > 0:\n            data.append(json.loads(obj))\n\n    print(f\"loaded {len(data)} JSON records\")\n    return data\n\nif __name__ == \"__main__\":\n\n    json_file_path = Path(\"../results/0205_ballot_results.json\")\n    json_as_python = load_all_json_results(json_file_path)\n\n    dbu = DatabaseUpdater()\n\n\n    if dbu.connect(\"election\", \"election\"):\n\n        #do some stuff\n        start_time = time.time()\n        count = 0\n\n        print(\"started database updating\")\n\n        dbu.start_transactions()\n\n        for jr in json_as_python:\n\n            dbu.start_transactions(batchsize=1)\n            status = dbu.insert_or_update_one_form(jr)\n            ### hold off for testing --> status2 = dbu.insert_json(jr)\n            status2 = True\n\n            count += 1\n            if status and status2:\n                dbu.finish()\n            else:\n                dbu.rollback_transactions()\n        \n        dbu.finish()\n\n        stop_time = time.time()\n        print(f\"Elapsed time = {stop_time - start_time} per tx: {(stop_time - start_time) / count}\")\n\n    else: \n        print(\"Something went wrong\")\n\n    print(\"Done\")","sub_path":"votes/src15/run_database_updater.py","file_name":"run_database_updater.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403454608","text":"# Copyright (C) Nial Peters 2009\n#\n# This file is part of PASKIL.\n#\n# PASKIL is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n#(at your option) any later version.\n#\n# PASKIL is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with PASKIL. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nPIL plugin for opening the PMIS files that used to be produced by the UiO ASC. 
This plugin is largely\nincomplete and needs a lot more work done to it to make it robust.\n\"\"\"\n\nimport Image\nimport ImageFile\nimport string\nimport sys\n\n\nclass PmisImageFile(ImageFile.ImageFile):\n\n format = \"PMIS\"\n format_description = \"Pmis raster image\"\n\n def _open(self):\n\n # check header\n header = self.fp.read(100)\n if header[:4] != \"PMIS\":\n raise SyntaxError(\"Not a PMIS file\")\n\n # read header data\n header = header[60:]\n\n # find indicies of data fields\n indicies = []\n try:\n indicies.append(header.index(\"D\"))\n indicies.append(header.index(\"W\"))\n indicies.append(header.index(\"F\"))\n indicies.append(header.index(\"G\"))\n indicies.append(header.index(\"E\"))\n try:\n indicies.append(header.index(\"$\"))\n except ValueError:\n indicies.append(header.index(\"S\"))\n except ValueError:\n print(\"Cannot read pmis header data\")\n sys.exit()\n\n # sort list\n indicies.sort()\n\n # create data fields dictionary\n data_fields = {'D': 'Creation Time', 'W': 'Wavelength', 'F': 'Filter Number',\n 'G': 'Gain', 'E': 'Exposure Time', '$': 'Site', 'S': 'Site'}\n\n # loop through list of indices and read data\n for i in range(len(indicies) - 1):\n data = header[indicies[i] + 1:indicies[i + 1]]\n self.info[data_fields[header[indicies[i]]]] = data\n\n # read final data field in list\n data = header[indicies[len(indicies) - 1] + 1:]\n self.info[data_fields[header[indicies[len(indicies) - 1]]]] = data\n\n # size in pixels (width, height)\n self.size = int(512), int(512)\n\n # Set image mode, in this case 32bit floating point pixel values\n self.mode = \"F\"\n\n # image filetype setting\n self.filetype = \"PMIS\"\n\n # data descriptor (how the pixel data is arranged, and how to read it)\n self.tile = [(\"raw\", (0, 0) + self.size, 180, (\"F;16\", 0, 1))]\n\n#Image.register_open(\"PMIS\", PmisImageFile)\n\nImage.register_open(\"PMIS\", PmisImageFile)\n\n# PMIS files have a whole range of extensions that they can have. Need to\n# register them all\nhex_range = ['0', '1', '2', '3', '4', '5', '6',\n '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\nfilter_letter = [\"r\", \"s\", \"t\", \"u\", \"v\", \"g\",\n \"h\", \"i\", \"j\", \"k\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n\nfor i in hex_range:\n for j in hex_range:\n for k in filter_letter:\n # register extension with PIL\n Image.register_extension(\"PMIS\", \".\" + i + j + k)\n","sub_path":"PASKIL/PASKIL/plugins/PmisImagePlugin.py","file_name":"PmisImagePlugin.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297169242","text":"import os, ghostscript, sys, locale\n\ncommand = os.popen('unoconv -f pdf Test1.pptx')\ncommand.close()\n\nargs = [\"gs\", \"-q\", \"-o\", \"image%d.png\", \"-sDEVICE=pngalpha\", \"Test1.pdf\"]\nencoding = locale.getpreferredencoding()\n\nargs = [a.encode(encoding) for a in args]\nghostscript.Ghostscript(*args)","sub_path":"slide extractor/pdf2img.py","file_name":"pdf2img.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232909144","text":"from django import forms\nfrom . 
models import Chess\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Submit\nimport os\n\n\nclass ChessForm(forms.ModelForm):\n    gameName = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Type your Game Name'}),\n                               help_text='Type a Game name, so you can save and reload it!')\n\n    activePlayer = forms.ChoiceField(choices=(('','--choose color--'), \n                                              ('2','Black'), \n                                              ('1','White'), ),\n                                     help_text='Which color player are you?')\n\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper(self)\n        self.helper.form_method = 'post'\n        self.helper.layout = Layout('gameName', 'activePlayer',\n                                    Submit('save',\n                                           'Start Game',\n                                           css_class='btn-outline-info'))\n        self.helper.form_class = 'form-horizontal'\n        self.helper.labels_uppercase = True\n\n\n    class Meta:\n        model = Chess\n        fields = ['gameName', 'activePlayer',]\n        labels = {'activePlayer': 'Select Your Color',}\n\n\n","sub_path":"web_project/chess/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"120467941","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 21 10:22:17 2016\r\n\r\n@author: liuxs981001\r\n\"\"\"\r\nimport time\r\n\r\n\r\n\r\nsearchList = []\r\nfor i in range(19981001):\r\n    searchList.append(i)\r\n    i += 1\r\n\r\n\r\nnum = int(input(\"please input the value you want to find: \")) # input the item that the user wants to search\r\n\r\nn = len(searchList) # find the upper boundary of the list\r\nsearchList = sorted(searchList)\r\n\r\ndef search(searchList, n, num):\r\n    left = 0\r\n    right = n\r\n    while left != right:\r\n        if searchList[left] != num:\r\n            left += 1\r\n        else:\r\n            break\r\n    return left + 1\r\n\r\nstart = time.perf_counter()\r\nprint(\"The value you want to search is at the position No.\",search(searchList, n, num))\r\n\r\nend = time.perf_counter()\r\nprint('Running time: %s Seconds'%(end-start))","sub_path":"lSearch - with timer.py","file_name":"lSearch - with timer.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389720859","text":"\"\"\"\nUtility functions like instance management threads that don't make sense anywhere else.\n\nThey certainly don't belong in views.\n\"\"\"\n'''\nCopyright 2017 ODAP Contributors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n'''\nfrom odapweb.settings import DNS_DATA, DNS_API_URL, DNS_ODAP_URL\nfrom odapweb.models import Key\nimport requests\n\n\n\"\"\"\nDNS implementation varies by a large margin between users. 
When correctly implemented, the api will create readable DNS names and remove HTTPS warnings from ec2 instance home pages\n\n\"\"\"\n\n\n\"\"\"\nThis is the dns url that is displayed on the website and also \n\nChanging it from the default implementation will enable the DNS module\n\"\"\"\ndef dnsDisplayName(instance_name):\n\treturn \"instance\"+str(instance_name)\n\n\"\"\"\nThis method is called when an instance is created to create a dns entry. \n\"\"\"\ndef createDnsEntry(instance_name, ip):\n\tdns_api_url = DNS_API_URL + instance_name + DNS_ODAP_URL\n\n\t# Get the key from our database\n\n\tkey = Key.objects.get(\n\t\ttitle=\"ODAP-DNS-API-KEY\").key_text # \"kyWsnkaxUGKAtnIQkFW7XdCO0SGf9IS1NCRdSAK9o4SzLm4_1fIEY8Av5ZtMNJ1I\"\n\n\tdata = DNS_DATA\n\n\tdata['key'] = key\n\n\tr = requests.put(url=dns_api_url + ip, data=data, verify=False)\n\n\treturn r\n\n\"\"\"\nThis method is called when an instance is destroyed to remove a dns entry. \n\"\"\"\ndef deleteDnsEntry(instance_name, ip):\n\tdns_api_url = DNS_API_URL + instance_name + DNS_ODAP_URL\n\n\t# Get the key from our database\n\n\tkey = Key.objects.get(\n\t\ttitle=\"ODAP-DNS-API-KEY\").key_text # \"kyWsnkaxUGKAtnIQkFW7XdCO0SGf9IS1NCRdSAK9o4SzLm4_1fIEY8Av5ZtMNJ1I\"\n\n\tdata = DNS_DATA\n\n\tdata['key'] = key\n\n\tr = requests.delete(url=dns_api_url + ip, data=data, verify=False)\n\n\treturn r\n","sub_path":"plugin/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"298158155","text":"from typing import Any, Dict, List, Union\n\nimport attr\n\nfrom ..models.validation_error import ValidationError\nfrom ..types import UNSET, Unset\n\n\n@attr.s(auto_attribs=True)\nclass HTTPValidationError:\n \"\"\" \"\"\"\n\n detail: Union[Unset, List[ValidationError]] = UNSET\n\n def to_dict(self) -> Dict[str, Any]:\n detail: Union[Unset, List[Any]] = UNSET\n if not isinstance(self.detail, Unset):\n detail = []\n for detail_item_data in self.detail:\n detail_item = detail_item_data.to_dict()\n\n detail.append(detail_item)\n\n field_dict = {}\n if detail is not UNSET:\n field_dict[\"detail\"] = detail\n\n return field_dict\n\n @staticmethod\n def from_dict(d: Dict[str, Any]) -> \"HTTPValidationError\":\n detail = []\n for detail_item_data in d.get(\"detail\", UNSET) or []:\n detail_item = ValidationError.from_dict(detail_item_data)\n\n detail.append(detail_item)\n\n return HTTPValidationError(\n detail=detail,\n )\n","sub_path":"end_to_end_tests/golden-record-custom/custom_e2e/models/http_validation_error.py","file_name":"http_validation_error.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"174999198","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import date, datetime\nimport calendar\n\n\ndef parse_next_row(row, year):\n meeting_data = {}\n month_tag = row.find('td', attrs={'class':'month'})\n day_tag = row.find('td', attrs={'class':'day'})\n sm_tag = row.find('td', attrs={'class':'statement2'})\n video_tag = row.find('td', attrs={'class':'video'})\n min_tag = row.find('td', attrs={'class':'minutes'})\n if month_tag is None or day_tag is None:\n return None\n\n # month\n month_dict = {a: n for n, a in enumerate(calendar.month_name)}\n month_str = str(month_tag.string).strip()\n m = re.search('^(\\w*)/?(\\w*)$', month_str)\n s_month = month_dict[m.group(1)]\n meeting_data['month'] = s_month\n if 
m.group(2) != '':\n e_month = month_dict[m.group(2)]\n else:\n e_month = s_month\n\n # start and end date\n day_str = str(day_tag.string).strip()\n unscheduled = False if re.search('unscheduled', day_str) is None else True\n s_day = int(re.search('^\\d{1,2}', day_str).group(0))\n meeting_data['day'] = s_day\n meeting_data['start_date'] = date(year, s_month, s_day)\n\n # special case of unscheduled meeting\n if not unscheduled:\n e_day = int(re.search('(\\d{1,2}).?$', day_str).group(1))\n else:\n e_day = s_day\n meeting_data['end_date'] = date(year, e_month, e_day)\n # set estimated release timestamp to be 2PM ET on meeting end date\n meeting_data['est_release'] = datetime(year, e_month, e_day, 19, 0, 0)\n\n # statement\n if sm_tag.a is not None:\n meeting_data['statement'] = str(sm_tag.a.get('href'))\n else:\n meeting_data['statement'] = ''\n\n # video and projection materials\n if video_tag is not None:\n press_conf = video_tag.find(text='Press Conference')\n if press_conf is not None:\n meeting_data['press_conf'] = str(press_conf.parent.get('href'))\n projections = video_tag.find(text=re.compile(r'Projections Materials:'))\n if projections is not None:\n meeting_data['projections'] = True\n pdf = video_tag.find(text=re.compile(r'PDF'))\n if pdf is not None:\n meeting_data['proj_pdf'] = str(pdf.parent.get('href'))\n accessible = video_tag.find(text=re.compile(r'Accessible'))\n if accessible is not None:\n meeting_data['accessible'] = str(accessible.parent.get('href'))\n else:\n meeting_data['projections'] = False\n else:\n meeting_data['projections'] = False\n\n # minutes\n if min_tag is not None:\n pdf = min_tag.find(text=re.compile(r'PDF'))\n if pdf is not None:\n meeting_data['minutes_pdf'] = str(pdf.parent.get('href'))\n html = min_tag.find(text=re.compile(r'HTML'))\n if html is not None:\n meeting_data['minutes_html'] = str(html.parent.get('href'))\n\n return meeting_data\n\n\ndef FED_FOMC_calendar_parser(url):\n r = requests.get(url)\n soup = BeautifulSoup(r.content)\n tables = soup.find_all('table', attrs={'class':'pressConference'})\n\n FOMC_dict = {}\n for table in tables:\n table_body = table.find('tbody')\n year = 0\n for row in table_body.find_all('tr'):\n hd_tag = row.find('th', attrs={'id':'year'})\n if hd_tag is not None:\n year = int(re.search('\\d{4}', str(hd_tag.string)).group(0))\n else:\n md = parse_next_row(row, year)\n if md is not None:\n mh = md['month']\n mon = '0' + str(mh) if mh < 10 else str(mh)\n meeting_id = str(year)+mon+str(md['day'])\n del md['month']\n del md['day']\n FOMC_dict[meeting_id] = md\n return FOMC_dict\n\nif __name__ == \"__main__\":\n FED_FOMC_calendar_parser(\"http://www.federalreserve.gov/monetarypolicy/fomccalendars.htm\")\n","sub_path":"FED_FOMC_calendar_parser.py","file_name":"FED_FOMC_calendar_parser.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276912097","text":"def odd_even_mul():\r\n\r\n n = int(input())\r\n arr = input().split()\r\n list=[]\r\n odd =[]\r\n even =[]\r\n \r\n for i in range(n):\r\n list.append(int(arr[i]))\r\n \r\n for i in range(len(list)):\r\n if (list[i]%2==0):\r\n even.append(list[i])\r\n else:\r\n odd.append(list[i])\r\n \r\n odd_sum = sum(ele for ele in odd)\r\n even_sum = sum(val for val in even)\r\n \r\n print(int(odd_sum*even_sum))\r\n\r\nodd_even_mul()\r\n","sub_path":"day 21.py","file_name":"day 
21.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28557334","text":"import keyword\n\n# print(1111);\n# print(keyword.kwlist);\n#\n# print(\"324243ww2\"[0:-2]);\n\nwhile True:\n str = input(\"请用2个字评价下自己\\n\");\n if len(str) == 2:\n print(str, end=\"?还是去照照镜子吧\")\n break\n else:\n print(\"让你输入2个字,看不懂中文吗?再来一次!\")","sub_path":"Test1.py","file_name":"Test1.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"120762119","text":"\nfrom util import Request, HttpMethod, CONFIG\n\n\nclass BilibiliApi:\n\n BASE_API_URL = 'https://api.bilibili.com'\n HEADERS = {\n 'Origin': 'https://www.bilibili.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',\n 'Referer': 'https://www.bilibili.com'\n }\n\n @classmethod\n def build_oid_api_request(cls, mid, pn=1, ps=100, tid=0, order='pubdate', keyword=None):\n \"\"\"\n 返回 up 所有的视频av 号\n :param mid: up id\n :param pn:\n :param ps: 每页数量\n :param tid: 0:全部\n :param order:\n :param keyword:\n :return:\n \"\"\"\n url = cls.BASE_API_URL + '/x/space/arc/search'\n params = {\n 'mid': mid,\n 'pn': pn,\n 'ps': ps,\n 'tid': tid,\n 'order': order,\n 'keyword': keyword,\n }\n return Request(url=url, method=HttpMethod.GET, params=params, headers=cls.HEADERS)\n\n @classmethod\n def build_aid_api_request(cls, aid):\n \"\"\"\n 根据 aid 获取视频信息\n :param aid:\n :return:\n \"\"\"\n url = cls.BASE_API_URL + '/x/web-interface/view'\n params = {\n 'aid': aid\n }\n return Request(url=url, method=HttpMethod.GET, params=params, headers=cls.HEADERS)\n\n @classmethod\n def build_cid_api_request(cls, avid, cid):\n \"\"\"\n 获取视频下载信息\n :param avid:\n :param cid:\n :return:\n \"\"\"\n url = cls.BASE_API_URL + '/x/player/playurl'\n params = {\n 'avid': avid,\n 'cid': cid,\n 'qn': 80,\n 'fnver': 0,\n 'fnval': 16,\n }\n cookies = {\n 'SESSDATA': CONFIG['SESSION_DATA']\n }\n return Request(url=url, method=HttpMethod.GET, params=params, headers=cls.HEADERS, cookies=cookies)\n\n @classmethod\n def build_archive_api_request(cls, aid):\n \"\"\"\n 获取视频统计信息,包含合集\n :param aid:\n :return:\n \"\"\"\n url = cls.BASE_API_URL + '/x/web-interface/archive/stat'\n params = {\n 'aid': aid\n }\n return Request(url=url, method=HttpMethod.GET, params=params, headers=cls.HEADERS)\n\n @classmethod\n def build_dm_api_request(cls, oid):\n \"\"\"\n 获取弹幕信息\n :param oid:\n :return:\n \"\"\"\n url = cls.BASE_API_URL + '/x/v1/dm/list.so'\n params = {\n 'oid': oid,\n }\n return Request(url=url, method=HttpMethod.GET, params=params, headers=cls.HEADERS)\n\n @classmethod\n def build_video_download_request(cls, url):\n \"\"\"\n 下载视频\n :param url:\n :return:\n \"\"\"\n return Request(url=url, method=HttpMethod.GET, headers=cls.HEADERS)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562040450","text":"import argparse\r\nimport sys\r\nimport os\r\nimport io\r\nimport json\r\nimport hashlib\r\nfrom PIL import Image\r\n\r\nclass MotoBootLogo:\r\n def __init__(self, input, output, list):\r\n if list:\r\n if os.path.isfile(list):\r\n self.decode(list, None); return\r\n elif os.path.isfile(input):\r\n self.decode(input, output); return\r\n elif os.path.isdir(input):\r\n self.encode(input, output); return\r\n print(\"[-] 
Input FILE/DIR inaccessible\")\r\n\r\n def intFromByte(self, bytes=1):\r\n return int.from_bytes(self.infile.read(bytes), byteorder=\"little\", signed=True)\r\n\r\n def uintFromByte(self, bytes=1):\r\n return int.from_bytes(self.infile.read(bytes), byteorder=\"little\", signed=False)\r\n\r\n def strFromByte(self, bytes=1, encoding=\"ASCII\"):\r\n return str(self.infile.read(bytes), encoding)\r\n\r\n def intToByte(self, integer, length=1):\r\n return (integer).to_bytes(length, byteorder=\"little\", signed=True)\r\n\r\n def uintToByte(self, integer, length=1):\r\n return (integer).to_bytes(length, byteorder=\"little\", signed=False)\r\n\r\n def strToByte(self, str, encoding=\"ASCII\"):\r\n return str.encode(encoding)\r\n\r\n def decode(self, filename, dirname):\r\n print(\"[+] Input [%s] => Output [%s]\" % (filename, dirname))\r\n \r\n self.infile = open(filename, \"rb\")\r\n \r\n # Motorola bootlogo container, \"MotoLogo\\x00\" Header, 9 bytes\r\n if (self.intFromByte(8) != 0x6F676F4C6F746F4D or self.intFromByte() != 0x00):\r\n print(\"[-] Invalid binary file header\")\r\n return\r\n\r\n data = {}\r\n data['count'] = int((self.intFromByte(4) - 0x0D) / 0x20)\r\n print(\"[+] %s Images found\" % (data['count']))\r\n\r\n data['name'], offset, size = ([] for i in range(3))\r\n for i in range(data['count']):\r\n self.infile.seek(0x0D + (0x20 * i))\r\n data['name'].append(self.strFromByte(24).split(\"\\0\")[0])\r\n offset.append(self.intFromByte(4))\r\n size.append(self.intFromByte(4))\r\n\r\n data['version'] = self.intFromByte(4)\r\n if data['version'] == -1:\r\n pass\r\n elif data['version'] == -2:\r\n data['device'] = self.strFromByte(self.intFromByte(4))\r\n data['text'] = self.strFromByte(self.intFromByte(4), \"ASCII\")\r\n data['comment'] = self.strFromByte(self.intFromByte(4))\r\n data['resx'] = self.intFromByte(2)\r\n data['resy'] = self.intFromByte(2)\r\n\r\n print(\"[+] Device: %s\" % (data['device']))\r\n print(\"[+] Comment: %s\" % (data['text']))\r\n print(\"[+] Comment: %s\" % (data['comment']))\r\n print(\"[+] Resolution: %sx%s\" % (data['resx'], data['resy']))\r\n else:\r\n print(\"[-] Unsupported binary file version\")\r\n return\r\n\r\n if dirname is None:\r\n print(\"[+] Binary file version: %s\\n\" % (data['version']))\r\n print(\"[+] Name, Offset, Size\")\r\n for i in range(data['count']):\r\n print(\"[+] %s, %s, %s\" % (data['name'][i], offset[i], size[i]))\r\n return\r\n\r\n # Motorola RLE bootlogo, \"MotoRun\\x00\" Header, 8 bytes\r\n self.infile.seek(offset[0])\r\n if (self.intFromByte(8) != 0x006E75526F746F4D):\r\n print(\"[-] Invalid RLE image header\")\r\n return\r\n\r\n os.makedirs(dirname, exist_ok=True)\r\n for i in range(data['count']):\r\n print(\"[+] Processing %s\" % (data['name'][i]))\r\n \r\n self.infile.seek(offset[i] + 8)\r\n x = self.intFromByte() << 8\r\n x = x | self.uintFromByte()\r\n y = self.intFromByte() << 8\r\n y = y | self.uintFromByte()\r\n img = Image.new(\"RGB\", (x, y))\r\n xx = yy = 0\r\n while (yy < y):\r\n pixelcount = self.intFromByte() << 8\r\n pixelcount = pixelcount | self.uintFromByte()\r\n repeat = (pixelcount & 0x8000) == 0x8000\r\n pixelcount = pixelcount & 0x7FFF\r\n red = green = blue = 0\r\n if (repeat):\r\n blue = self.uintFromByte()\r\n green = self.uintFromByte()\r\n red = self.uintFromByte()\r\n while (pixelcount > 0):\r\n pixelcount = pixelcount - 1\r\n img.putpixel((xx, yy), (red, green, blue))\r\n xx = xx + 1\r\n if (xx != x): continue\r\n xx = 0\r\n yy = yy + 1\r\n if (yy == y): break\r\n else:\r\n while (pixelcount > 
0):\r\n pixelcount = pixelcount - 1\r\n blue = self.uintFromByte()\r\n green = self.uintFromByte()\r\n red = self.uintFromByte()\r\n img.putpixel((xx, yy), (red, green, blue))\r\n xx = xx + 1\r\n if (xx != x): continue\r\n xx = 0\r\n yy = yy + 1\r\n if (yy == y): break\r\n img.save(\"%s/%s.png\" % (dirname, data['name'][i]), format=\"PNG\")\r\n\r\n self.infile.close()\r\n with open(dirname + \"/data.json\", 'w') as dfile:\r\n json.dump(data, dfile, indent=2)\r\n\r\n def encode(self, dirname, filename):\r\n print(\"[+] Input [%s] => Output [%s]\" % (dirname, filename))\r\n \r\n try:\r\n with open(dirname + \"/data.json\") as dfile:\r\n data = json.load(dfile)\r\n except Exception:\r\n print(\"[-] \" + dirname + \"/data.json inaccessible\")\r\n return\r\n\r\n print(\"[+] %s Images found\" % (data['count']))\r\n if data['count'] != len(data['name']):\r\n print(\"[-] Image list not equal to image count\")\r\n return\r\n\r\n stream = io.BytesIO()\r\n\r\n # Motorola bootlogo container, \"MotoLogo\\x00\" Header, 9 bytes\r\n stream.write(self.intToByte(0x6F676F4C6F746F4D, 9))\r\n\r\n stream.write(self.intToByte(0x0D + (data['count'] * 0x20), 4))\r\n for i in range(data['count']):\r\n stream.seek(0x0D + (i * 0x20))\r\n name = self.strToByte(data['name'][i])\r\n stream.write(name)\r\n stream.write(self.intToByte(0, 0x20 - len(name)))\r\n\r\n stream.write(self.intToByte(data['version'], 4))\r\n if data['version'] == -1:\r\n pass\r\n elif data['version'] == -2:\r\n print(\"[+] Device: %s\" % (data['device']))\r\n print(\"[+] Comment: %s\" % (data['text']))\r\n print(\"[+] Comment: %s\" % (data['comment']))\r\n print(\"[+] Resolution: %sx%s\" % (data['resx'], data['resy']))\r\n\r\n stream.write(self.intToByte(len(data['device']), 4))\r\n stream.write(self.strToByte(data['device']))\r\n stream.write(self.intToByte(len(data['text']), 4))\r\n stream.write(self.strToByte(data['text']))\r\n stream.write(self.intToByte(len(data['comment']), 4))\r\n stream.write(self.strToByte(data['comment']))\r\n stream.write(self.intToByte(data['resx'], 2))\r\n stream.write(self.intToByte(data['resy'], 2))\r\n else:\r\n print(\"[-] Unsupported binary file version, using common\")\r\n data['version'] = -1\r\n\r\n hashes, offset, size = ([] for i in range(3))\r\n for i in range(data['count']):\r\n while ((stream.tell() % 0x200) != 0):\r\n stream.write(self.uintToByte(0xFF))\r\n\r\n print(\"[+] Processing %s\" % (data['name'][i]))\r\n try:\r\n img = Image.open(\"%s/%s.png\" % (dirname, data['name'][i]))\r\n result = self.encodeImg(img)\r\n except Exception:\r\n print(\"[-] Image corrupt or inaccessible\")\r\n return\r\n\r\n tempoffset = stream.tell()\r\n tempsize = len(result)\r\n hash = hashlib.md5(result).hexdigest()\r\n\r\n if hash in hashes:\r\n index = hashes.index(hash)\r\n tempoffset = offset[index]\r\n tempsize = size[index]\r\n else:\r\n hashes.append(hash)\r\n offset.append(tempoffset)\r\n size.append(tempsize)\r\n stream.write(result)\r\n\r\n stream.seek(0x0D + 0x18 + (i * 0x20))\r\n stream.write(self.intToByte(tempoffset, 4))\r\n stream.write(self.intToByte(tempsize, 4))\r\n stream.seek(0, io.SEEK_END)\r\n\r\n with open(filename, \"wb\") as outfile:\r\n outfile.write(stream.getvalue())\r\n\r\n def encodeImg(self, img):\r\n data = io.BytesIO()\r\n\r\n # Motorola RLE bootlogo, \"MotoRun\\x00\" Header, 8 bytes\r\n data.write(self.intToByte(0x006E75526F746F4D, 8))\r\n\r\n data.write(self.intToByte(img.width >> 8))\r\n data.write(self.uintToByte(img.width & 0xFF))\r\n data.write(self.intToByte(img.height >> 8))\r\n 
data.write(self.uintToByte(img.height & 0xFF))\r\n \r\n for y in range(0, img.height):\r\n colors = []\r\n for x in range(0, img.width):\r\n colors.append(img.getpixel((x, y)))\r\n row = self.encodeRow(colors)\r\n data.write(row)\r\n return data.getvalue()\r\n\r\n def encodeRow(self, colors):\r\n data = io.BytesIO()\r\n i = 0\r\n count = len(colors)\r\n while (i < count):\r\n j = i\r\n while ((j < count) and (colors[i] == colors[j])): j = j + 1\r\n if ((j - i) > 1):\r\n data.write(self.uintToByte((0x80 | ((j - i) >> 8))))\r\n data.write(self.uintToByte(((j - i) & 0xFF)))\r\n data.write(self.uintToByte(colors[i][2]))\r\n data.write(self.uintToByte(colors[i][1]))\r\n data.write(self.uintToByte(colors[i][0]))\r\n i = j\r\n else:\r\n k = j\r\n while True:\r\n j = k - 1\r\n while ((k < count) and (colors[j] != colors[k])): j = j + 1; k = k + 1\r\n while ((k < count) and (colors[j] == colors[k])): k = k + 1\r\n if (k == count): break\r\n l = k\r\n while ((l < count) and (colors[k] == colors[l])): l = l + 1\r\n if not (((k - j) < 3) and ((l - k) < 2)): break\r\n if ((j - i) == 0):\r\n data.write(self.intToByte(0))\r\n data.write(self.intToByte(1))\r\n data.write(self.uintToByte(colors[count - 1][2]))\r\n data.write(self.uintToByte(colors[count - 1][1]))\r\n data.write(self.uintToByte(colors[count - 1][0]))\r\n break\r\n if (j == (count - 1)): j = j + 1\r\n data.write(self.uintToByte(((j - i) >> 8)))\r\n data.write(self.uintToByte(((j - i) & 0xFF)))\r\n k = 0\r\n while (k < (j - i)):\r\n data.write(self.uintToByte(colors[i + k][2]))\r\n data.write(self.uintToByte(colors[i + k][1]))\r\n data.write(self.uintToByte(colors[i + k][0]))\r\n k = k + 1\r\n i = j\r\n return data.getvalue()\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n list = '-l' in sys.argv\r\n parser.add_argument('-i', required=not list, dest='INPUT', help='FILE/DIR')\r\n parser.add_argument('-o', required=not list, dest='OUTPUT', help='DIR/FILE')\r\n parser.add_argument('-l', required=list, dest='FILE', help='List images, no decoding')\r\n args = parser.parse_args()\r\n\r\n mbl = MotoBootLogo(args.INPUT, args.OUTPUT, args.FILE)\r\n","sub_path":"moto-bootlogo.py","file_name":"moto-bootlogo.py","file_ext":"py","file_size_in_byte":11825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418346976","text":"\r\nimport speech_recognition as sr\r\nimport smtplib\r\nimport email\r\nimport imaplib\r\nfrom gtts import gTTS\r\nimport pyglet\r\nimport os\r\nfrom imap_tools import MailBox, Q\r\nfrom playsound import playsound\r\nimport random\r\nimport sqlite3\r\n\r\nimport tkinter as tk\r\nfrom tkinter import Message ,Text\r\nimport time\r\nimport Start as s\r\n\r\nremail=\"\"\r\nsubject=\"\"\r\nbom=\"\"\r\nsender_email=\"\"\r\npassword=\"\"\r\nn=0\r\n\r\n# function to be called when button-2 of mouse is pressed \r\ndef pressed2(event): \r\n\tprint('Button-2 pressed at x = % d, y = % d'%(event.x, event.y)) \r\n\r\n# function to be called when button-3 of mouse is pressed \r\ndef pressed3(event):\r\n\tglobal n\r\n\tprint('Button-3 pressed at x = % d, y = % d'%(event.x, event.y)) \r\n\tn=1\r\n\t\r\n\theader = 'To:' + remail + '\\n' + 'From: ' + sender_email + '\\n' + 'Subject: ' + subject + '\\n'\r\n\tmsg = header + bom\r\n\t\r\n\tprint(\"header\",header)\r\n\tprint(\"msg\",msg)\r\n\t\t\r\n\tmail = smtplib.SMTP('smtp.gmail.com',587) #host and port area\r\n\tmail.ehlo() #Hostname to send for this command defaults to the FQDN of the local host.\r\n\tmail.starttls() 
#security connection\r\n\tmail.login(sender_email,password) #login part\r\n\tmail.sendmail(sender_email,remail,msg) #send part\r\n\tprint (\"Congrats! Your mail has been sent. \")\r\n\ttts = gTTS(text=\"Congrats! Your mail has been sent. \", lang='en')\r\n\tttsname=(\"send.mp3\") \r\n\ttts.save(ttsname)\r\n\tplaysound(ttsname)\r\n\tos.remove(ttsname)\r\n\tmail.close() \r\n\r\n\r\ndef display(uname):\r\n\tglobal remail,subject,bom,sender_email,password,n\r\n\tbgcolor=\"#ECFDB0\"\r\n\tfgcolor=\"black\"\r\n\twindow = tk.Tk()\r\n\twindow.title(\"Voice based Email System\")\r\n\twindow.geometry('1280x720')\r\n\twindow.configure(background=bgcolor)\r\n\t#window.attributes('-fullscreen', True)\r\n\r\n\twindow.grid_rowconfigure(0, weight=1)\r\n\twindow.grid_columnconfigure(0, weight=1)\r\n\twindow.bind('<Button-2>', pressed2) \r\n\twindow.bind('<Button-3>', pressed3) \r\n\r\n\t\t\r\n\tmessage1 = tk.Label(window, text=\"Voice based Email System\" ,bg=bgcolor ,fg=fgcolor ,width=50 ,height=3,font=('times', 30, 'italic bold underline')) \r\n\tmessage1.place(x=100, y=10)\r\n\r\n \r\n\t\r\n\tlbl = tk.Label(window, text=\"Receiver Email\",width=20 ,height=2 ,fg=fgcolor ,bg=bgcolor ,font=('times', 15, ' bold ') ) \r\n\tlbl.place(x=300, y=300)\r\n\t\r\n\ttxt = tk.Entry(window,width=20,bg=\"white\" ,fg=\"black\",font=('times', 15, ' bold '))\r\n\ttxt.place(x=600, y=315)\r\n\r\n\tlbl1 = tk.Label(window, text=\"Subject\",width=20 ,height=2 ,fg=fgcolor ,bg=bgcolor ,font=('times', 15, ' bold ') ) \r\n\tlbl1.place(x=300, y=400)\r\n\t\r\n\ttxt1 = tk.Entry(window,width=20,bg=\"white\" ,fg=\"black\",font=('times', 15, ' bold '))\r\n\ttxt1.place(x=600, y=415)\r\n\t\r\n\r\n\tlbl2 = tk.Label(window, text=\"Message\",width=20 ,height=2 ,fg=fgcolor ,bg=bgcolor ,font=('times', 15, ' bold ') ) \r\n\tlbl2.place(x=300, y=500)\r\n\t\r\n\ttxt2 = tk.Entry(window,width=20,bg=\"white\" ,fg=\"black\",font=('times', 15, ' bold '))\r\n\ttxt2.place(x=600, y=515)\r\n\t\r\n\tprint(\"remail in display\",remail)\r\n\r\n\ttxt.insert('end',remail)\r\n\ttxt1.insert('end',subject)\r\n\ttxt2.insert('end',bom)\r\n\r\n\twindow.update()\r\n\t\r\n\twhile True:\r\n\t\ttime.sleep( 1 )\r\n\t\tremail=txt.get()\r\n\t\tsubject=txt1.get()\r\n\t\tbom=txt2.get()\r\n\t\tprint(remail)\r\n\t\tprint(subject)\r\n\t\tprint(bom)\r\n\r\n\t\tprint(n)\r\n\t\tif n==1:\r\n\t\t\twindow.destroy()\r\n\t\t\ts.process(uname)\r\n\t\t\tbreak\r\n\t\twindow.update()\r\n\r\n\t#window.after(30000, window.destroy)\r\n\twindow.mainloop()\r\n\r\n\r\ndef process1(sym):\r\n\tglobal remail,subject,bom,sender_email,password\r\n\tconn= sqlite3.connect(\"Email\")\r\n\tcmd=\"SELECT email,mpassword FROM login WHERE username='\"+str(sym)+\"'\"\r\n\tprint(cmd)\r\n\tcursor=conn.execute(cmd)\r\n\tsender_email=\"\"\r\n\tpassword=\"\"\r\n\r\n\tfor row in cursor.fetchall():\r\n\t\tprint(row[0])\r\n\t\tsender_email=row[0]\r\n\t\tpassword=row[1]\r\n\tflag=0\r\n\r\n\twhile(flag==0):\r\n\t\t\r\n\t\treceiver_email=\"mindsoftblore@gmail.com\"\r\n\t\t\r\n\t\t\r\n\t\tr = sr.Recognizer()\r\n\t\tm = sr.Microphone()\r\n\t\t#set threshold level\r\n\t\twith m as source: r.adjust_for_ambient_noise(source)#recognize\r\n\r\n\t\ttts = gTTS(text=\"Enter Receiver Email. 
\", lang='en')\r\n\t\tttsname=(\"sub1.mp3\") \r\n\t\ttts.save(ttsname)\r\n\t\tplaysound(ttsname)\r\n\t\tos.remove(ttsname)\r\n\r\n\t\treceiver_email1=\"\"\r\n\t\twhile(receiver_email1==\"\"):\r\n\t\t\twith sr.Microphone() as source:\r\n\t\t\t\tprint (\"Your Receiver Email :\")\r\n\t\t\t\taudio=r.listen(source)\r\n\t\t\t\tprint (\"ok done!!\")\r\n\t\t\ttry:\r\n\t\t\t\treceiver_email1=r.recognize_google(audio)\r\n\t\t\t\tprint (\"You Receiver Email : \"+receiver_email1)\r\n\t\t\texcept sr.UnknownValueError:\r\n\t\t\t\tprint(\"Google Speech Recognition could not understand audio.\")\r\n\t\t\texcept sr.RequestError as e:\r\n\t\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e)) \r\n\r\n\t\t\tif receiver_email1==\"\":\r\n\t\t\t\ttts = gTTS(text=\"Error in Receiver Email.Please Give Input Again \", lang='en')\r\n\t\t\t\tran=random.randint(0,999)\r\n\t\t\t\tttsname=(\"err\"+str(ran)+\".mp3\") \r\n\t\t\t\ttts.save(ttsname)\r\n\t\t\t\tplaysound(ttsname)\r\n\t\t\t\tos.remove(ttsname)\r\n\t\t\t\t\r\n\t\twords=receiver_email1.split()\r\n\t\tmodified_mail=str()\r\n\t\tfor word in words:\r\n\t\t\tif word == 'underscore':\r\n\t\t\t\tmodified_mail=modified_mail+'_'\r\n\t\t\telif word == 'dot':\r\n\t\t\t\tmodified_mail=modified_mail+'.'\r\n\t\t\telif word == 'at':\r\n\t\t\t\tmodified_mail=modified_mail+'@'\r\n\t\t\telse:\r\n\t\t\t\tmodified_mail=modified_mail+word\r\n\r\n\t\treceiver_email=modified_mail\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\tsub=\"\"\r\n\t\tmsg=\"\"\r\n\t\t\r\n\t\twhile(sub==\"\"):\r\n\t\t\tprint(receiver_email)\r\n\t\t\ttts = gTTS(text=\"Your Subject. \", lang='en')\r\n\t\t\tttsname=(\"sub.mp3\") \r\n\t\t\ttts.save(ttsname)\r\n\t\t\tplaysound(ttsname)\r\n\t\t\tos.remove(ttsname)\r\n\r\n\r\n\t\t\twith sr.Microphone() as source:\r\n\t\t\t\tprint (\"Your Subject :\")\r\n\t\t\t\taudio=r.listen(source)\r\n\t\t\t\tprint (\"ok done!!\")\r\n\t\t\ttry:\r\n\t\t\t\tsub1=r.recognize_google(audio)\r\n\t\t\t\tprint (\"You Subject : \"+sub1)\r\n\t\t\t\tsub = sub1\r\n\t\t\texcept sr.UnknownValueError:\r\n\t\t\t\tprint(\"Google Speech Recognition could not understand audio.\")\r\n\t\t\texcept sr.RequestError as e:\r\n\t\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e)) \r\n\r\n\t\t\tif sub==\"\":\r\n\t\t\t\ttts = gTTS(text=\"Error in Subject.Please Give Input Again \", lang='en')\r\n\t\t\t\tran=random.randint(0,999)\r\n\t\t\t\tttsname=(\"err\"+str(ran)+\".mp3\") \r\n\t\t\t\ttts.save(ttsname)\r\n\t\t\t\tplaysound(ttsname)\r\n\t\t\t\tos.remove(ttsname)\r\n\r\n\t\twhile(msg==\"\"):\r\n\t\t\ttts = gTTS(text=\"Body of the Message. 
\", lang='en')\r\n\t\t\tran=random.randint(0,999)\r\n\t\t\tttsname=(\"sub\"+str(ran)+\".mp3\") \r\n\t\t\ttts.save(ttsname)\r\n\t\t\tplaysound(ttsname)\r\n\t\t\tos.remove(ttsname)\r\n\r\n\r\n\t\t\twith sr.Microphone() as source:\r\n\t\t\t\tprint (\"Body of the Message :\")\r\n\t\t\t\taudio=r.listen(source)\r\n\t\t\t\tprint (\"ok done!!\")\r\n\t\t\ttry:\r\n\t\t\t\ttext1=r.recognize_google(audio)\r\n\t\t\t\tprint (\"You said : \"+text1)\r\n\t\t\t\tmsg = text1\r\n\t\t\texcept sr.UnknownValueError:\r\n\t\t\t\tprint(\"Google Speech Recognition could not understand audio.\")\r\n\t\t\texcept sr.RequestError as e:\r\n\t\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e)) \r\n\t\t\t\r\n\t\t\tif msg==\"\":\r\n\t\t\t\ttts = gTTS(text=\"Error in Body of Message.Please Give Input Again \", lang='en')\r\n\t\t\t\tran=random.randint(0,999)\r\n\t\t\t\tttsname=(\"err\"+str(ran)+\".mp3\") \r\n\t\t\t\ttts.save(ttsname)\r\n\t\t\t\tplaysound(ttsname)\r\n\t\t\t\tos.remove(ttsname)\r\n\r\n\t\ttts = gTTS(text=\"Please Check Your Corresponding Inputs\", lang='en')\r\n\t\tttsname=(\"feed.mp3\") \r\n\t\ttts.save(ttsname)\r\n\t\tplaysound(ttsname)\r\n\t\tos.remove(ttsname)\r\n\r\n\t\ttts = gTTS(text=\"Receiver's Email ID is \"+receiver_email, lang='en')\r\n\t\tttsname=(\"feed1.mp3\") \r\n\t\ttts.save(ttsname)\r\n\t\tplaysound(ttsname)\r\n\t\tos.remove(ttsname)\r\n\r\n\t\ttts = gTTS(text=\"Your Subject is \"+sub, lang='en')\r\n\t\tttsname=(\"feed2.mp3\") \r\n\t\ttts.save(ttsname)\r\n\t\tplaysound(ttsname)\r\n\t\tos.remove(ttsname)\r\n\t\t\r\n\t\ttts = gTTS(text=\"Body of the Message is \"+msg, lang='en')\r\n\t\tran=random.randint(0,999)\r\n\t\tttsname=(\"sub\"+str(ran)+\".mp3\") \r\n\t\ttts.save(ttsname)\r\n\t\tplaysound(ttsname)\r\n\t\tos.remove(ttsname)\r\n\r\n\t\tchoice = \"\"\r\n\t\twhile(choice==\"\"):\r\n\t\t\ttts = gTTS(text=\"Are the Corresponding Inputs correct?\", lang='en')\r\n\t\t\tttsname=(\"feed3.mp3\") \r\n\t\t\ttts.save(ttsname)\r\n\t\t\tplaysound(ttsname)\r\n\t\t\tos.remove(ttsname)\r\n\r\n\r\n\t\t\twith sr.Microphone() as source:\r\n\t\t\t\tprint (\"Your Choice :\")\r\n\t\t\t\taudio=r.listen(source)\r\n\t\t\t\tprint (\"ok done!!\")\r\n\t\t\ttry:\r\n\t\t\t\tchoice = r.recognize_google(audio)\r\n\t\t\t\tprint (\"You Choice : \"+choice)\r\n\t\t\texcept sr.UnknownValueError:\r\n\t\t\t\tprint(\"Google Speech Recognition could not understand audio.\")\r\n\t\t\texcept sr.RequestError as e:\r\n\t\t\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e)) \r\n\r\n\t\t\tif choice==\"\":\r\n\t\t\t\ttts = gTTS(text=\"Error in Message.Please Give Input Again \", lang='en')\r\n\t\t\t\tran=random.randint(0,999)\r\n\t\t\t\tttsname=(\"err\"+str(ran)+\".mp3\") \r\n\t\t\t\ttts.save(ttsname)\r\n\t\t\t\tplaysound(ttsname)\r\n\t\t\t\tos.remove(ttsname)\r\n\r\n\t\tif choice == 'yes' or choice == 'Yes' or choice == 'YES':\r\n\t\t\tflag=1\r\n\t\tif choice == 'no' or choice == 'No' or choice == 'NO':\r\n\t\t\tflag=0\r\n\r\n\r\n\treturn receiver_email,sub,msg\r\n\t\t\r\n \r\n\r\ndef process(sym):\r\n\tglobal remail,subject,bom,sender_email,password\r\n\tremail,subject,bom=process1(sym)\r\n\tprint(\"remail in process\",remail)\r\n\r\n\tdisplay(sym)\r\n","sub_path":"sendMail.py","file_name":"sendMail.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544591476","text":"from tkinter import * \nfrom tkinter import filedialog\nfrom tkinter import 
messagebox\nfrom tkinter import ttk\nfrom Logica import *\nfrom controladores import *\n# ------>raiz o vemtana\nclass Raiz:\n def __init__(self,modelo):\n self._tk = Tk()\n self._tk.title(\"Ventana\")\n self._tk.resizable(0, 0)\n self.ventanainicial = None \n #Ventanainicial(self._tk)\n self.ventanaIngresar=None\n self.ventanaResultado=None\n self.ventanaOperaciones =None\n self.modelo =modelo\n #self._tk.mainloop()\n \n def Get_VentanaInicio(self):\n if self.ventanainicial == None: \n self.ventanainicial = Ventanainicial(self._tk,self.modelo)\n return self.ventanainicial\n \n def Get_VentanaIngresar(self):\n if self.ventanaIngresar == None:\n self.ventanaIngresar = VentanaIngresarDatos(self._tk,self.modelo)\n return self.ventanaIngresar\n\n def Set_VentanaOperaciones(self,valor):\n self.ventanaOperaciones=valor\n\n def Get_VentanaOperaciones(self):\n if self.ventanaOperaciones == None:\n self.ventanaOperaciones = VentanaOperaciones(self._tk,self.modelo)\n \n return self.ventanaOperaciones\n \n def Set_VentanaResultado(self,valor):\n self.ventanaResultado=valor\n\n def Get_VentanaResultado(self):\n if self.ventanaResultado == None:\n self.ventanaResultado = VentanaResultados(self._tk,self.modelo)\n return self.ventanaResultado\n\nclass Visible:\n def __init__(self):\n self.visible=False\n\n def Set_visible(self,valor):\n self.visible=valor\n\n def Isvisibe(self):\n return self.visible \n\n# ----> Panel1 o panel inicial\nclass Ventanainicial(Frame,Visible):\n def __init__(self, master,modelo=None):\n super().__init__(master)\n self.config(bg=\"#06D14A\")\n self.modelo=modelo\n self.controlador = controladorVentanaInicio(self)\n self.master = master\n self.componentes()\n\n def componentes(self):\n self.titulo= Label(self,text=\"Bienvenidos\",font=(\"Comic Sans Ms\", 24),bg=\"#06D14A\",fg=\"white\")\n self.titulo.grid(row=0,column=0,padx=60,pady=5,columnspan=2)\n self._boton = Button(self, text=\"Ingersar datos\" + \"\\nmanualmente \", font=(\"Comic Sans Ms\", 8), relief=\"raised\", bd=7, bg=\"#2486FB\", command=self.controlador.OnclikCambiar)\n self._boton.grid(row=1, column=0, padx=60, pady=45)\n self._boton2 = Button(self, text=\"Gargar archivo\" + \"\\n excel\", font=(\"Comic Sans Ms\", 8),relief=\"raised\", bd=7, bg=\"#2486FB\", command=self.controlador.onclickCargarexcel)\n self._boton2.grid(row=1, column=1, padx=60, pady=45)\n self.pack()\n\n def Get_Modelo(self):\n return self.modelo\n# ---------------------------------------\nclass VentanaIngresarDatos (Frame,Visible):\n def __init__(self, master,modelo=None):\n super().__init__(master)\n master.geometry(\"400x470\")\n master.config(bg=\"#33D9FF\")\n self.ventana=PanelDatos(master,self)\n self.master =master\n self.ventanainicio=0\n self.modelo = modelo\n self.config(bg=\"#33D9FF\")\n self.controlador = controladoringresar(self)\n self.activo=True\n self.activoVariable1=True\n self.activoVariable2=True\n self.valor1=0\n self.valor2=0\n self.Componentes()\n\n def Componentes(self):\n self.titulo= Label(self,text=\"INGRESE LOS DATOS\",font=(\"Comic Sans Ms\",12),bg=\"#33D9FF\")\n self.titulo.grid(row=0,column=0,columnspan=3,padx=20,pady=16)\n self.Valorvariable1 = Entry(self, width=8)\n self.Valorvariable1.grid(row=1, column=0, padx=35, pady=1)\n self.Valorvariable2 = Entry(self, width=8)\n self.Valorvariable2.grid(row=1, column=2, padx=35, pady=1)\n self.boton = Button(self,text=\"agregar\",font=(\"Comic Sans Ms\", 8),command=self.controlador.OnclickObtener)\n self.boton.grid(row=1, column=1, padx=42, pady=1)\n self.botonborrar1= 
Button(self,text=\"Borrar\"+\"\\nVariable 1\",font=(\"Comic Sans Ms\", 8),state=\"disabled\",command=self.controlador.OnclickBorrarvariable1)\n self.botonborrar1.grid(row=2,column=0,padx=35,pady=5)\n self.botonborrar2= Button(self,text=\"Borrar\"+\"\\nVariable 2\",font=(\"Comic Sans Ms\", 8),state=\"disabled\",command=self.controlador.OnclickBorrarvariable2)\n self.botonborrar2.grid(row=2,column=2,padx=35,pady=5)\n self.botonAtras =Button(self.master,text=\"Atras\",font=(\"Comic Sans Ms\", 12),command=self.controlador.OnclickAtras)\n self.botonAtras.place(x=171,y=420)\n self.pack(side=\"left\", anchor=\"n\", fill=\"x\")\n \n def Is_activoboton1(self,opcion):\n if opcion==0:\n self.Valorvariable1.config(state=\"readonly\")\n self.activoVariable1=False\n elif opcion==1:\n self.Valorvariable1.config(state=\"normal\")\n self.activoVariable1=True \n \n def Is_activoboton2(self,opcion):\n if opcion==0:\n self.Valorvariable2.config(state=\"readonly\")\n self.activoVariable2=False\n elif opcion==1:\n self.Valorvariable2.config(state=\"normal\")\n self.activoVariable2=True \n \n def Organizar(self):\n self.pack()\n self.ventana.place(x=94, y=140)\n self.botonAtras.place(x=171,y=430) \n self.master.geometry(\"400x470\")\n self.master.config(bg=\"#33D9FF\")\n\n def Get_Modelo(self):\n return self.modelo\n\n def Ocultar(self):\n self.ventana.place_forget()\n self.pack_forget()\n self.botonAtras.place_forget()\n# -------------------------------\nclass VentanaOperaciones(Frame,Visible):\n\n def __init__(self, master,modelo=None):\n super().__init__(master,width=500,height=450)\n self.datos1=[]\n self.datos2=[]\n self.modelo=modelo \n self.posicion=0\n self.controlador = controladoroperaciones(self)\n master.geometry(\"550x450\")\n master.config(bg=\"#9AFF10\")\n self.config(bg=\"#9AFF10\")\n self.comboactivo=False\n self.textactivo=False\n \n def Componentes(self):\n self.label=Label(self,text=\" Datos Ingresados\",font=(\"Comic Sans Ms\",12),bg=\"#9AFF10\")\n self.label.grid(row=0,column=0,columnspan=2,pady=20,padx=40)\n self.paneldatos= PanelMostrarDatos(self,self.datos1,self.datos2)\n self.botonWilcoxon= Button(self,text=\"Suma de rangos\"+\"\\nde Wilcoxon\",font=(\"Comic Sans Ms\",8),command=self.OnclickWilcoxon)\n self.botonMannw=Button(self,text=\"Prueba U de \"+\"\\nMann–Whitney\",font=(\"Comic Sans Ms\",8),command=self.OnclickMann_Whitney) \n self.botonatras= Button(self,text=\"Atras\")\n if len(self.datos1)<=len(self.datos2):\n self.maximo = len(self.datos2)\n self.botonWilcoxon.grid(row=len(self.datos2)+1,column=0,padx=40,pady=15)\n self.botonMannw.grid(row=len(self.datos2)+1,column=1,padx=40,pady=15)\n else:\n self.maximo=len(self.datos1)\n self.botonWilcoxon.grid(row=len(self.datos1)+1,column=0,padx=40,pady=15)\n self.botonMannw.grid(row=len(self.datos1)+1,column=1,padx=40,pady=15)\n self.botonatras.grid(row=self.maximo+1,column=0,columnspan=2,pady=15)\n self.pack(fill=\"y\", expand=1) \n\n def OnclickWilcoxon(self):\n if self.textactivo==True:\n if self.posicion==0:\n self.labelcombo2.grid_forget() \n self.entrada_alpha.grid_forget()\n self.botoncalcular.grid_forget()\n else:\n self.labelcombo2.place_forget() \n self.entrada_alpha.place_forget()\n self.botoncalcular.place_forget()\n self.Is_activoTextField(0) \n self.comboopciones = ttk.Combobox(self,state=\"readonly\",width=15)\n self.Opciones()\n self.Is_activoCombo(1)\n self.bototonopcion.config(command=self.OnclikObteneropcionW)\n \n def Opciones(self):\n self.labelcombo= Label(self,text=\"Hipotesis alternativa\",font=(\"Comic Sans 
Ms\",8))\n self.labelcombo2= Label(self, font=(\"Comic Sans Ms\",8))\n self.comboboxhipoalter = ttk.Combobox(self,state=\"readonly\",width=20)\n self.comboboxhipoalter[\"values\"]=[\"seleccione una opcion\",\"μ1<μ2\",\"μ1>μ2\",\"μ1≠μ2\"]\n self.comboboxhipoalter.current(0)\n self.bototonopcion = Button(self,text=\"Obtener opcion \",font=(\"Comic Sans Ms\",8))\n self.botoncalcular=Button(self,text=\"calcular\",command=self.controlador.calcular)\n if (self.maximo)<=5:\n self.labelcombo.grid(row=self.maximo+2,column=0,sticky=\"n\",pady=5)\n self.comboboxhipoalter.grid(row=self.maximo+3,column=0,padx=10)\n self.bototonopcion.grid(row=self.maximo+3,column=1,pady=5)\n else:\n self.paneldatos.grid_configure(columnspan=1)\n self.label.grid_configure(columnspan=3,column=0)\n self.botonWilcoxon.grid(columnspan=2)\n self.botonMannw.grid(columnspan=2)\n self.botonatras.grid(columnspan=1)\n self.bototonopcion.grid(row=1,column=2)\n self.comboboxhipoalter.grid(row=1,column=1,padx=10)\n if self.maximo <=7:\n self.labelcombo.grid(row=1,column=1,sticky=\"n\")\n else:\n ejey=(self.maximo//2)*30+10\n ejex=(195+10)\n self.labelcombo.place(x=ejex,y=ejey) \n self.posicion=1\n\n def OnclikObteneropcionW(self):\n if self.comboboxhipoalter.get() != \"seleccione una opcion\":\n if self.comboboxhipoalter.get()==\"μ1<μ2\" or self.comboboxhipoalter.get()== \"μ1>μ2\":\n self.comboopciones[\"values\"]=[\" \",\"0.001\",\"0.01\",\"0.025\",\"0.05\"]\n self.labelcombo2.config(text=\"Valor de alpha para una cola \")\n elif self.comboboxhipoalter.get() == \"μ1≠μ2\":\n self.comboopciones[\"values\"]=[\" \",\"0.002\",\"0.02\",\"0.05\",\"0.1\"] \n self.labelcombo2.config(text=\"Valor de alpha para dos colas \") \n if self.posicion==0:\n self.labelcombo2.grid(row=self.maximo+4,column=0)\n self.comboopciones.grid(row=self.maximo+5,column=0) \n self.botoncalcular.grid(row=self.maximo+5,column=1)\n elif self.posicion ==1:\n ejey=(self.maximo//2)*35+90\n self.labelcombo2.place(x=(180+10),y=ejey)\n self.comboopciones.place(x=(195+10),y=(ejey+30))\n self.botoncalcular.place(x=((195+10)+150),y=(ejey+30))\n self.comboopciones.current(0) \n\n def OnclickMann_Whitney(self):\n if self.comboactivo==True:\n if self.posicion==0:\n self.botoncalcular.grid_forget()\n self.comboopciones.grid_forget()\n self.labelcombo2.grid_forget()\n else:\n self.labelcombo2.place_forget()\n self.comboopciones.place_forget()\n self.botoncalcular.place_forget()\n self.Is_activoCombo(0) \n self.entrada_alpha = Entry(self,width=15)\n self.Opciones()\n self.Is_activoTextField(1)\n self.bototonopcion.config(command=self.OnclickObtenerMann)\n self.botoncalcular.config(command=self.controlador.calcularMann)\n\n def OnclickObtenerMann(self):\n if self.comboboxhipoalter.get() != \"seleccione una opcion\":\n if self.comboboxhipoalter.get()==\"μ1<μ2\" or self.comboboxhipoalter.get()== \"μ1>μ2\":\n self.labelcombo2.config(text=\"Valor de alpha para una cola \")\n elif self.comboboxhipoalter.get() == \"μ1≠μ2\":\n self.labelcombo2.config(text=\"Valor de alpha para dos colas \") \n if self.posicion==0:\n self.labelcombo2.grid(row=self.maximo+4,column=0)\n self.entrada_alpha.grid(row=self.maximo+5,column=0) \n self.botoncalcular.grid(row=self.maximo+5,column=1) \n elif self.posicion==1: \n ejey=(self.maximo//2)*35+90\n self.labelcombo2.place(x=(180+10),y=ejey)\n self.entrada_alpha.place(x=(195+10),y=(ejey+30))\n self.botoncalcular.place(x=((195+10)+150),y=(ejey+30)) \n \n def Is_activoCombo(self,opcion):\n if opcion==0:\n self.comboactivo=False\n elif opcion==1:\n 
self.comboactivo=True\n\n def Is_activoTextField(self,opcion):\n if opcion==0:\n self.textactivo=False\n elif opcion ==1 :\n self.textactivo=True \n\n def Get_botonatras(self):\n return self.botonatras \n\n def Get_Modelo(self):\n return self.modelo\n\n def Set_Datos1(self,lista):\n self.datos1=lista\n\n def Set_Datos2(self,lista):\n self.datos2=lista\n\n def Get_Datos1(self):\n return self.datos1\n\n def Get_Datos2(self):\n return self.datos2 \n\n def Get_combohipoalter(self):\n return self.comboboxhipoalter\n\n def Get_comboopcione(self):\n return self.comboopciones\n\n def Get_JtextDato(self):\n return self.entrada_alpha\n#------------------------------------------- \nclass VentanaResultados(Frame,Visible):\n def __init__(self, master,modelo=None):\n super().__init__(master)\n self.modelo=modelo\n self.opcion=0\n self.uaux=None\n self.controlador = controladorResultado(self)\n self.listarespuesta=[]\n self.config(bg=\"#9AFF10\")\n self.boton_atras =Button(self,text=\"Atras\")\n \n def Componentes(self,resultado):\n self.Set_listarespuesta(resultado)\n self.labeHipotesis=Label(self,text=\"HIPOTESIS NULA: μ1=μ2\",font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.hipotesistalternativa = Label(self,text=\"HIPOTESIS ALTERNATIVA: \"+resultado[1],font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.uaux= Label(self,text=\"Valor de \"+(resultado[0])[1]+\" : \"+str((resultado[0])[0]),font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.uaux.grid(row=5,column=0,pady=10) \n self.w1=Label(self,text=\"Valor de W1 \"+str(resultado[2]),font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.w2=Label(self,text=\"Valor de W2 \"+str(resultado[3]),font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.u = Label(self,text=\"Valor de U (tabla) \"+str(resultado[4]),font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.analisis= Label(self,text=\"Al comparar el valor \"+(resultado[0])[1]+\" con el valor de la tabla para\"+\"\\nevaluar la Hipotesis nula podemos ver que: \"+\"\\n\"+str((resultado[0])[0])+\" ≤ \"+str(resultado[4]),font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.respuesta=Label(self,text=\"Por lo tanto se puede concluir que \"+\"\\n\"+str(resultado[5]),font=(\"Comic Sans Ms\",10),bg=\"#9AFF10\")\n self.labeHipotesis.grid(row=0,column=0,pady=10)\n self.hipotesistalternativa.grid(row=1,column=0,pady=10)\n self.w1.grid(row=2,column=0,pady=10)\n self.w2.grid(row=3,column=0,pady=10)\n self.u.grid(row=4,column=0,pady=10) \n self.analisis.grid(row=6,column=0,pady=10)\n self.respuesta.grid(row=7,column=0,pady=10)\n self.boton_atras.grid(row=8,column=0)\n self.boton_atras.config(command=self.controlador.OnclickAtras)\n self.pack(side=TOP) \n \n def ComponentesMann(self,resultado):\n self.Set_listarespuesta(resultado)\n if len(resultado)==7:\n self.hipo_nula=Label(self,text=\"HIPOTESIS NULA: μ1=μ2\",font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\") \n self.hipo_alter=Label(self,text=\"HIPOTESIS ALTERNATIVA: \"+resultado[len(resultado)-1],font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelu=Label(self,text=\"El valor de U es \"+str(resultado[0]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelu0=Label(self,text=\"El valor de U0 (valor de la tabla) es \"+str(resultado[2]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelw1=Label(self,text=\"Valor de W1 es: \"+str(resultado[1]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.label_alpha=Label(self,text=\"Valor de alpha ingresado: \"+str(resultado[4]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.label_alphaprox=Label(self,text=\"Valor aproximado de alpha 
(valor de tabla): \"+str(resultado[3]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.analisis = Label(self,text=\"Al comparar el valor de U con U0 para evaluar la Hipotesis Nula se obtiene que\"+\"\\n\"+str(resultado[0])+\"≤\"+str(resultado[2]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.solucion = Label(self,text=\"Por lo tanto se puede concluir que\"+\"\\n\"+resultado[len(resultado)-2],font=(\"Comic Sans Ms\",9,),bg=\"#9AFF10\") \n else:\n if resultado[len(resultado)-1]==\"μ1<μ2\":\n self.hipo_nula=Label(self,text=\"HIPOTESIS NULA: μ1=μ2\",bg=\"#9AFF10\",font=(\"Comic Sans Ms\",9)) \n self.hipo_alter=Label(self,text=\"HIPOTESIS ALTERNATIVA: \"+resultado[len(resultado)-1],font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelu=Label(self,text=\"El valor de U es \"+str(resultado[0]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelu0=Label(self,text=\"El valor de U0 (valor de la tabla) es \"+str(resultado[2]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelw1=Label(self,text=\"Valor de W1 es: \"+str(resultado[1]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.label_alpha=Label(self,text=\"Valor de alpha ingresado: \"+str(resultado[5]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.label_alphaprox=Label(self,text=\"Valor aproximado de alpha (valor de tabla): \"+str(resultado[4]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.analisis = Label(self,text=\"Al comparar el valor de U con (N1*N2)-U0 para evaluar la Hipotesis Nula se obtiene que \"+\"\\n\"+str(resultado[0])+\" ≥ \"+str(resultado[3]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.solucion = Label(self,text=\"Por lo tanto se puede concluir que\"+\"\\n\"+resultado[len(resultado)-2],font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\") \n else:\n self.hipo_nula=Label(self,text=\"HIPOTESIS NULA: μ1=μ2\",font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\") \n self.hipo_alter=Label(self,text=\"HIPOTESIS ALTERNATIVA: \"+resultado[len(resultado)-1],font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelu=Label(self,text=\"El valor de U es \"+str(resultado[0]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelu0=Label(self,text=\"El valor de U0 (valor de la tabla) es \"+str(resultado[2]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.labelw1=Label(self,text=\"Valor de W1 es: \"+str(resultado[1]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.label_alpha=Label(self,text=\"Valor de alpha ingresado: \"+str(resultado[5]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.label_alphaprox=Label(self,text=\"Valor aproximado de alpha (valor de tabla): \"+str(resultado[4]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.analisis = Label(self,text=\"Al comparar el valor de U con U0 y (N1*N2)-U0 para evaluar la Hipotesis Nula se obtiene que \"+\"\\n\"+str(resultado[0])+\" ≥ \"+str(resultado[3])+\" o \"+str(resultado[0])+\"≤\"+str(resultado[2]),font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\")\n self.solucion = Label(self,text=\"Por lo tanto se puede concluir que\"+\"\\n\"+resultado[len(resultado)-2],font=(\"Comic Sans Ms\",9),bg=\"#9AFF10\") \n self.hipo_nula.grid(row=0,column=0,pady=8)\n self.hipo_alter.grid(row=1,column=0,pady=8)\n self.labelu.grid(row=2,column=0,pady=8)\n self.labelu0.grid(row=3,column=0,pady=8)\n self.labelw1.grid(row=4,column=0,pady=8)\n self.label_alpha.grid(row=5,column=0,pady=8)\n self.label_alphaprox.grid(row=6,column=0,pady=8)\n self.analisis.grid(row=7,column=0,pady=8) \n self.solucion.grid(row=8,column=0,pady=8)\n self.boton_atras.grid(row=9,column=0)\n 
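# rebind the shared "Atras" button to the Mann-Whitney back handler before packing this results view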
self.boton_atras.config(command=self.controlador.OnclickAtras1) \n self.pack(side=TOP) \n\n def Get_Modelo(self):\n return self.modelo\n\n def Set_listarespuesta(self,valor):\n self.listarespuesta=valor\n\n def Get_listarespuesta(self):\n return self.listarespuesta\n\nclass PanelDatos(PanedWindow):\n def __init__(self, master,ventana):\n super().__init__(master)\n self.ventana=ventana\n self.config(bg=\"#86E2A3\")\n self.agregar = True\n self.opcion =(0,0)\n self.Contenido()\n\n def Contenido(self):\n self.Lista1=[]\n self.Lista2=[]\n self.aux1=0\n self.aux2=0\n for row in range(1):\n for column in range(2):\n label = Label(self, text=\"Variable : \" + str(column+1),bg=\"#86E2A3\",fg=\"white\")\n label.config(font=('Arial', 14))\n label.grid(row=row, column=column, sticky=\"nsew\",padx=1, pady=1)\n self.grid_columnconfigure(column, weight=1)\n self.place(x=94, y=140)\n \n def ConstruirTabla(self,valor1, valor2):\n if self.verificar():\n self.Agregar(1,valor1)\n self.Agregar(2,valor2)\n self.botonenviar= Button(self,text=\"Enviar datos\",command=self.OnclickEnviarDatos)\n if valor1 <= valor2:\n self.botonenviar.grid(row=valor2+1,column=0,columnspan=2,pady=2)\n else:\n self.botonenviar.grid(row=valor1+1,column=0,columnspan=2,pady=2) \n else:\n if valor2==0:\n if self.Get_Agregar() :\n self.Agregar(1,valor1)\n else:\n self.Borar(1,valor1) \n elif valor1==0:\n if self.Get_Agregar() :\n self.Agregar(2,valor2)\n else:\n self.Borar(2,valor2) \n elif valor1 !=0 and valor2!=0:\n if self.Get_Opcion()==(1,1):\n self.Agregar(1,valor1)\n self.Agregar(2,valor2)\n elif self.Get_Opcion()==(0,0):\n self.Borar(1,valor1) \n self.Borar(2,valor2) \n elif self.Get_Opcion()==(0,1):\n self.Borar(1,valor1)\n self.Agregar(2,valor2)\n elif self.Get_Opcion()==(1,0):\n self.Agregar(1,valor1)\n self.Borar(2,valor2)\n if len(self.Lista1)<= len(self.Lista2):\n self.botonenviar.grid(row=len(self.Lista2)+1,column=0,columnspan=2,pady=2)\n else:\n self.botonenviar.grid(row=len(self.Lista1)+1,column=0,columnspan=2,pady=2)\n \n def Agregar(self,opcion,cantidad):\n if opcion ==1:\n tamaño = cantidad-len(self.Lista1)\n for i in range(tamaño):\n entrada = Entry(self,width=15)\n entrada.grid(row=(len(self.Lista1)+1), column=0, padx=1, pady=1)\n self.Lista1.append(entrada)\n elif opcion ==2: \n tamaño = cantidad-len(self.Lista2)\n for i in range(tamaño):\n entrada = Entry(self,width=15)\n entrada.grid(row=(len(self.Lista2)+1), column=1, padx=1, pady=1)\n self.Lista2.append(entrada)\n\n def Borar(self,opcion,cantidad):\n if opcion == 1:\n inicio=len(self.Lista1)-1\n for i in range(inicio,cantidad-1,-1):\n aux=self.Lista1.pop(i)\n aux.grid_forget()\n elif opcion ==2: \n inicio = len(self.Lista2)-1 \n for i in range(inicio,cantidad-1,-1):\n aux=self.Lista2.pop(i)\n aux.grid_forget() \n\n def verificar(self):\n if len(self.Lista1)==0 and len(self.Lista2)==0:\n return True\n else:\n return False \n \n def OnclickEnviarDatos(self):\n try:\n self.variable1=[]\n self.variable2=[]\n for i in self.Lista1:\n numero = i.get()\n if numero.find(\".\")== -1:\n self.variable1.append(int(numero))\n else:\n self.variable1.append(float(numero))\n \n for j in self.Lista2:\n numero = j.get()\n if numero.find(\".\")== -1:\n self.variable2.append(int(numero))\n else:\n self.variable2.append(float(numero))\n \n self.ventana.controlador.CambiarVentana() \n except:\n messagebox.showwarning(\"Advertencia\",\"Por favor ingrese valores validos\"+\"\\n y utilize un punto ( . 
) para las cifras decimales \") \n \n def Get_Agregar(self):\n return self.agregar\n\n def Get_Variable1(self):\n return self.variable1\n\n def Get_Variable2(self):\n return self.variable2 \n\n def Get_Opcion(self):\n return self.opcion\n\n def Get_BotonEnviar(self):\n return self.botonenviar\n\n def Set_Agregar(self,valor):\n self.agregar=valor\n\n def Set_opcion(self,valor):\n self.opcion=valor\n\nclass PanelMostrarDatos(PanedWindow):\n \n def __init__(self, master,datos1,datos2):\n super().__init__(master,)\n self.variable1=datos1\n self.variable2=datos2\n self.Componentes()\n\n def Componentes(self):\n for row in range(1):\n for column in range(2):\n label = Label(self, text=\"Variable : \" + str(column+1),bg=\"#ECC337\")\n label.config(font=('Arial', 12))\n label.grid(row=row, column=column, sticky=\"nsew\",padx=1, pady=1)\n self.grid_columnconfigure(column, weight=1)\n\n for i in range(len(self.Get_Variable1())):\n label = Label(self, text=str(self.Get_Variable1()[i]),bg=\"#ECC337\")\n label.config(font=('Arial', 12))\n label.grid(row=i+1, column=0, sticky=\"nsew\",padx=1, pady=1)\n\n for j in range(len(self.Get_Variable2())):\n label = Label(self, text=str(self.Get_Variable2()[j]),bg=\"#ECC337\")\n label.config(font=('Arial', 12))\n label.grid(row=j+1, column=1, sticky=\"nsew\",padx=1, pady=1) \n \n self.grid(row=1,column=0,columnspan=2)\n\n def Get_Variable1(self):\n return self.variable1\n\n def Get_Variable2(self):\n return self.variable2 \n#-------------------------------------","sub_path":"ventana.py","file_name":"ventana.py","file_ext":"py","file_size_in_byte":26431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178148001","text":"class Employee:\n\n # Class variables\n raise_amount = 1.04\n\n # Regular methods\n def __init__(self, first, last, pay):\n self.first = first\n self.last = last\n self.pay = pay\n self.email = first + '.' 
+ last + '@example.com'\n\n def fullname(self):\n return '{} {}'.format(self.first, self.last)\n\n def pay_raise(self):\n self.pay = int(self.pay * self.raise_amount)\n\n# Class inherits methods from other class\nclass Developer(Employee):\n raise_amount = 1.1\n \n def __init__(self, first, last, pay, prog_lang):\n # super().__init__() or Employee.__init__() is used to inherit methods from Employee class\n super().__init__(first, last, pay)\n # Employee.__init__(self, first, last, pay)\n self.prog_lang = prog_lang\n\n\nclass Manager(Employee):\n\n def __init__(self, first, last, pay, employees=None):\n super().__init__(first, last, pay)\n if employees is None:\n self.employees = []\n else:\n self.employees = employees\n # Method to add employees\n def add_employee(self, emp):\n if emp not in self.employees:\n self.employees.append(emp)\n # Method to remove employees\n def remove_employee(self, emp):\n if emp in self.employees:\n self.employees.remove(emp)\n def print_employee(self):\n for emp in self.employees:\n print(emp.fullname())\n\n\n\nhelp(Developer) # help() prints its output and returns None, so no print() wrapper is needed\n\nemp_1 = Employee('Jack', 'Smith', 2000)\nemp_2 = Employee('Mary', 'Jones', 2500)\n\nprint(emp_1.email)\nprint(emp_2.email)\n\ndev_1 = Developer('Bob', 'Brown', 5000, 'Python')\ndev_2 = Developer('Sarah', 'Simpson', 3000, 'C#')\n# print(dev_1.email)\n# print(dev_2.email)\n#\n# print(dev_1.pay)\n# dev_1.pay_raise()\n# print(dev_1.pay)\n#\n# print(emp_1.pay)\n# emp_1.pay_raise()\n# print(emp_1.pay)\n#\n# print(dev_1.email)\n# print(dev_1.prog_lang)\n\nman_1 = Manager('Simon', 'Green', 10000, [dev_1])\nman_1.print_employee()\nprint('\\n')\nman_1.add_employee(dev_2)\nman_1.print_employee()\nprint('\\n')\nman_1.remove_employee(dev_1)\nman_1.print_employee()\n\n\n# Check if object is instance\nprint(isinstance(man_1, Manager))\nprint(isinstance(man_1, Employee))\nprint(isinstance(man_1, Developer))\n\n\n# Check if class is a subclass\nprint(issubclass(Manager, Employee))\nprint(issubclass(Developer, Employee))\nprint(issubclass(Manager, Developer))","sub_path":"New folder/021_oop_samples/OOP_part5_inheritance.py","file_name":"OOP_part5_inheritance.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"142008216","text":"from pynput.keyboard import Listener, Key, Controller\n# import logging\nimport win32clipboard as w\n# import win32con\nimport time\nimport requests\n\n\n\n# wenjianweizhi = \"D:\\\\hi\\\\\"\n\n# logging.basicConfig(filename=(wenjianweizhi + \"keylogger.txt\"), format=\"%(asctime)s:%(message)s\", level=logging.DEBUG)\n\ndef download_page(url):\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0\"}\n r = requests.get(url, headers=headers) # add headers to mimic a browser\n # assumption: the argument of this replace() was lost during extraction; '<br />' is an assumed HTML line-break tag from the API response\n return r.text.replace('<br />
', '') + \"\\r \\n\"\n\n\n# def getClipboard():#读取剪切板\n# w.OpenClipboard()\n# d = w.GetClipboardData(win32con.CF_TEXT)\n# w.CloseClipboard()\n# return d\ndef setClipboard(aString): # 写入剪切板\n w.OpenClipboard()\n w.EmptyClipboard()\n w.SetClipboardText(aString)\n w.CloseClipboard()\n\n\n# def timeinitial(c):\n# if c == 0:\n# return\n\n\n\ndef press(key):\n try:\n print(key.char)\n if key.char == '-':\n print('进来了 -- ')\n url = 'https://nmsl.shadiao.app/api.php?level=min&lang=zh_cn'\n html = download_page(url)\n\n m, s = divmod(seconds, 60)\n # 在这里调接口,将数据写进剪贴板,然后模拟键盘的粘贴(Ctrl + V)\n setClipboard(str(html))\n keyboard = Controller()\n keyboard.press(Key.ctrl)\n keyboard.press('v')\n # 延时10毫秒\n time.sleep(0.01)\n keyboard.release(Key.ctrl)\n keyboard.release('v')\n\n\n except Exception as e:\n print(\"已调到该程序,但是引用报错\", e)\n\n\nwith Listener(on_press=press) as listener:\n listener.join()\n","sub_path":"笔试/快捷记录时间.py","file_name":"快捷记录时间.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523564852","text":"# Tic-Tac-Toe\r\n# Plays the game of tic-tac-toe against a human opponent\r\n \r\n# global constants\r\nX = \"X\"\r\nO = \"O\"\r\nEMPTY = \" \"\r\nTIE = \"НИЧЬЯ\"\r\nNUM_SQUARES = 10\r\n\r\n\r\ndef display_instruct():\r\n \"\"\"Выводит на экран инструкцию для игрока.\"\"\" \r\n print(\r\n \"\"\"\r\n Добро пожаловать на ринг грандиознейших интеллектуальных состязаний всех времен.\r\nТвой мозг и мой процессор сойдутся в схватке за доской игры \"Крестики-нолики\".\r\n\r\nЧтобы сделать ход. введи число от О до 8. Числа однозначно соответствуют полям\r\nдоски - так. как показано ниже:\\n\r\n \r\n 7 | 8 | 9\r\n ---------\r\n 4 | 5 | 6\r\n ---------\r\n 1 | 2 | 3\r\n\r\nПриготовься к бою. жалкий белковый человечишка. Вот-вот начнется решающее сражение.\\n\r\n \"\"\"\r\n )\r\n\r\n\r\ndef ask_yes_no(question):\r\n \"\"\"Задает вопрос с ответом 'да' или 'нет'\"\"\"\r\n response = None\r\n while response not in (\"y\", \"n\"):\r\n response = input(question).lower()\r\n return response\r\n\r\n\r\ndef ask_number(question, low, high):\r\n \"\"\"Просит ввести число из диапазона\"\"\"\r\n response = None\r\n while response not in range(low, high):\r\n response = int(input(question))\r\n return response\r\n\r\n\r\ndef pieces():\r\n \"\"\"Определяет принадлежность первого хода.\"\"\"\r\n go_first = ask_yes_no(\"Xoчeшь оставить за собой первый ход? (y/n): \")\r\n if go_first == \"y\":\r\n print(\"\\nHy что ж. 
даю тебе фору: играй крестиками.\")\r\n human = X\r\n computer = O\r\n else:\r\n print(\"\\nTвoя удаль тебя погубит. Буду начинать я.\")\r\n computer = X\r\n human = O\r\n return computer, human\r\n\r\n\r\ndef new_board():\r\n \"\"\"Создает новую игровую доску.\"\"\"\r\n board = []\r\n for square in range(NUM_SQUARES):\r\n board.append(EMPTY)\r\n return board\r\n\r\n\r\ndef display_board(board):\r\n \"\"\"Отображает игровую доску на экране.\"\"\"\r\n print(\"\\n\\t\", board[7], \"|\", board[8], \"|\", board[9])\r\n print(\"\\t\", \"---------\")\r\n print(\"\\t\", board[4], \"|\", board[5], \"|\", board[6])\r\n print(\"\\t\", \"---------\")\r\n print(\"\\t\", board[1], \"|\", board[2], \"|\", board[3], \"\\n\")\r\n\r\n\r\ndef legal_moves(board):\r\n \"\"\"создает список доступных ходов\"\"\"\r\n moves = []\r\n # start at 1: square 0 exists only as padding so the numpad keys 1-9 map directly to indices\r\n for square in range(1, NUM_SQUARES):\r\n if board[square] == EMPTY:\r\n moves.append(square)\r\n return moves\r\n\r\n\r\ndef winner(board):\r\n \"\"\"Определяет победителя в игре\"\"\"\r\n WAYS_TO_WIN = ((1, 2, 3),\r\n (4, 5, 6),\r\n (7, 8, 9),\r\n (3, 6, 9),\r\n (1, 4, 7),\r\n (2, 5, 8),\r\n (1, 5, 9),\r\n (3, 5, 7))\r\n \r\n for row in WAYS_TO_WIN:\r\n if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY: \r\n winner = board[row[0]]\r\n return winner\r\n\r\n # skip the unused square 0, otherwise a full board (a draw) would never be detected\r\n if EMPTY not in board[1:]:\r\n return TIE\r\n\r\n return None\r\n\r\n\r\ndef human_move(board, human):\r\n \"\"\"Получает ход человека.\"\"\" \r\n legal = legal_moves(board)\r\n move = None\r\n while move not in legal:\r\n move = ask_number(\"Tвoй ход. Выбери одно из полей (1 - 9):\", 1, NUM_SQUARES)\r\n if move not in legal:\r\n print(\"\\nCмeшнoй человек! Это поле уже занято. Выбери дpyroe.\\n\")\r\n print(\"Ладно...\")\r\n return move\r\n\r\n\r\ndef computer_move(board, computer, human):\r\n \"\"\"Делает ход за компьютерного противника.\"\"\"\r\n # make a copy to work with since function will be changing list\r\n board = board[:]\r\n # the best positions to have, in order\r\n BEST_MOVES = (5, 1, 3, 7, 9, 2, 4, 6, 8)\r\n\r\n print(\"Я выберу поле номер\", end=\" \")\r\n \r\n # if computer can win, take that move\r\n for move in legal_moves(board):\r\n board[move] = computer\r\n if winner(board) == computer:\r\n print(move)\r\n return move\r\n # done checking this move, undo it\r\n board[move] = EMPTY\r\n \r\n # if human can win, block that move\r\n for move in legal_moves(board):\r\n board[move] = human\r\n if winner(board) == human:\r\n print(move)\r\n return move\r\n # done checking this move, undo it\r\n board[move] = EMPTY\r\n\r\n # since no one can win on next move, pick best open square\r\n for move in BEST_MOVES:\r\n if move in legal_moves(board):\r\n print(move)\r\n return move\r\n\r\n\r\ndef next_turn(turn):\r\n \"\"\"Switch turns.\"\"\"\r\n if turn == X:\r\n return O\r\n else:\r\n return X\r\n\r\n \r\ndef congrat_winner(the_winner, computer, human):\r\n \"\"\"Поздравляет победителя игры\"\"\"\r\n if the_winner != TIE:\r\n print(the_winner, \"выиграл!\\n\")\r\n else:\r\n print(\"Ничья\\n\")\r\n\r\n if the_winner == computer:\r\n print(\"Kaк я и предсказывал. победа в очередной раз осталась за мной.\\n\"\r\n\"Вот еще один довод в пользу того. что компьютеры превосходят людей решительно во всем.\")\r\n\r\n elif the_winner == human:\r\n print(\"O нет. этого не может быть! Неужели ты как-то сумел перехитрить меня.\\\r\nбелковый?\\n\"\r\n\"Клянусь: я. компьютер. не допущу этого больше никогда!\")\r\n\r\n elif the_winner == TIE:\r\n print(\"Teбe несказанно повезло. 
дружок: ты сумел свести игру вничью.\\n\"\r\n\"Радуйся же сегодняшнему успеху! Завтра тебе уже не суждено его повторить.\")\r\n\r\n\r\ndef main():\r\n display_instruct()\r\n computer, human = pieces()\r\n turn = X\r\n board = new_board()\r\n display_board(board)\r\n\r\n while not winner(board):\r\n if turn == human:\r\n move = human_move(board, human)\r\n board[move] = human\r\n else:\r\n move = computer_move(board, computer, human)\r\n board[move] = computer\r\n display_board(board)\r\n turn = next_turn(turn)\r\n\r\n the_winner = winner(board)\r\n congrat_winner(the_winner, computer, human)\r\n\r\n\r\n# start the program\r\nmain()\r\n\r\ninput(\"\\n\\nНажмите Enter. чтобы выйти.\")\r\n","sub_path":"tic-tac-toe on numpad.py","file_name":"tic-tac-toe on numpad.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"2952406","text":"# -*- coding: utf-8 -*-\n__author__ = 'changwei'\n\nfrom os import path\nfrom scrapy import signals\nfrom scrapy.xlib.pydispatch import dispatcher\nimport sqlite3\n\n\nclass NoteBookPipeline(object):\n def __init__(self):\n self.conn = None\n self.filename = 'note.db'\n dispatcher.connect(self.initialize, signals.engine_started)\n dispatcher.connect(self.finalize, signals.engine_stopped)\n\n def process_item(self, item, spider):\n self.conn.execute('insert into fjsen values(?,?,?,?,?,?)', (None, item['model'][0], item['price'][0], item['sales'][0], item['dealer'][0], item['pic'][0]))\n return item\n\n def initialize(self):\n if path.exists(self.filename):\n self.conn = sqlite3.connect(self.filename)\n else:\n self.conn = self.create_table(self.filename)\n\n def finalize(self):\n if self.conn is not None:\n self.conn.commit()\n self.conn.close()\n self.conn = None\n\n @staticmethod\n def create_table(filename):\n conn = sqlite3.connect(filename)\n conn.execute(\"\"\"create table fjsen(id integer primary key autoincrement, model text, price text, sales text, dealer text, pic text)\"\"\")\n conn.commit()\n return conn\n","sub_path":"note_book/note_book/pipelines_note_book.py","file_name":"pipelines_note_book.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152892013","text":"#! 
/Python35/python.exe\r\nprint(\"Content-type: text/html \\n\")\r\n\r\nimport cgi, cgitb, random\r\n\r\nformData = cgi.FieldStorage()\r\nplayer1 = formData.getvalue(\"name1\")\r\nplayer2 = formData.getvalue(\"name2\")\r\nmove = int(formData.getvalue(\"moveNum\"))\r\nroll = random.randint(1, 6)\r\nturn = formData.getvalue(\"whosTurn\")\r\nrolled = formData.getvalue(\"whoRolled\")\r\nrolledCpy = rolled\r\nholder = int()\r\n\r\nblueX = int(formData.getvalue(\"xblue\"))\r\nblueY = int(formData.getvalue(\"yblue\"))\r\nredX = int(formData.getvalue(\"xred\"))\r\nredY = int(formData.getvalue(\"yred\"))\r\n\r\n# Coordinates for each tile\r\nxCoordinates = [35, 101, 167, 233, 299, 365, 431, 497, 563, 629]\r\nyCoordinates = [634, 568, 502, 436, 370, 304, 238, 172, 106, 40]\r\n\r\n# Snakes' and Ladders' positions based on previously specified coordinates (a dictionary of indeces)\r\nsnakes = {(3, 3):(1, 4), (4, 4):(0, 0), (5, 8):(2, 7), (7, 6):(5, 6), (7, 1):(0, 0), (8, 9):(6, 9), (9, 2):(6, 4)}\r\nladders = {(0, 4):(2, 2), (0, 7):(2, 9), (1, 6):(7, 5), (3, 8):(4, 8), (4, 2):(6, 3), (7, 8):(9, 9), (8, 1):(9, 1)}\r\n\r\n# Open and write - game log\r\nlogFileName = formData.getvalue(\"logFileName\")\r\nf = open(logFileName, \"a\")\r\nf.write(\"\\n%5s\\t\" %str(move) +rolled+ \" rolled \" +str(roll)+ \".\")\r\n\r\ndef makeMove(player):\r\n\t# Determines new coordinates for player markers according to roll\r\n\t# Includes snakes and ladders mechanic, and logging\r\n\t# Parameter(s): player - whos turn to move\r\n\t# Returns: [x, y] - new coordinates\r\n\tx = blueX if player == player1 else redX\r\n\ty = blueY if player == player1 else redY \r\n\r\n\tif y % 2 == 0:\r\n\t\tif (x + roll) > 9:\r\n\t\t\ty += 1\r\n\t\t\tx = 10 - ((x+roll)-9)\r\n\t\telse:\r\n\t\t\tx += roll\r\n\telse:\r\n\t\tif (x - roll) < 0:\r\n\t\t\tif (y == 9):\r\n\t\t\t\tx = roll - x\r\n\t\t\t\tf.write(\" \" +player+ \" has an excess of \" +str(x)+ \", moving back to #\" +determineTile(x,y)+ \".\") \r\n\t\t\telse:\r\n\t\t\t\ty += 1\r\n\t\t\t\tx = (roll-x) - 1\r\n\t\telse:\r\n\t\t\tx -= roll\r\n\r\n\tif (y,x) in ladders:\r\n\t\tf.write(\" \" +player+ \" climbed up a ladder from #\" +determineTile(x,y)+ \" to #\") \r\n\t\tholder = y\r\n\t\ty = ladders[(y,x)][0]\r\n\t\tx = ladders[(holder,x)][1]\r\n\t\tf.write(\"\" +determineTile(x,y)+ \"!\") \r\n\r\n\tif (y,x) in snakes:\r\n\t\tf.write(\" \" +player+ \" got rekt by snake from #\" +determineTile(x,y)+ \" to #\") \r\n\t\tholder = y\r\n\t\ty = snakes[(y,x)][0]\r\n\t\tx = snakes[(holder,x)][1]\r\n\t\tf.write(\"\" +determineTile(x,y)+ \".\") \r\n\r\n\treturn [x, y]\r\n\r\ndef determineTile(x, y):\r\n\t# Determines the tile number the player is at, based on coordinates\r\n\t# Parameter(s): x , y - coordinates, aka indeces for x&y coordinates list\r\n\t# Returns: tilenumber - tile number, in string data type\r\n\tif y % 2 == 0:\r\n\t\ttilenumber = (y*10) + (x+1)\r\n\telse:\r\n\t\tif (y == 9):\r\n\t\t\ttilenumber = 100 - x\r\n\t\telse:\r\n\t\t\ttilenumber = ((y+1)*10) - x\r\n\r\n\treturn str(tilenumber)\r\n\r\ndef getWinner():\r\n\t# Determines the winner\r\n\t# Parameter(s): x , y - coordinates, aka indeces for x&y coordinates list\r\n\t# Returns: \r\n\t# \"none\" - if there is no winner at the moment\r\n\t# player - name of winner (either player1 or player2)\r\n\tif blueX == 0 and blueY == 9:\r\n\t\treturn player1\r\n\telif redX == 0 and redY == 9:\r\n\t\treturn player2\r\n\telse:\r\n\t\treturn \"none\"\r\n\r\nprint ('''\r\n\r\n\t\r\n\t\tSnakes and 
Ladders''')  # page header markup (html/head/css) stripped during extraction; only the title text survived\r\n\r\nif rolled == player1:\r\n\t# player status panels: the surrounding HTML was stripped during extraction, so only the text fragments are kept\r\n\tprint ('''\r\n\t\t''' + player1 + ''' (blue)\r\n\t\trolls a...\r\n\t''')\r\n\tprint (\"%d\" %roll)  # dice image markup stripped; only the roll value is recoverable\r\n\tprint ('''\r\n\t\t''' + player2 + ''' (red)\r\n\t\tstandby...\r\n\t''')\r\n\r\n\tnewCoordinates = makeMove(player1)\r\n\tblueX = newCoordinates[0]\r\n\tblueY = newCoordinates[1]\r\n\r\nif rolled == player2:\r\n\tprint ('''\r\n\t\t''' + player1 + ''' (blue)\r\n\t\tstandby...\r\n\t''')\r\n\tprint ('''\r\n\t\t''' + player2 + ''' (red)\r\n\t\trolls a...\r\n\t''')\r\n\tprint (\"%d\" %roll)  # dice image markup stripped; only the roll value is recoverable\r\n\r\n\tnewCoordinates = makeMove(player2)\r\n\tredX = newCoordinates[0]\r\n\tredY = newCoordinates[1]\r\n\r\nif (blueY == redY) and (blueX == redX):\r\n\tf.write(\" Both players are at #\" +determineTile(blueX,blueY)+ \"!\") \r\n\r\nrolledCpy = turn\r\nturn = player1 if turn==player2 else player2\r\n\r\nif getWinner() == \"none\":\r\n\t# next-turn form: the hidden input fields that carried the game state were stripped during extraction\r\n\tprint ('''\t\t''' + rolledCpy + ''''s turn!''')\r\n\tf.write(\" \" +rolledCpy+ \"'s turn!\")\r\n\r\nelse:\r\n\t# game-over panel: the restart form markup was stripped during extraction\r\n\tprint ('''\r\n\t\t\t\t\t''' + getWinner() + ''' won!''')\r\n\tf.write(\"\\n\\n\" +getWinner()+ \" has won the game!\")\r\nf.close()\r\n\r\nprint(\"<!-- log panel markup stripped during extraction -->\")\r\nf = open(logFileName, \"r\")\r\nlogs = f.read().splitlines()\r\nprint('''\t\tLog''')  # log panel heading; surrounding markup stripped\r\n\r\nfor i, n in enumerate(logs):\r\n\tif i <= 3 or i == len(logs)-1:\r\n\t\tprint (\"\" +n+ \"<br>\")  # assumed line-break tag; the original markup was stripped\r\n\telse:\r\n\t\tprint(\"\" +n[:5]+ \"\\t\" + n[5:]+ \"<br>\")  # assumed line-break tag; the original markup was stripped\r\n\r\nprint('''<!-- log panel closing markup stripped -->''')\r\n\r\nprint ('''<!-- board container markup stripped -->''')\r\n\r\n# BOARD DISPLAY\r\nprint(\"<!-- board table opening markup stripped -->\")\r\nfor row in range (10):\r\n\tprint (\"<!-- row opening markup stripped -->\")\r\n\tfor col in range (5):\r\n\t\tif row % 2 == 0:\r\n\t\t\tprint (\"<!-- cell markup stripped -->\")\r\n\t\t\tprint (\"<!-- cell markup stripped -->\")\r\n\t\telse:\r\n\t\t\tprint (\"<!-- cell markup stripped -->\")\r\n\t\t\tprint (\"<!-- cell markup stripped -->\")\r\n\tprint (\"<!-- row closing markup stripped -->\")\r\nprint (\"<!-- board table closing markup stripped -->\")\r\n\r\n# PLAYER MARKER DISPLAY (marker image markup stripped during extraction; only the pixel coordinates are recoverable)\r\nif move == 1:\r\n\tif rolled == player1:\r\n\t\tprint (\"<!-- blue marker at top:%dpx; left:%dpx -->\" %(yCoordinates[blueY], xCoordinates[blueX]))\r\n\telif rolled == player2:\r\n\t\tprint (\"<!-- blue marker at top:%dpx; left:%dpx -->\" %(yCoordinates[blueY], xCoordinates[blueX]))\r\nelif (blueY == redY) and (blueX == redX):\r\n\tprint (\"<!-- both markers at top:%dpx; left:%dpx -->\" %(yCoordinates[blueY], xCoordinates[blueX]))\r\nelse:\r\n\tprint (\"<!-- blue marker at top:%dpx; left:%dpx -->\" %(yCoordinates[blueY], xCoordinates[blueX]))\r\n\tprint (\"<!-- red marker at top:%dpx; left:%dpx -->\" %(yCoordinates[redY], xCoordinates[redX]))\r\n\r\n# SNAKES AND LADDERS DISPLAY + closing tags\r\nprint ('''<!-- snake and ladder image markup and the closing form/body/html tags were stripped during extraction -->
\r\n\t\r\n''')\r\n","sub_path":"snakes-and-ladders/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"241155826","text":"'''create a shuffling program\ncreated spring 2018\nHW 03\nauthor: Alex McDowell (amm74)\n'''\n\nimport random\nimport math\n\n#list a-z\nl = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',\n 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n#prompt for number of shuffles\nnumTimes = int(input('Enter number of shuffles: '))\n\n\nfor i in range(0,numTimes):\n \n #get random number from list and remove it\n pull = l[random.randint(0,25)]\n l.remove(pull)\n \n #get random number for where to reinsert and reinsert\n reinsert = random.randint(0,25)\n l.insert(reinsert, pull)\n\nprint(l)","sub_path":"lab03/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67119998","text":"#!/usr/bin/env python3\n\nimport re\n\nNN = int(input().strip())\n\nfor i in range(NN):\n input_str = input().strip()\n searched = re.search(r'^[789]\\d{9}$', input_str)\n if searched is None:\n print(\"No\")\n else:\n print(\"Yes\")\n","sub_path":"hacker_rank/python/13.regex_and_parsing/08.validating_phone_number.py","file_name":"08.validating_phone_number.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33276999","text":"class Solution:\n def dayOfYear(self, date: str) -> int:\n tmp = date.split('-') #'-'를 기준으로 split해서 tmp list를 만들기 \n year = int(tmp[0])\n month = int(tmp[1])\n day = int(tmp[2])\n \n days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] #월별로 며칠씩 존재하는지 list로 만들기 \n days = [int (i) for i in days] #str 리스틀 int로 \n \n ans = 0\n ans = sum(days[:month-1])+day # ex)7월 26일이면 6월까지 days를 더하고 + 26\n \n if ((year % 4 == 0 and year % 100 != 0) or year % 400 == 0) and month >2: #윤년일 조건 and 2월 이후 \n ans += 1\n \n return ans\n\nprint(Solution.dayOfYear(\"\",\"2019-01-09\"))\n\n'''\n5-4 Day of the Year\nhttps://leetcode.com/problems/day-of-the-year/\n'''","sub_path":"슬기/5-4_JeongSeulgi.py","file_name":"5-4_JeongSeulgi.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224500687","text":"def quick_sort(nums: list) -> None:\n def swap(arr: list, index_1: int, index_2: int) -> None:\n temp = arr[index_1]\n arr[index_1] = arr[index_2]\n arr[index_2] = temp\n\n def partition(arr: list, left: int, right: int) -> int:\n temp = left\n pivot = arr[left]\n while True:\n while left < right:\n if nums[left] > pivot:\n break\n else:\n left += 1\n while right > temp:\n if nums[right] < pivot:\n break\n else:\n right -= 1\n if right < left:\n break\n swap(nums, right, left)\n swap(nums, temp, right)\n return right\n\n def sort(arr: list, left: int, right: int) -> None:\n if left >= right:\n return\n else:\n split = partition(arr, left, right)\n sort(arr, left, split - 1)\n sort(arr, split + 1, right)\n sort(nums, 0, len(nums) - 1)\n\n\ntest_list = [6, 6]\nquick_sort(test_list)\nprint(test_list)\n","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"555714894","text":"import json\nfrom typing 
import TYPE_CHECKING, Any, Dict\n\nimport numpy as np\nfrom openff.units import unit\nfrom openff.utilities.utilities import has_package, requires_package\nfrom simtk import unit as simtk_unit\n\nfrom openff.interchange.exceptions import (\n MissingUnitError,\n UnitValidationError,\n UnsupportedExportError,\n)\n\nif TYPE_CHECKING or has_package(\"unyt\"):\n import unyt\n\n\nclass _FloatQuantityMeta(type):\n def __getitem__(self, t):\n return type(\"FloatQuantity\", (FloatQuantity,), {\"__unit__\": t})\n\n\nclass FloatQuantity(float, metaclass=_FloatQuantityMeta):\n @classmethod\n def __get_validators__(cls):\n yield cls.validate_type\n\n @classmethod\n def validate_type(cls, val):\n \"\"\"Process a value tagged with units into one tagged with \"OpenFF\" style units\"\"\"\n unit_ = getattr(cls, \"__unit__\", Any)\n if unit_ is Any:\n if isinstance(val, (float, int)):\n # TODO: Can this exception be raised with knowledge of the field it's in?\n raise MissingUnitError(f\"Value {val} needs to be tagged with a unit\")\n elif isinstance(val, unit.Quantity):\n return unit.Quantity(val)\n elif isinstance(val, simtk_unit.Quantity):\n return _from_omm_quantity(val)\n else:\n raise UnitValidationError(\n f\"Could not validate data of type {type(val)}\"\n )\n else:\n unit_ = unit(unit_)\n if isinstance(val, unit.Quantity):\n # some custom behavior could go here\n assert unit_.dimensionality == val.dimensionality\n # return through converting to some intended default units (taken from the class)\n return val.to(unit_)\n # could return here, without converting\n # (could be inconsistent with data model - heteregenous but compatible units)\n # return val\n if isinstance(val, simtk_unit.Quantity):\n return _from_omm_quantity(val).to(unit_)\n if has_package(\"unyt\"):\n if isinstance(val, unyt.unyt_quantity):\n return _from_unyt_quantity(val).to(unit_)\n if isinstance(val, (float, int)) and not isinstance(val, bool):\n return val * unit_\n if isinstance(val, str):\n # could do custom deserialization here?\n return unit.Quantity(val).to(unit_)\n raise UnitValidationError(f\"Could not validate data of type {type(val)}\")\n\n\ndef _from_omm_quantity(val: simtk_unit.Quantity):\n \"\"\"Helper function to convert float or array quantities tagged with SimTK/OpenMM units to\n a Pint-compatible quantity\"\"\"\n unit_ = val.unit\n val_ = val.value_in_unit(unit_)\n if type(val_) in {float, int}:\n unit_ = val.unit\n return val_ * unit.Unit(str(unit_))\n elif type(val_) in {tuple, list, np.ndarray}:\n array = np.asarray(val_)\n return array * unit.Unit(str(unit_))\n else:\n raise UnitValidationError(\n \"Found a simtk.unit.Unit wrapped around something other than a float-like \"\n f\"or np.ndarray-like. Found a unit wrapped around type {type(val_)}.\"\n )\n\n\n@requires_package(\"unyt\")\ndef _from_unyt_quantity(val: \"unyt.unyt_array\"):\n \"\"\"Helper function to convert unyt arrays to Pint quantities\"\"\"\n quantity = val.to_pint()\n # Ensure a float-like quantity is a float, not a scalar array\n if isinstance(val, unyt.unyt_quantity):\n quantity = float(quantity.magnitude) * quantity.units\n return quantity\n\n\nclass QuantityEncoder(json.JSONEncoder):\n \"\"\"JSON encoder for unit-wrapped floats and np arrays. 
Should work\n for both FloatQuantity and ArrayQuantity\"\"\"\n\n def default(self, obj):\n if isinstance(obj, unit.Quantity):\n if isinstance(obj.magnitude, (float, int)):\n data = obj.magnitude\n elif isinstance(obj.magnitude, np.ndarray):\n data = obj.magnitude.tolist()\n else:\n # This shouldn't ever be hit if our object models\n # behave in ways we expect?\n raise UnsupportedExportError(\n f\"trying to serialize unsupported type {type(obj.magnitude)}\"\n )\n return {\n \"val\": data,\n \"unit\": str(obj.units),\n }\n\n\ndef custom_quantity_encoder(v):\n \"\"\"Wrapper around json.dumps that uses QuantityEncoder\"\"\"\n return json.dumps(v, cls=QuantityEncoder)\n\n\ndef json_loader(data: str) -> dict:\n # TODO: recursively call this function for nested models\n out: Dict = json.loads(data)\n for key, val in out.items():\n try:\n # Directly look for an encoded FloatQuantity/ArrayQuantity,\n # which is itself a dict\n v = json.loads(val)\n except json.JSONDecodeError:\n # Handles some cases of the val being a primitive type\n continue\n # TODO: More gracefully parse non-FloatQuantity/ArrayQuantity dicts\n unit_ = unit(v[\"unit\"])\n val = v[\"val\"]\n out[key] = unit_ * val\n return out\n\n\nclass ArrayQuantityMeta(type):\n def __getitem__(self, t):\n return type(\"ArrayQuantity\", (ArrayQuantity,), {\"__unit__\": t})\n\n\nif TYPE_CHECKING:\n ArrayQuantity = np.ndarray\nelse:\n\n class ArrayQuantity(float, metaclass=ArrayQuantityMeta):\n @classmethod\n def __get_validators__(cls):\n yield cls.validate_type\n\n @classmethod\n def validate_type(cls, val):\n \"\"\"Process an array tagged with units into one tagged with \"OpenFF\" style units\"\"\"\n unit_ = getattr(cls, \"__unit__\", Any)\n if unit_ is Any:\n if isinstance(val, (list, np.ndarray)):\n # TODO: Can this exception be raised with knowledge of the field it's in?\n raise MissingUnitError(\n f\"Value {val} needs to be tagged with a unit\"\n )\n elif isinstance(val, unit.Quantity):\n # Redundant cast? Maybe this handles pint vs openff.interchange.unit?\n return unit.Quantity(val)\n elif isinstance(val, simtk_unit.Quantity):\n return _from_omm_quantity(val)\n else:\n raise UnitValidationError(\n f\"Could not validate data of type {type(val)}\"\n )\n else:\n unit_ = unit(unit_)\n if isinstance(val, unit.Quantity):\n assert unit_.dimensionality == val.dimensionality\n return val.to(unit_)\n if isinstance(val, simtk_unit.Quantity):\n return _from_omm_quantity(val).to(unit_)\n if isinstance(val, (np.ndarray, list)):\n if has_package(\"unyt\"):\n # Must check for unyt_array, not unyt_quantity, which is a subclass\n if isinstance(val, unyt.unyt_array):\n return _from_unyt_quantity(val).to(unit_)\n else:\n return val * unit_\n else:\n return val * unit_\n if isinstance(val, bytes):\n # Define outside loop\n dt = np.dtype(int)\n dt.newbyteorder(\"<\")\n return np.frombuffer(val, dtype=dt) * unit_\n if isinstance(val, str):\n # could do custom deserialization here?\n raise NotImplementedError\n # return unit.Quantity(val).to(unit_)\n raise UnitValidationError(\n f\"Could not validate data of type {type(val)}\"\n )\n","sub_path":"openff/interchange/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597610050","text":"#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------------------------\n\nimport roslib\nimport rospy\nimport socket\nimport geometry_msgs.msg\nimport math\nimport tf\nimport struct\nimport numpy as np\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\n\nglobal trans\nglobal rot\n\nglobal brtrans\nglobal brrot\n\n# -----------------------------------------------------------------------------\n#\ndef initialposeCB(msg):\n\t#robot odom-base (input)\n\tglobal trans\n\tglobal rot\n\n\t#robot map-odom (output)\n\tglobal brtrans\n\tglobal brrot\n\t\n\t#massage to translation, rotation\n\tinittrans=(msg.pose.pose.position.x,msg.pose.pose.position.y,msg.pose.pose.position.z)\n\tinitposequot=(msg.pose.pose.orientation.x,\n\t\tmsg.pose.pose.orientation.y,\n\t\tmsg.pose.pose.orientation.z,\n\t\tmsg.pose.pose.orientation.w)\t\t\n\tinitrot=tf.transformations.quaternion_matrix(initposequot)\n\tmap2foot= np.dot(tf.transformations.translation_matrix(inittrans),initrot)\n\todom2foot = np.dot(tf.transformations.translation_matrix(trans),tf.transformations.quaternion_matrix(rot))\t\n\t\n\t\n\tfoot2odom=np.linalg.inv(odom2foot)\t\n\t\n\tmap2odom=np.dot(map2foot,foot2odom)\n\tbr = tf.TransformBroadcaster()\t\n\t#map2foot=np.dot(map2holo,holo2foot)\n\tbrtrans = (map2odom[0][3], map2odom[1][3], map2odom[2][3])\n\tbrrot = tf.transformations.quaternion_from_matrix(map2odom)\n\t\n# -----------------------------------------------------------------------------\n#\nif __name__ == '__main__':\n\trospy.init_node('localizer')\n\n\tlistener = tf.TransformListener()\n\n\t# from ros\n\tsub = rospy.Subscriber('/initialpose', PoseWithCovarianceStamped, initialposeCB)\n\n\t# from dynamic_adjuster.py\n\tsub2 = rospy.Subscriber('/initialpose_h', PoseWithCovarianceStamped, initialposeCB)\n\n\tbr = tf.TransformBroadcaster()\n\tbrtrans=(0,0, 0)\n\tbrrot=(0,0,0,1)\n\t\n\trate = rospy.Rate(10)\n\twhile not rospy.is_shutdown():\n\t\trospy.loginfo(\"Getting transform for '/base_footprint'!\")\n\t\ttry:\n\t\t\t# obtain robot odometry to base_footprint (for pepper)\n\t\t\t(trans, rot) = listener.lookupTransform('/odom', '/base_footprint', rospy.Time(0))\n\t\t\trospy.loginfo(\"Got transform for '/base_footprint'!\")\n\t\texcept (tf.LookupException, tf.ConnectivityException,tf.ExtrapolationException):\n\t\t\trospy.logwarn(\"tf error. 
Unable to get transform for '/base_footprint'!\")\n\t\t\tcontinue\n\n\t\tbr.sendTransform(brtrans, brrot, rospy.Time.now(), \"/odom\", \"/map\")\n\n\t\trate.sleep()\n\n\trospy.loginfo(\"localizer.py exit...\")\n","sub_path":"linux/HoloLens_Localization/scripts/localizer.py","file_name":"localizer.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577487941","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport re\n\nimport datalad.api\nfrom datalad.distribution.dataset import require_dataset\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='setup the dataset siblings')\nparser.add_argument('path', help = 'path')\nparser.add_argument('--confidential', help = 'setup access to confidential data', action = 'store_true')\nparser.add_argument('--private', help = 'setup access to private data', action = 'store_true')\n\nargs = parser.parse_args()\n\nconfidential = args.confidential or args.private\nprivate = args.private\n\nds = require_dataset(\n sys.argv[1],\n check_installed = True,\n purpose = 'configure'\n)\n\norigin_url = datalad.api.siblings(name = 'origin', dataset = ds)[0]['url']\ndataset_name = os.path.splitext(os.path.basename(origin_url))[0]\nprivate_organization = 'LAAC-LSCP'\nel1000_organization = 'EL1000'\n\nel1000_url = \"git@gin.g-node.org:/{}/{}.git\".format(el1000_organization, dataset_name)\nprivate_url = \"git@gin.g-node.org:/{}/{}.git\".format(private_organization, dataset_name)\nconfidential_url = \"git@gin.g-node.org:/{}/{}-confidential.git\".format(el1000_organization, dataset_name)\n\nsiblings = {\n 'private': {'url': private_url, 'wanted': 'include=*' },\n 'el1000': {'url': el1000_url, 'wanted': '(metadata=EL1000=*) and (exclude=**/confidential/*)'},\n 'confidential': {'url': confidential_url, 'wanted': '(metadata=EL1000=*) and (include=**/confidential/*)'}\n}\n\norigin = None\nfor sibling in siblings.keys():\n if siblings[sibling]['url'] == origin_url:\n origin = sibling\n\nif origin is None:\n raise Exception('failed to determine where this repository has been cloned from.')\n\nif origin == 'confidential':\n raise Exception('please install the dataset from {}'.format(el1000_url))\n\nargs = parser.parse_args()\n\nds = require_dataset(\n args.path,\n check_installed = True,\n purpose = 'configuration'\n)\n\nfor sibling in siblings.keys():\n name = 'origin' if sibling == origin else sibling\n\n if sibling == 'private' and not private:\n continue\n\n if sibling == 'confidential' and not confidential:\n continue\n\n datalad.api.siblings(\n name = name,\n dataset = ds,\n action = 'configure',\n url = siblings[sibling]['url']\n )\n\n datalad.api.siblings(\n name = name,\n dataset = ds,\n action = 'configure',\n annex_wanted = siblings[sibling]['wanted'],\n annex_required = siblings[sibling]['wanted']\n )\n\navailable_siblings = {sibling['name'] for sibling in datalad.api.siblings(dataset = ds)}\n\ndatalad.api.siblings(\n name = 'origin',\n dataset = ds,\n action = 'configure',\n publish_depends = (set(siblings.keys()) & available_siblings) - {'origin', origin}\n)\n","sub_path":".datalad/procedures/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641036420","text":"\"\"\"\n------------------------------------------------------------------------------\nMango 802.11 Reference Design Experiments Framework - Continuous Log 
capture\n------------------------------------------------------------------------------\nLicense: Copyright 2014-2016, Mango Communications. All rights reserved.\n Distributed under the WARP license (http://warpproject.org/license)\n------------------------------------------------------------------------------\nThis script uses the 802.11 ref design and wlan_exp to create a log\nfile that contains all data assocated with an interactive experiment.\n\nHardware Setup:\n - Requires one WARP v3 node\n - PC NIC and ETH B on WARP v3 nodes connected to common Ethernet switch\n\nRequired Script Changes:\n - Set NETWORK to the IP address of your host PC NIC network (eg X.Y.Z.0 for IP X.Y.Z.W)\n - Set NODE_SERIAL_LIST to the serial number of your WARP node\n\nDescription:\n This script initializes one WARP v3 node. It will periodically update \ninformation on the screen about the log. The script will also read the log \ndata every LOG_READ_TIME seconds, write it to the hdf5 file, HDF5_FILENAME, \nand continue until MAX_LOG_SIZE is reached or the user ends the experiment.\n------------------------------------------------------------------------------\n\"\"\"\nimport sys\nimport time\nimport datetime\nimport threading\n\nimport wlan_exp.util as wlan_exp_util\nimport wlan_exp.config as wlan_exp_config\n\nimport wlan_exp.ltg as ltg\n\nimport wlan_exp.log.util as log_util\nimport wlan_exp.log.util_hdf as hdf_util\n\n# Fix to support Python 2.x and 3.x\nif sys.version[0]==\"3\": raw_input=input\n\n\n#-----------------------------------------------------------------------------\n# Experiment Variables\n#-----------------------------------------------------------------------------\n# Network / Node information\nNETWORK = '10.0.0.0'\nUSE_JUMBO_ETH_FRAMES = False\nNODE_SERIAL_LIST = ['W3-a-00260', 'W3-a-00249','W3-a-00137']\n\n# BSS parameters\nSSID = \"WARP Log Process\"\nCHANNEL = 14\nBEACON_INTERVAL = 100\n\n# Interval for printing\nPRINT_TIME = 1\n\n# Logging variables\nRUN_TIME = 240\n\n#-----------------------------------------------------------------------------\n# Global Variables\n#-----------------------------------------------------------------------------\nnetwork_config = None\nnodes = []\nnode = None\nexp_done = False\ninput_done = False\ntimeout = 0.01\n\nattr_dict = {}\n\n\n#-----------------------------------------------------------------------------\n# Local Helper Utilities\n#-----------------------------------------------------------------------------\ndef get_exp_duration_str(start_time):\n \"\"\"Gets the duration str of the experiment since start_time.\"\"\"\n return \"Duration: {0:8.0f} sec\".format(time.time() - start_time)\n\n# End def\n\ndef print_node_state(start_time):\n \"\"\"Print the current state of the node.\"\"\"\n\n msg = \"\\r\"\n msg += get_exp_duration_str(start_time)\n msg += \" \" * 5\n\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n# End def\n\n\n\n#-----------------------------------------------------------------------------\n# Experiment Script\n#-----------------------------------------------------------------------------\ndef init_experiment():\n \"\"\"Initialize the experiment.\"\"\"\n global n_ap, n_sta1, n_sta2, network_config, nodes, attr_dict\n\n print(\"\\nInitializing experiment\\n\")\n\n # Log attributes about the experiment\n attr_dict['exp_start_time'] = log_util.convert_datetime_to_log_time_str(datetime.datetime.utcnow())\n\n # Create an object that describes the network configuration of the host PC\n network_config = 
wlan_exp_config.WlanExpNetworkConfiguration(network=NETWORK,\n jumbo_frame_support=USE_JUMBO_ETH_FRAMES)\n\n # Create an object that describes the WARP v3 nodes that will be used in this experiment\n nodes_config = wlan_exp_config.WlanExpNodesConfiguration(network_config=network_config,\n serial_numbers=NODE_SERIAL_LIST)\n\n # Initialize the Nodes\n # This command will fail if either WARP v3 node does not respond\n nodes = wlan_exp_util.init_nodes(nodes_config, network_config)\n\n n_ap_l = wlan_exp_util.filter_nodes(nodes=nodes, mac_high='AP', serial_number=NODE_SERIAL_LIST)\n n_sta_l = wlan_exp_util.filter_nodes(nodes=nodes, mac_high='STA', serial_number=NODE_SERIAL_LIST)\n\n # Check that setup is valid\n if len(n_ap_l) == 1 and len(n_sta_l) == 2:\n # Extract the two nodes from the lists for easier referencing below\n n_ap = n_ap_l[0]\n n_sta1 = n_sta_l[0]\n n_sta2 = n_sta_l[1]\n elif len(n_ap_l) == 1 and len(n_sta_l) == 1:\n # NOTE: with a single STA, the second LTG flow below is skipped\n n_ap = n_ap_l[0]\n n_sta1 = n_sta_l[0]\n n_sta2 = None\n else:\n print(\"ERROR: Node configurations did not match requirements of script.\\n\")\n print(\" Ensure three nodes are ready, one using the AP design and two using the STA design\\n\")\n sys.exit(1)\n\n # Configure the AP to reject authentication requests from wireless clients\n # - Uncomment this line to block any wireless associations during the experiment\n # n_ap.set_authentication_address_filter(allow='NONE')\n\n # Configure AP BSS\n n_ap.configure_bss(ssid=SSID, channel=CHANNEL, beacon_interval=BEACON_INTERVAL)\n\n # Establish the association state between nodes\n # - This will change the STA to the appropriate channel\n n_ap.add_association(n_sta1)\n if n_sta2 is not None:\n n_ap.add_association(n_sta2)\n\n # Check that the nodes are part of the same BSS. Otherwise, the LTGs below will fail.\n if not (wlan_exp_util.check_bss_membership([n_ap, n_sta1]) and\n (n_sta2 is None or wlan_exp_util.check_bss_membership([n_ap, n_sta2]))):\n print(\"\\nERROR: Nodes are not part of the same BSS.\")\n wlan_exp_util.check_bss_membership([n_ap, n_sta1], verbose=True)\n if n_sta2 is not None:\n wlan_exp_util.check_bss_membership([n_ap, n_sta2], verbose=True)\n print(\"Ensure that both nodes are part of the same BSS.\")\n sys.exit(1)\n\n # Initialize all nodes into a known default state so the log contents are predictable:\n #\n # Set each node into the default state\n for tmp_node in nodes:\n # Issue a reset command to stop current operation / initialize components\n tmp_node.reset(log=True, txrx_counts=True, ltg=True, queue_data=True) # Do not reset associations/bss_info\n\n # Configure the log\n tmp_node.log_configure(log_full_payloads=True)\n\n# End def\n\n\ndef setup_experiment():\n \"\"\"Setup the experiment.\"\"\"\n global node\n\n # Check that setup is valid (one AP plus one or two STA nodes)\n if (len(nodes) in (2, 3)):\n # Extract the node from the list for easier referencing below\n node = n_ap\n else:\n print(\"ERROR: Node configurations did not match requirements of script.\\n\")\n return\n\n# End def\n\n\ndef run_experiment():\n \"\"\"Run the experiment.\"\"\"\n global network_config, node, log_container, exp_done, input_done\n\n print(\"\\nRun Experiment:\\n\")\n\n # Get the start time\n start_time = time.time()\n\n # Print the current state of the node\n print_node_state(start_time)\n\n print(\"\\nStart LTG - AP -> STA\")\n # Start a flow from the AP's local traffic generator (LTG) to each STA\n # - Set the flow to 1400 byte payloads, fully backlogged (0 usec between new pkts), run forever\n # - Start the flow immediately\n ap_ltg_id1 = n_ap.ltg_configure(ltg.FlowConfigCBR(dest_addr=n_sta1.wlan_mac_address,\n payload_length=1400,\n interval=0), auto_start=True)\n if n_sta2 is not None:\n ap_ltg_id2 = n_ap.ltg_configure(ltg.FlowConfigCBR(dest_addr=n_sta2.wlan_mac_address,\n payload_length=1400,\n interval=0), auto_start=True)\n # Let the LTG flows run at the new rate\n time.sleep(RUN_TIME / 3)\n\n# End def\n\n\ndef end_experiment():\n \"\"\"Experiment cleanup / post processing.\"\"\"\n global node, log_container\n print(\"\\nEnding Transmission\\n\")\n\n print(\"Done.\")\n return\n\n# End def\n\n\n#-----------------------------------------------------------------------------\n# Main Function\n#-----------------------------------------------------------------------------\nif __name__ == '__main__':\n\n # Create thread for experiment\n exp_thread = threading.Thread(target=run_experiment)\n exp_thread.daemon = True\n\n try:\n # Initialize the experiment\n init_experiment()\n\n # Setup the experiment\n setup_experiment()\n\n # Start the experiment loop thread\n exp_thread.start()\n\n # See if there is any input from the user\n while not input_done:\n sys.stdout.flush()\n temp = raw_input(\"\")\n\n if temp != '':\n user_input = temp.strip()\n user_input = user_input.upper()\n\n if ((user_input == 'Q') or (user_input == 'QUIT') or (user_input == 'EXIT')):\n input_done = True\n exp_done = True\n\n # Wait for all threads\n exp_thread.join()\n sys.stdout.flush()\n\n # End the experiment\n end_experiment()\n\n except KeyboardInterrupt:\n exp_done = True\n input_done = True\n\n # If there is a keyboard interrupt, then end the experiment\n end_experiment()\n\n print(\"\\nExperiment Finished.\")\n","sub_path":"Python_Reference/examples/log/tx_continuous.py","file_name":"tx_continuous.py","file_ext":"py","file_size_in_byte":9708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"35081952","text":"# -*- coding: utf-8 -*-\nimport json\nimport requests\nimport codecs\nfrom bs4 import BeautifulSoup\nimport time\nimport random\n\ntotal_count = 500*5 # total number of records saved so far\ncurrent_count = 0 # number of records saved in the current file\ncurrent_file = 5 # current output file number\nlimit = 500 # how many records to save per file\ntotal_amount = 49053 # 64387\n\n# fetch the movie data from index_**.jsonl\ndef import_movie_index():\n global total_count\n path = 'C:\\Apps\\scraping\\ymovie-collector\\\\all_index_unique.jsonl'\n\n # a plain open() does not convert the characters properly,\n # so open it as utf-8 via codecs\n f = codecs.open( path , 'r','utf-8')\n # read one line at a time\n line = f.readline()\n i = 0\n while line:\n # read one line and convert the JSON to a dict with loads\n one_row = json.loads(line)\n url = 'https://movies.yahoo.co.jp/movie/'+str(one_row[\"y_id\"])\n if i >= total_count: # skip everything up to total_count (to resume after an interruption)\n get_one_movie(url,one_row[\"y_id\"])\n i += 1\n line = f.readline()\n\n f.close()\n\n\ndef get_one_movie(url,y_id):\n time.sleep(random.uniform(3.0,5.0))\n response = requests.get(url) # fetch the page from the web with requests\n soup = BeautifulSoup(response.text, \"lxml\") # parse the elements\n filtered_data = soup.find(\"div\", {\"id\": \"mvinf\"}).find_all(\"tr\") # the data is in the tr elements of the div with id mvinf\n\n movie_data = {}\n movie_data[\"yahoo_id\"] = y_id\n\n# th holds the index label (e.g. the title), tr holds the values (e.g. Terminator)\n for tr in filtered_data:\n target_data = tr.find(\"td\").text\n # extract the text inside td with .text -> it may hold several values (actors: Schwarzenegger, Stallone, Bruce Willis) and be full of newlines\n # so -> 1: split on newlines, 2: keep everything that is not just a newline https://qiita.com/poorko/items/9140c75415d748633a10\n\n # 1: split on newlines with splitlines\n obtained_data = []\n for line in target_data.splitlines():\n # 2: append only when the line has content (i.e. it is not just a newline)\n if line:\n obtained_data.append(line)\n # if there is only one value, store it as text instead of a list (title, release date, etc.)\n if len(obtained_data)<=1:\n obtained_data = ''.join(obtained_data)\n # save the values under the th label as the index (actors: Schwarzenegger, Stallone, Bruce Willis)\n movie_data[tr.find(\"th\").text] = obtained_data\n\n # print (movie_data)\n # print(\"{}\".format(json.dumps(movie_data,indent=4,ensure_ascii=False))) https://qiita.com/wakaba130/items/5f54aed913156dc4438f\n export(movie_data)\n\n\ndef export(data):\n # pull in the global variables\n global total_count\n global current_count\n global current_file\n global limit\n global total_amount\n # once limit is reached, bump the file name, i.e. save into the next file\n if current_count >= limit:\n current_file += 1\n current_count = 0\n current_count +=1\n total_count +=1\n # output in order to know current status\n print(\"current_file: \", str(current_file).zfill(3))\n print(\"current_count: \", current_count)\n print(\"total_count: \", total_count)\n print(\"progress: \"+\"{0:.2f}\".format(total_count/total_amount*100)+\"%\")\n print(\"{}\".format(json.dumps(data,indent=4,ensure_ascii=False)))\n\n filename = \"scraped_data_\" + str(current_file).zfill(3) # zero-pad the file name\n # path = './ymovie-collector/'+str(filename)+'.jsonl'\n path = 'C:\\Apps\\scraping\\ymovie-collector\\\\' + str(filename)+ '.jsonl'\n # write to path\n # writing with the default open() garbles the characters, so save the JSON via codecs with utf-8 https://daily-fruit.com/programing/python/375/\n f = codecs.open( path , 'a','utf-8')\n # dump converts the dict to JSON; without ensure_ascii=False the characters get garbled https://www.sejuku.net/blog/79338\n json.dump(data, f, ensure_ascii=False)\n f.write('\\n')\n f.close()\n\n\n\nif __name__ == '__main__':\n import_movie_index()\n# technical note\n# find returns a plain soup object, but find_all returns a collection of results, so you cannot call find or find_all on it again directly; to reuse them you have to unpack the collection with a for loop","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"75704313","text":"import data_loader\nimport train_identifier\nfrom utils import log\nimport utils\nfrom pathlib import Path\n\n\nroot = Path('.')\ntraining_dir = root / 'training_data'\nval_dir = root / 'validation_data'\ntest_dir = root / 'test_data'\nfs_tr_dir = training_dir / 'expressive_all_tr' / 'faceScrub_train'\nfs_val_dir = val_dir / 'expressive_all_val' / 
'faceScrub_val'\n\n\nutils.set_logger('log')\n\nlog(utils.separator())\n\n# # data_loader.convert_data_to_vectors(root_path_str='./Data7', text_file_str='tr_data.txt')\n# # data_loader.convert_data_to_vectors(root_path_str='./Validation', text_file_str='val_data.txt')\n# # data_loader.convert_data_to_vectors(root_path_str='./Test', text_file_str='test_data.txt')\n\n\n# m = train_identifier.train_model(training_dir / 'expressive_all_tr' / 'Brynn_Szczesniak', fs_tr_dir, .1, 'linear')\n# model_dict = {'Brynn_Szczesniak': m}\n\n# # c = .01\n# # for i in range (1, 8):\n# # c *= 10\n# # m = train_identifier.train_model('./Data6', c, 'linear')\n# # train_identifier.test_model(m, \"./Validation\", \"val_data.txt\", 'Validation')\n\n# # m = train_identifier.train_model('./Data7', 1000000, 'linear')\n# # train_identifier.test_model(model_dict, \"./Will_val\", \"val_data.txt\", 'Validation')\n# train_identifier.test_model_all(model_dict, val_dir / 'expressive_all_val', \"val_data.txt\", 'Validation')\n# train_identifier.test_model(m, val_dir / 'expressive_all_val' / 'Brynn_Szczesniak', fs_val_dir, 'val_data.txt', 'Validation')\n\ndata_loader.convert_data_to_vectors(root_path_str='./training_data/expressive_all_tr', text_file_str='tr_data.txt')\ndata_loader.convert_data_to_vectors(root_path_str='./validation_data/expressive_all_val', text_file_str='val_data.txt')\ndata_loader.convert_data_to_vectors(root_path_str='./test_data/expressive_all_test', text_file_str='test_data.txt')\n\n\n\nlog(utils.separator())\n","sub_path":"run_training_old.py","file_name":"run_training_old.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384202217","text":"import cv2\nimport numpy as np\n\ndef filter_by_hist(field, obj_hist):\n\n field_hsv = cv2.cvtColor(field.copy(), cv2.COLOR_BGR2HSV)\n\n obj_by_hist = cv2.calcBackProject([field_hsv], [0, 1], obj_hist, [0, 180, 0, 256], 1)\n\n # # Now convolute with circular disc\n disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n cv2.filter2D(obj_by_hist, -1, disc, obj_by_hist)\n\n ret, thresh = cv2.threshold(obj_by_hist, 50, 255, 0)\n thresh = cv2.merge((thresh, thresh, thresh))\n\n team_mask = cv2.bitwise_and(field, thresh)\n\n return team_mask\n\npath = 'image/'\n\n# image = cv2.imread(path + 'foot.png')\nimage = cv2.imread(path + 'h2.jpeg')\n\n\nimage_h, image_w = image.shape[:2]\nimage_k = 2\nimage_w = int(image_w/image_k)\nimage_h = int(image_h/image_k)\n\nimage = cv2.resize(image, (image_w, image_h))\n\n\nbox = cv2.selectROI('roi', image, False)\nprint(box)\ncv2.destroyWindow('roi')\n\nroi = image[int(box[1]):int(box[1]+box[3]),\n int(box[0]):int(box[0]+box[2])]\n\nhsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n\ntarget = image.copy()\nhsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)\n\n# calculating object histogram\nroihist = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )\n\n# normalize histogram and apply backprojection\ncv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)\n\nres = filter_by_hist(target, roihist)\n\n# ----- color quantization -----\ndef kmeans(image, k=3):\n img = image.copy()\n\n Z = img.reshape((-1,3))\n\n # convert to np.float32\n Z = np.float32(Z)\n\n # define criteria, number of clusters(K) and apply kmeans()\n # criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 0.1)\n ret, label, center = cv2.kmeans(Z, k, None, criteria, 10, 
cv2.KMEANS_RANDOM_CENTERS)\n\n # Now convert back into uint8, and make original image\n center = np.uint8(center)\n res = center[label.flatten()]\n res2 = res.reshape((img.shape))\n\n return res2\n\n# res = np.vstack((target,res))\n# cv2.imwrite('res.jpg',res)\n\nres2 = kmeans(res, k=2)\n\n\ncv2.imshow('res', np.hstack([res, res2]))\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()","sub_path":"tracking/histogram 2.py","file_name":"histogram 2.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"75502947","text":"\"\"\"\nWrite a function that takes a non-empty array of distinct integers and an integer with a target sum.\n\nIf any two numbers in the input array sum up to the target sum, the function should return them in an array.\n\nIf no 2 numbers sum up to the target sum, return an empty array.\n\n\"\"\"\n\ndef twoNumberSum(array, targetSum):\n\n nums = {}\n\n for num in array:\n potential = targetSum - num\n\n if potential in nums:\n return sorted([potential, num])\n\n else:\n nums[num] = True\n\n return []\n","sub_path":"Arrays/twoNumberSum.py","file_name":"twoNumberSum.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242653081","text":"#! /usr/bin/env python\n\nimport roslib\nfrom free_gait import *\nimport threading\nfrom actionlib_msgs.msg import GoalStatus\nimport roslaunch\n\n\nclass ActionState:\n UNINITIALIZED = 0\n INITIALIZED = 1\n PENDING = 2\n ACTIVE = 3\n DONE = 4\n\n\nclass ActionBase(object):\n\n def __init__(self, client, directory = None):\n self.state = ActionState.UNINITIALIZED\n self.client = client\n self.directory = directory\n self.goal = None\n self.feedback = None\n self.result = None\n self.timeout = rospy.Duration()\n # If true, action can run in background after state DONE.\n self.keep_alive = False\n self.state = ActionState.INITIALIZED\n\n def start(self):\n self.state = ActionState.PENDING\n self._send_goal()\n\n def wait_for_result(self):\n wait_for_done = WaitForDone(self)\n wait_for_done.wait();\n\n def stop(self):\n pass\n\n def _send_goal(self):\n if self.goal is None:\n self.result = free_gait_msgs.msg.ExecuteStepsResult()\n self.result.status = free_gait_msgs.msg.ExecuteStepsResult.RESULT_UNKNOWN\n self.state = ActionState.DONE\n return\n\n if self.client.gh:\n self.client.stop_tracking_goal()\n self.client.wait_for_server()\n self.client.send_goal(self.goal,\n done_cb=self._done_callback,\n active_cb=self._active_callback,\n feedback_cb=self._feedback_callback)\n\n def _active_callback(self):\n self.state = ActionState.ACTIVE\n\n def _feedback_callback(self, feedback):\n self.feedback = feedback\n\n def _done_callback(self, status, result):\n self.state = ActionState.DONE\n self.result = result\n if status != GoalStatus.SUCCEEDED:\n self.stop()\n\n\nclass SimpleAction(ActionBase):\n\n def __init__(self, client, goal):\n ActionBase.__init__(self, client, None)\n self.goal = goal\n\n\nclass ContinuousAction(ActionBase):\n\n def __init__(self, client, directory = None):\n ActionBase.__init__(self, client, directory)\n self.keep_alive = True\n\n def start(self):\n self.state = ActionState.PENDING\n\n def wait_for_result(self):\n # Immediate return because action runs in background.\n self.result = free_gait_msgs.msg.ExecuteStepsResult()\n self.result.status = self.result.RESULT_UNKNOWN\n\n\nclass ExternalAction(ActionBase):\n\n def __init__(self, client, 
file_path):\n ActionBase.__init__(self, client, None)\n self.file_path = file_path\n self.keep_alive = True\n\n def start(self):\n self.state = ActionState.PENDING\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n roslaunch.configure_logging(uuid)\n self.launch = roslaunch.parent.ROSLaunchParent(uuid, [self.file_path])\n self.launch.start()\n\n def wait_for_result(self):\n # Immediate return because action runs externally.\n self.result = free_gait_msgs.msg.ExecuteStepsResult()\n self.result.status = self.result.RESULT_UNKNOWN\n\n def stop(self):\n self.launch.shutdown()\n\n\nclass TriggerOnFeedback:\n\n def __init__(self, n_steps_in_queue, phase_of_step):\n self.n_steps_in_queue = n_steps_in_queue\n self.phase_of_step = phase_of_step\n self.feedback = None\n\n def check(self, feedback):\n self.feedback = feedback\n if self.feedback.queue_size <= self.n_steps_in_queue and self.feedback.phase >= self.phase_of_step:\n return True\n else:\n return False\n\n\nclass WaitForDone:\n\n def __init__(self, action, timeout = rospy.Duration(), loop_period = rospy.Duration(0.1)):\n self.action = action\n self.timeout = timeout\n self.loop_period = loop_period\n self.done_condition = threading.Condition()\n\n def wait(self):\n timeout_time = rospy.get_rostime() + self.timeout\n loop_period = rospy.Duration(0.1)\n with self.done_condition:\n while not rospy.is_shutdown():\n time_left = timeout_time - rospy.get_rostime()\n if self.timeout > rospy.Duration(0.0) and time_left <= rospy.Duration(0.0):\n break\n\n if self.action.state == ActionState.DONE:\n break\n\n if time_left > loop_period or self.timeout == rospy.Duration():\n time_left = loop_period\n\n self.done_condition.wait(time_left.to_sec())\n\n return self.action.state == ActionState.DONE\n","sub_path":"free_gait_python/src/free_gait/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110001910","text":"# Polsby Popper\nimport shapefile\n#import shapely\nfrom shapely import geometry\nimport math\n\nsf = shapefile.Reader(\"FinalOutput/New_VA_Congress_3.shp\")\n\n#print(\"Length = \", len(sf))\n#print(\"Type = \", sf.shapeType)\n#print(sf.fields)\n\nfor current in sf.iterShapeRecords():\n\tcurrentRecord = current.record\n\tproposedName = \"Proposed District \" + str(currentRecord['DNAME11'])\n\t#print(\"DNAME11 = \" + proposedName)\n\t\t\t\n\tcurrentShape = current.shape\n\tshapePartIndices = currentShape.parts\n\tnumParts = len(shapePartIndices)\n\tshapePoints = currentShape.points\n\tnumPoints = len(shapePoints)\n\t#print(\"parts array = \" + str(shapePartIndices))\n\ttotalArea = 0\n\tcombinedScore = 0\n\t\n\tfor i in range(numParts):\n\t\tstart = shapePartIndices[i]\n\t\tend = numPoints\n\t\tif i < numParts - 1:\n\t\t\tend = shapePartIndices[i + 1]\n\t\t\n\t\t#print(\"i = \" + str(i))\n\t\t#print(\"Num points = \" + str(end - start))\n\t\tcurrentPoly = geometry.Polygon([[pt[0], pt[1]] for pt in currentShape.points[start:end]])\n\t\tpolyArea = currentPoly.area\n\t\tpolyPerimeter = currentPoly.length\n\t\t#print(\"Area = \" + str(polyArea))\n\t\t#print(\"Length = \" + str(polyPerimeter))\n\t\tppScore = (4 * math.pi * polyArea) / (polyPerimeter * polyPerimeter)\n\t\t\n\t\tcombinedScore += (ppScore * polyArea)\n\t\ttotalArea += polyArea\n\t\n\tfinalScore = combinedScore / totalArea\n\tprint(\"Polsby-Popper Score for \" + proposedName + \" = \" + str(finalScore))\n\t#equalPerimeterArea = (polyPerimeter * 
polyPerimeter) / (4 * math.pi)\n\t#ipqScore = polyArea / equalPerimeterArea\n\t#print(\"Isoperimetric Quotient Score = \" + str(ipqScore))\n\n#print(allRecords[0])\n\n#print(len(allRecords[0]))\n\n#record = allRecords[0].record\n#print(record['POPCOUNT'])\n\n#outputFile = open(\"censusBlocks.txt\", \"w\")\n\n\n","sub_path":"workspace_william/data_dowloads/VA_Congress_New_Polsby_Popper_2.py","file_name":"VA_Congress_New_Polsby_Popper_2.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"431870990","text":"from .models import Question\n\n\ndef flagged_questions(request):\n \"\"\"\n Add the flagged questions to the context if the user is staff\n :param request: The request\n :type request: HttpRequest\n :return: The context\n :rtype: dict[string, T]\n \"\"\"\n context = {}\n if request.user.is_staff:\n nfq = Question.objects.filter(flags__isnull=False).count()\n context['n_flagged_questions'] = nfq\n return context\n","sub_path":"choice_master/chm/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"350259899","text":"import json\nimport redis\nfrom tqdm import tqdm\n\nwith open(\"artist.json\", mode=\"r\") as f:\n artist = f.read().split(\"\\n\")[:-1]\n artist_json = [json.loads(x) for x in artist]\n\nr = redis.StrictRedis(connection_pool=redis.ConnectionPool(host=\"localhost\", port=6379, db=0))\n\nfor i, x in enumerate(tqdm(artist_json)):\n r.set(x[\"name\"] + \" \" + str(x[\"id\"]), x[\"area\"] if \"area\" in x else \"\")\n\nprint(\"Registered {} records.\".format(i + 1))\n\n","sub_path":"6/60.py","file_name":"60.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"513488171","text":"import json\nimport subprocess\nimport urllib2\nfrom os import path, devnull\nfrom shutil import rmtree\n\nimport yaml\n\nFNULL = open(devnull, \"w\")\n\n\ndef print_msg(msg, verbose=False):\n if verbose:\n print(msg)\n\n\ndef load_yaml(yaml_path):\n \"\"\"\n Load yaml data from specified file.\n :param yaml_path: The path of yaml file to load\n :return: The object representing the data of the yaml file, None if load failed.\n \"\"\"\n if not path.exists(yaml_path):\n raise Exception(\"The yaml file does not exist at \" + yaml_path)\n with open(yaml_path, \"r\") as yaml_file:\n return yaml.load(yaml_file, Loader=yaml.BaseLoader)\n\n\ndef run_cmd(cmd, check_call=True, no_shell=False):\n \"\"\"\n Run a specified linux command\n :param cmd: The command to run\n :param check_call: If true, check call is used. Recommended if no data is\n needed\n :param no_shell: If true, then command output is redirected to devnull\n \"\"\"\n stdout = FNULL if no_shell else subprocess.PIPE\n if not check_call:\n process = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=subprocess.PIPE\n )\n process.communicate()\n if process.returncode > 0:\n raise Exception(\"Failed to execute command\")\n else:\n subprocess.check_call(cmd)\n\n\ndef rm(p):\n \"\"\"\n Removes the path from directory structure, if it exists.\n :param p: The path to remove.\n \"\"\"\n if path.exists(p):\n rmtree(p)\n\n\ndef request_url(url):\n \"\"\"\n Queries a specified url and returns data, if any\n :param url: The url to query\n :return: The request object, or None upon failure.\n \"\"\"\n try:\n req = urllib2.urlopen(url)\n except Exception:\n req = None\n\n return req\n\n\ndef clone_repo(git_url, clone_location):\n \"\"\"\n Clones a git repo at specified location.\n :param git_url: The url of git repo\n :param clone_location: The path to clone repo.\n \"\"\"\n cmd = [\"git\", \"clone\", git_url, clone_location]\n if path.exists(clone_location):\n rmtree(clone_location)\n run_cmd(cmd)\n","sub_path":"container_pipeline/cleanup_registry/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"413526850","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport os\nimport json\nimport numpy as np\nimport mindspore\nfrom mindspore import Tensor\nfrom mindspore.train.serialization import load_checkpoint\n\nfrom model import lenet5, resnet50\n\n\ndef predict(instance, name=\"lenet5\", model_format=\"ckpt\", class_num=10):\n # check if servable name is valid\n if name not in (\"lenet5\", \"resnet50\"):\n err_msg = \"Currently model_name only supports `lenet5` and `resnet50`!\"\n return {\"status\": 1, \"err_msg\": err_msg}\n input = np.array(json.loads(instance['data']), dtype='uint8')\n net = lenet5(class_num=class_num) if name == \"lenet5\" else resnet50(class_num=class_num)\n input = input.reshape((1, 1, 28, 28)) if name == \"lenet5\" else input.reshape((1, 3, 224, 224))\n\n # check if model_format is valid (note the one-element tuple; a bare string would do a substring check)\n if model_format not in (\"ckpt\",):\n err_msg = \"Currently model_format only supports `ckpt`!\"\n return {\"status\": 1, \"err_msg\": err_msg}\n # load checkpoint\n ckpt_path = os.path.join(\"ckpt\", name+\".\"+model_format)\n if not os.path.isfile(ckpt_path):\n err_msg = \"The model path \"+ckpt_path+\" does not exist!\"\n return {\"status\": 1, \"err_msg\": err_msg}\n load_checkpoint(ckpt_path, net=net)\n\n # execute the network to perform model prediction\n data = net(Tensor(input, mindspore.float32)).asnumpy()\n return {\"status\": 0, \"instance\": {\"shape\": data.shape, \"data\": 
json.dumps(data.tolist())}}\n","sub_path":"mindspore_serving/mindspore_serving.py","file_name":"mindspore_serving.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580042210","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 01 15:48:39 2016\r\n\r\n@author: wu34\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport utilise \r\nimport dataGen4DietAct\r\nimport buildItemIndex\r\nimport buildTypeIndex\r\nimport dietActInfoRetrv\r\nimport matplotlib.pyplot as plt\r\n\r\n# Domain = ['ActItem','DietItem','DietType','ActType']\r\nDomain = ['DietType','ActType']\r\navailable_list = ['039','044','045','048','049','050','051','052','053','054','056','057','058','059','060','061','063','064','065','066','067','068','069','070','071','072','073','074','075']\r\n\r\nlabelsDietType = utilise.string2array('1 1 0 1 1 1 0 0 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 1 0 1 1 1')\r\nlabelsActType = utilise.string2array('1 0 1 1 0 0 2 2 2 1 2 0 1 2 1 2 0 1 1 2 0 1 1 2 1 1 0 2 1')\r\n\r\ndef singleSubjectDailyArray(domain,subjectID):\r\n\t'''\r\n\tbuild daily item TF array \r\n\t'''\r\n\tif domain == 'ActItem':\r\n\t\titem_dict = dataGen4DietAct.genActItemDict()\r\n\telif domain == 'DietItem':\r\n\t\titem_dict = dataGen4DietAct.genDietItemDict()\r\n\telif domain == 'DietType':\r\n\t\titem_dict = dataGen4DietAct.genDietTypeDict()\r\n\telif domain == 'ActType':\r\n\t\titem_dict = dataGen4DietAct.genActTypeDict()\r\n\t# print item_dict\r\n\t\r\n\tduration = dietActInfoRetrv.getDuration(subjectID)\r\n\tx = duration \r\n\tn = len(item_dict)\r\n\tdims = (x,n)\r\n\tarray = np.zeros(dims)\r\n\t\r\n\tif domain == 'ActItem':\r\n\t\tfor i in range(duration):\r\n\t\t\tItemIndex = buildItemIndex.build_daily_single_activity_index(subjectID,i+1)\r\n\t\t\tfor key in item_dict:\r\n\t\t\t\tif item_dict[key] in ItemIndex:\r\n\t\t\t\t\tarray[i,key] = ItemIndex[item_dict[key]]\r\n\t\t\t\telse:\r\n\t\t\t\t\tarray[i,key] = 0.0\r\n\t\r\n\tif domain == 'DietItem':\r\n\t\tfor i in range(duration):\r\n\t\t\tItemIndex = buildItemIndex.build_daily_single_diet_index(subjectID,i+1)\r\n\t\t\t# print ItemIndex\r\n\t\t\tfor key in item_dict:\r\n\t\t\t\tif item_dict[key] in ItemIndex:\r\n\t\t\t\t\tarray[i,key] = ItemIndex[item_dict[key]]\r\n\t\t\t\telse:\r\n\t\t\t\t\tarray[i,key] = 0.0\r\n\t\r\n\tif domain == 'DietType':\r\n\t\tfor i in range(duration):\r\n\t\t\tItemIndex = buildTypeIndex.build_daily_single_diet_index(subjectID,i+1)\r\n\t\t\t# print ItemIndex\r\n\t\t\tfor key in item_dict:\r\n\t\t\t\tif item_dict[key] in ItemIndex:\r\n\t\t\t\t\tarray[i,key] = ItemIndex[item_dict[key]]\r\n\t\t\t\telse:\r\n\t\t\t\t\tarray[i,key] = 0.0\r\n\t\r\n\tif domain == 'ActType':\r\n\t\tfor i in range(duration):\r\n\t\t\tItemIndex = buildTypeIndex.build_daily_single_activity_index(subjectID,i+1)\r\n\t\t\tfor key in item_dict:\r\n\t\t\t\tif item_dict[key] in ItemIndex:\r\n\t\t\t\t\tarray[i,key] = ItemIndex[item_dict[key]]\r\n\t\t\t\telse:\r\n\t\t\t\t\tarray[i,key] = 0.0\r\n\t'''\r\n\tchange the TF array to TFIDF array. 
But the DF here is not equal to the one we use for mean Vector \r\n\t'''\r\n\t# transformer = TfidfTransformer(norm=None)\r\n\t# tfidf = transformer.fit_transform(array)\r\n\t# aa = tfidf.toarray() \r\n\t# tfidfNorm = utilise.normArray(aa)\r\n\t\r\n\t# result = utilise.normArray(array)\r\n\t\r\n\t# print array \r\n\treturn array \r\n\r\ndef whichGroup(domain,subjectID):\r\n\t'''\r\n\tTo find which group is the subject belong to \r\n\t'''\r\n\tif domain == 'ActItem':\r\n\t\tlabels = labelsActItem\r\n\tif domain == 'DietItem':\r\n\t\tlabels = labelsDietItem\r\n\tif domain == 'DietType':\r\n\t\tlabels = labelsDietType\r\n\tif domain == 'ActType':\r\n\t\tlabels = labelsActType\r\n\t\r\n\tfor i in range(len(available_list)):\r\n\t\tif available_list[i] == subjectID:\r\n\t\t\tgroupID = labels[i]\r\n\t\r\n\treturn groupID\r\n\r\ndef getMeanVec(domain,groupID):\r\n\t'''\r\n\tget the intragroup mean TF vector \r\n\t'''\r\n\tif domain == 'ActItem':\r\n\t\tlabels = labelsActItem\r\n\t\tX = dataGen4DietAct.genActItemTFArray()\r\n\tif domain == 'DietItem':\r\n\t\tlabels = labelsDietItem\r\n\t\tX = dataGen4DietAct.genDietItemTFArray()\r\n\tif domain == 'DietType':\r\n\t\tlabels = labelsDietType\r\n\t\tX = dataGen4DietAct.genDietTypeTFArray()\r\n\tif domain == 'ActType':\r\n\t\tlabels = labelsActType\r\n\t\tX = dataGen4DietAct.genActTypeTFArray()\r\n\r\n\tclass_members = labels == groupID\r\n\tnumber = 0\r\n\tsumVec = np.zeros(X.shape[1])\r\n\tfor x in X[class_members]:\r\n\t\tnumber += 1\r\n\t\tsumVec += x \r\n\tmeanVec = sumVec/number \r\n\tmeanVec.tolist()\r\n\t\r\n\t# firstMax = np.max(meanVec)\r\n\t# meanVec = meanVec/firstMax\r\n\t\r\n\treturn meanVec\r\n\t\r\ndef visSBDailyPatternIntraGroup(domain,subjectID):\r\n\t'''\r\n\tsingle subject intra group daily pattern view \r\n\t'''\t\r\n\tgroupID = whichGroup(domain,subjectID)\r\n\tmeanVec = getMeanVec(domain,groupID)\r\n\ttf = singleSubjectDailyArray(domain,subjectID)\r\n\ty = np.zeros(tf.shape[0])\r\n\tx = range(tf.shape[0])\r\n\t\r\n\tfor i in range(tf.shape[0]):\r\n\t\ty[i] = 1/np.sqrt(sum(np.power(tf[i] - meanVec, 2)))\r\n\t\t# print i,tf[i],meanVec,y[i]\r\n\t\r\n\tplt.figure()\r\n\tplt.title(domain+'_'+subjectID+'_IntraGroupDailyPattern')\r\n\tplt.plot(x,y)\r\n\tplt.savefig('visIntraGroupDailyPattern/'+domain+'/daily'+domain+'Pattern_'+subjectID)\r\n\r\ndef visDailyPatternIntraGroup():\r\n\tfor domain in Domain:\r\n\t\tfor subjectID in available_list:\r\n\t\t\tvisSBDailyPatternIntraGroup(domain,subjectID)\r\n\r\n# visSBDailyPatternIntraGroup('DietItem','060')\r\n\r\n# for subjectID in available_list:\r\n\t# aa = singleSubjectDailyArray('ActType',subjectID)\r\n\t# print aa\r\n\r\n# aa = singleSubjectDailyArray('DietItem','039')\r\n# print aa \r\n# aa = singleSubjectDailyArray('DietType','039')\r\n# print aa \r\nvisDailyPatternIntraGroup()\r\n","sub_path":"visDailyPatternIntraGroup.py","file_name":"visDailyPatternIntraGroup.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198215001","text":"import tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom tensorflow.python.ops import array_ops, tensor_array_ops, io_ops\nfrom tensorflow.python.framework import tensor_shape\n\nimport contextlib\nfrom tensorflow.contrib import rnn\n#from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import _linear\nfrom tensorflow.python.ops.math_ops import sigmoid\nfrom tensorflow.python.ops.math_ops import tanh\nfrom 
tensorflow.python.ops.rnn_cell_impl import _RNNCell as RNNCell\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\n\n\n_BIAS_VARIABLE_NAME = \"biases\"\n_WEIGHTS_VARIABLE_NAME = \"weights\"\n\nclass BasicLSTMCell2(RNNCell):\n \"\"\"Basic LSTM recurrent network cell.\n The implementation is based on: http://arxiv.org/abs/1409.2329.\n We add forget_bias (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n It does not allow cell clipping, a projection layer, and does not\n use peep-hole connections: it is the basic baseline.\n For advanced models, please use the full LSTMCell that follows.\n \"\"\"\n\n def __init__(self, in_dim, num_units, weights = None, forget_bias=1.0, input_size=None,\n state_is_tuple=True, activation=tanh, reuse=None):\n \"\"\"Initialize the basic LSTM cell.\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n input_size: Deprecated and unused.\n state_is_tuple: If True, accepted and returned states are 2-tuples of\n the `c_state` and `m_state`. If False, they are concatenated\n along the column axis. The latter behavior will soon be deprecated.\n activation: Activation function of the inner states.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n \"\"\"\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. 
Use state_is_tuple=True.\", self)\n if input_size is not None:\n logging.warn(\"%s: The input_size parameter is deprecated.\", self)\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n self._activation = activation\n self._reuse = reuse\n self._in_dim = in_dim\n self.weights = weights\n \n\n @property\n def state_size(self):\n return (rnn.LSTMStateTuple(self._num_units, self._num_units)\n if self._state_is_tuple else 2 * self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n \n def linear(self,arys):\n scope = tf.get_variable_scope()\n if self.weights != None:\n w_i2h, w_h2h, w_b = self.weights\n else:\n with tf.variable_scope(scope): #initializer=tf.contrib.layers.xavier_initializer()\n with tf.device('/cpu:0'):\n w_i2h = tf.get_variable('w_i2h', (self._in_dim, 4*self._num_units), dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(),trainable=True)\n w_h2h = tf.get_variable('w_h2h', (self._num_units, 4*self._num_units), dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(),trainable=True)\n w_b = tf.get_variable('w_b', (1, 4*self._num_units), dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(),trainable=True)\n \n i2h = tf.matmul(arys[0],w_i2h)\n h2h = tf.matmul(arys[1],w_h2h)\n out = i2h + h2h + w_b\n return out\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Long short-term memory cell (LSTM).\"\"\"\n with _checked_scope(self, scope or \"basic_lstm_cell\", reuse=self._reuse):\n # Parameters of gates are concatenated into one multiply for efficiency.\n if self._state_is_tuple:\n c, h = state\n else:\n c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)\n\n concat = self.linear([inputs, h])\n#concat = _linear([inputs, h], 4 * self._num_units, True)\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)\n\n new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *\n self._activation(j))\n new_h = self._activation(new_c) * sigmoid(o)\n\n if self._state_is_tuple:\n new_state = rnn.LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat([new_c, new_h], 1)\n return new_h, new_state\n\n\n\n@contextlib.contextmanager\ndef _checked_scope(cell, scope, reuse=None, **kwargs):\n if reuse is not None:\n kwargs[\"reuse\"] = reuse\n with vs.variable_scope(scope, **kwargs) as checking_scope:\n scope_name = checking_scope.name\n if hasattr(cell, \"_scope\"):\n cell_scope = cell._scope # pylint: disable=protected-access\n if cell_scope.name != checking_scope.name:\n raise ValueError(\n \"Attempt to reuse RNNCell %s with a different variable scope than \"\n \"its first use. First use of cell was with scope '%s', this \"\n \"attempt is with scope '%s'. Please create a new instance of the \"\n \"cell if you would like it to use a different set of weights. \"\n \"If before you were using: MultiRNNCell([%s(...)] * num_layers), \"\n \"change to: MultiRNNCell([%s(...) for _ in range(num_layers)]). \"\n \"If before you were using the same cell instance as both the \"\n \"forward and reverse cell of a bidirectional RNN, simply create \"\n \"two instances (one for forward, one for reverse). 
\"\n \"In May 2017, we will start transitioning this cell's behavior \"\n \"to use existing stored weights, if any, when it is called \"\n \"with scope=None (which can lead to silent model degradation, so \"\n \"this error will remain until then.)\"\n % (cell, cell_scope.name, scope_name, type(cell).__name__,\n type(cell).__name__))\n else:\n weights_found = False\n try:\n with vs.variable_scope(checking_scope, reuse=True):\n vs.get_variable(_WEIGHTS_VARIABLE_NAME)\n weights_found = True\n except ValueError:\n pass\n if weights_found and reuse is None:\n raise ValueError(\n \"Attempt to have a second RNNCell use the weights of a variable \"\n \"scope that already has weights: '%s'; and the cell was not \"\n \"constructed as %s(..., reuse=True). \"\n \"To share the weights of an RNNCell, simply \"\n \"reuse it in your second calculation, or create a new one with \"\n \"the argument reuse=True.\" % (scope_name, type(cell).__name__))\n\n # Everything is OK. Update the cell's scope and yield it.\n cell._scope = checking_scope # pylint: disable=protected-access\n yield checking_scope\n\n","sub_path":"LSTMCell.py","file_name":"LSTMCell.py","file_ext":"py","file_size_in_byte":7104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465803468","text":"# Simple Network Dynamics simulator in Python\n#\n# *** Network Growth ***\n#\n# Copyright 2011-2012 Hiroki Sayama\n# sayama@binghamton.edu\n\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nimport pylab as PL\nimport networkx as NX\nimport csv\nimport math as MT\n\nnetwork_size = 12 # number of robots\ndata = [[] for i in xrange(network_size)] # initializing an empty list of lists to store data\n\nwith open('../../matlab/data/Ex3.txt', 'rb') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=' ')\n for row in csv_reader:\n t = float(row[0])\n r = int(row[1])\n x = float(row[2])\n y = float(row[3])\n theta = float(row[4])\n\n data[r-1].append([t,x,y,theta])\n\n\ndef init():\n global time, network, positions, network_size, fitness\n\n time = 0\n\n network = NX.Graph()\n positions = {}\n fitness = [0.01] * network_size\n\n for r in xrange(network_size):\n network.add_node(r)\n positions[r] = (data[r][1][1],data[r][1][2])\n\n network.add_node(\"c0\")\n network.add_node(\"c1\")\n network.add_node(\"c2\")\n network.add_node(\"c3\")\n network.add_edge(\"c0\",\"c1\");network.add_edge(\"c1\",\"c3\");network.add_edge(\"c2\",\"c3\");network.add_edge(\"c2\",\"c0\")\ndef draw():\n\n PL.cla()\n node_color = ['g','g','g','g','g','g',\n 'r','r','r','r','r','r',\n 'y','y','y','y']\n\n positions[\"c0\"] = (-10,-10); positions[\"c1\"] = (-10,10); positions[\"c2\"] = (10,-10); positions[\"c3\"] = (10,10)\n fitness.append(0); fitness.append(0); fitness.append(0); fitness.append(0)\n\n NX.draw(network, pos=positions, node_color=node_color, node_size=[300 * MT.pow(f,.5) for f in fitness])\n PL.axis('image')\n PL.title('t = ' + str(data[0][time][0]))\n\n\ndef step():\n global time, network, positions, network_size, fitness\n\n time += 1\n for r in xrange(network_size):\n positions[r] = (data[r][time][1],data[r][time][2])\n speed = MT.sqrt( MT.pow(data[r][time][1] - data[r][time-1][1], 2)\n + MT.pow(data[r][time][2] - data[r][time-1][2], 2))\n\n # fitness[r] = (10000.0 * speed)\n fitness[r] = ( fitness[r] * data[r][time-1][0] + speed ) / data[r][time][0]\n\n for r2 in xrange(network_size):\n if r != r2:\n distance = MT.sqrt( MT.pow(data[r][time][1] - data[r2][time][1], 2)\n + MT.pow(data[r][time][2] - 
data[r2][time][2],2) )\n if distance < 2:\n network.add_edge(r,r2)\n else:\n try:\n network.remove_edge(r,r2)\n except:\n pass\n\nimport pycxsimulator\npycxsimulator.GUI().start(func=[init,draw,step])\n","sub_path":"visualization/modules/turtlepy.py","file_name":"turtlepy.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"398207786","text":"import os\nfrom pymongo import Connection\nfrom flask import Flask, g\nfrom flaskext.mail import Mail\nfrom parltrack import default_settings\n\napp = Flask(__name__)\napp.config.from_object(default_settings)\napp.config.from_envvar('PARLTRACK_SETTINGS', silent=True)\nmail = Mail(app)\n\ndef connect_db():\n conn = Connection(app.config.get('MONGODB_HOST'))\n return conn[app.config.get('MONGODB_DB')]\n\ndef get_data_dir():\n data_dir = app.config.get('DATA_DIR', '/tmp/parltrack')\n if not os.path.isdir(data_dir):\n os.makedirs(data_dir)\n return data_dir\n\n","sub_path":"parltrack/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"641045915","text":"# Create two meshes and overwrite ``mesh_a`` with ``mesh_b``.\n# Show that ``mesh_a`` is equal to ``mesh_b``.\n#\nimport pyvista\nmesh_a = pyvista.Sphere()\nmesh_b = pyvista.Cube()\nmesh_a.copy_from(mesh_b)\nmesh_a == mesh_b\n# Expected:\n## True\n","sub_path":"version/dev/api/core/_autosummary/pyvista-DataSet-copy_from-1.py","file_name":"pyvista-DataSet-copy_from-1.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"99523942","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\n'''\nHere we define the main function and the main user interface for the expython system.\n'''\n\nMODULES = ['image.convert_image',\n 'image.resize_image',\n 'memory.cache',\n ]\n\n\n\nimport click\nfrom expython import __version__\n\n\n\nfrom fuzzywuzzy import fuzz\nfrom statistics import mean\nimport logging\ndef fuzz_search(keyword,collection):\n if isinstance(keyword,str):\n string_list = keyword.split() # split on whitespace by default\n elif isinstance(keyword,list):\n string_list = keyword\n else:\n raise TypeError('keyword must be a str or a list')\n\n result = {}\n\n for string in string_list:\n length = len(string)\n for index,m in enumerate(collection):\n res = []\n for i in range(len(m)-length+1):\n s = m[i:i+length]\n res.append((s,fuzz.ratio(string,s)))\n nums = []\n for _,n in res:\n n = n*2-100 ### denoise\n if n> 50:\n nums.append(n)\n\n if nums:\n nums_aver = mean(nums)\n if m in result:\n result[m] += nums_aver\n else:\n result[m] = nums_aver\n else:\n logging.debug('{} passed, matching string is {}'.format(m,string))\n\n result = result.items()\n result = sorted(result,key=lambda x:x[1],reverse=True)\n return result\n\n\ndef print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo('Version {}'.format(__version__))\n ctx.exit()\n\n@click.group()\n@click.option('--version', is_flag=True, callback=print_version,\n expose_value=False, is_eager=True,help=\"print this software version\")\ndef main():\n '''expython --version\n Note that this group callback runs before any subcommand, so it generally does nothing by itself.\n '''\n pass\n\n\n\n@click.argument('command')\n@main.command()\ndef list(command):\n '''list the related modules'''\n suggests 
= fuzz_search(command,MODULES)\n if suggests:\n for s,_ in suggests:\n click.echo('suggested commands: {}'.format(s))\n else:\n click.echo('no such command')\n\n\n@click.argument('command')\n@main.command()\ndef test(command):\n '''test you input command'''\n suggests = fuzz_search(command,MODULES)\n if suggests:\n for s,_ in suggests:\n click.echo('suggested command: {}'.format(s))\n break\n else:\n click.echo('no such command')\n\n@click.argument('command')\n@click.option('-p','--parameters',prompt=\"please enter parameters\",default=\"\")\n@main.command()\ndef do(command,parameters):\n '''actually do the command'''\n suggests = fuzz_search(command,MODULES)\n if suggests:\n for s,_ in suggests:\n click.echo('suggested command: {}'.format(s))\n command = s\n import subprocess\n subprocess.call('python3 -m \"expython.{command}\" {parameters}'.format(command=command,parameters=parameters),shell=True)\n break\n else:\n click.echo('no such command')\n\nif __name__ == '__main__':\n main()\n","sub_path":"lib/python3.4/site-packages/expython/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"107748668","text":"#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\nMIDDLEWARE_CLASSES = (\n # Simplified static file serving.\n # https://warehouse.python.org/project/whitenoise/\n 'whitenoise.middleware.WhiteNoiseMiddleware'\n)\n\n# Extra places for collectstatic to find static files.\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\ndef main():\n \"\"\"Run administrative tasks.\"\"\"\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? 
Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157982","text":"import argparse\nimport logging\nimport os\nimport pprint\nimport random\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom common.dataset import DatasetFactory\nfrom common.evaluation import EvaluatorFactory\nfrom common.train import TrainerFactory\nfrom utils.serialization import load_checkpoint\nfrom .model import DecAtt\n\n\ndef get_logger():\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger\n\n\ndef evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_size, device, keep_results=False):\n saved_model_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, loader, batch_size, device,\n keep_results=keep_results)\n scores, metric_names = saved_model_evaluator.get_scores()\n logger.info('Evaluation metrics for {}'.format(split_name))\n logger.info('\\t'.join([' '] + metric_names))\n logger.info('\\t'.join([split_name] + list(map(str, scores))))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch implementation of Multi-Perspective CNN')\n parser.add_argument('model_outfile', help='file to save final model')\n parser.add_argument('--dataset', help='dataset to use, one of [sick, msrvid, trecqa, wikiqa]', default='sick')\n parser.add_argument('--word-vectors-dir', help='word vectors directory',\n default=os.path.join(os.pardir, 'Castor-data', 'embeddings', 'GloVe'))\n parser.add_argument('--word-vectors-file', help='word vectors filename', default='glove.840B.300d.txt')\n parser.add_argument('--word-vectors-dim', type=int, default=300,\n help='number of dimensions of word vectors (default: 300)')\n parser.add_argument('--skip-training', help='will load pre-trained model', action='store_true')\n parser.add_argument('--device', type=int, default=0, help='GPU device, -1 for CPU (default: 0)')\n parser.add_argument('--wide-conv', action='store_true', default=False,\n help='use wide convolution instead of narrow convolution (default: false)')\n parser.add_argument('--sparse-features', action='store_true',\n default=False, help='use sparse features (default: false)')\n parser.add_argument('--batch-size', type=int, default=64, help='input batch size for training (default: 64)')\n parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train (default: 10)')\n parser.add_argument('--optimizer', type=str, default='adam', help='optimizer to use: adam or sgd (default: adam)')\n parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 0.001)')\n parser.add_argument('--lr-reduce-factor', type=float, default=0.3,\n help='learning rate reduce factor after plateau (default: 0.3)')\n parser.add_argument('--patience', type=float, default=2,\n help='learning rate patience after seeing plateau (default: 2)')\n parser.add_argument('--momentum', type=float, default=0, help='momentum (default: 0)')\n parser.add_argument('--epsilon', type=float, default=1e-8, help='Optimizer epsilon (default: 
1e-8)')\n parser.add_argument('--log-interval', type=int, default=10,\n help='how many batches to wait before logging training status (default: 10)')\n parser.add_argument('--regularization', type=float, default=0.0001,\n help='Regularization for the optimizer (default: 0.0001)')\n parser.add_argument('--max-window-size', type=int, default=3,\n help='windows sizes will be [1,max_window_size] and infinity (default: 3)')\n parser.add_argument('--dropout', type=float, default=0.5, help='dropout probability (default: 0.1)')\n parser.add_argument('--maxlen', type=int, default=60, help='maximum length of text (default: 60)')\n parser.add_argument('--seed', type=int, default=1234, help='random seed (default: 1234)')\n parser.add_argument('--tensorboard', action='store_true', default=False,\n help='use TensorBoard to visualize training (default: false)')\n parser.add_argument('--run-label', type=str, help='label to describe run')\n parser.add_argument('--keep-results', action='store_true',\n help='store the output score and qrel files into disk for the test set')\n\n args = parser.parse_args()\n\n device = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() and args.device >= 0 else 'cpu')\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.device != -1:\n torch.cuda.manual_seed(args.seed)\n\n logger = get_logger()\n logger.info(pprint.pformat(vars(args)))\n\n dataset_cls, embedding, train_loader, test_loader, dev_loader \\\n = DatasetFactory.get_dataset(args.dataset, args.word_vectors_dir, args.word_vectors_file, args.batch_size, args.device)\n\n filter_widths = list(range(1, args.max_window_size + 1)) + [np.inf]\n ext_feats = dataset_cls.EXT_FEATS if args.sparse_features else 0\n\n model = DecAtt(embedding_size=args.word_vectors_dim, device=args.device, num_units=args.word_vectors_dim,\n num_classes=dataset_cls.NUM_CLASSES, dropout=args.dropout, max_sentence_length=args.maxlen)\n\n model = model.to(device)\n embedding = embedding.to(device)\n\n optimizer = None\n if args.optimizer == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.regularization, eps=args.epsilon)\n elif args.optimizer == 'sgd':\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.regularization)\n else:\n raise ValueError('optimizer not recognized: it should be either adam or sgd')\n\n train_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, train_loader, args.batch_size,\n args.device)\n test_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, test_loader, args.batch_size,\n args.device)\n dev_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, dev_loader, args.batch_size,\n args.device)\n\n trainer_config = {\n 'optimizer': optimizer,\n 'batch_size': args.batch_size,\n 'log_interval': args.log_interval,\n 'model_outfile': args.model_outfile,\n 'lr_reduce_factor': args.lr_reduce_factor,\n 'patience': args.patience,\n 'tensorboard': args.tensorboard,\n 'run_label': args.run_label,\n 'logger': logger\n }\n trainer = TrainerFactory.get_trainer(args.dataset, model, embedding, train_loader, trainer_config, train_evaluator, test_evaluator, dev_evaluator)\n\n if not args.skip_training:\n total_params = 0\n for param in model.parameters():\n size = [s for s in param.size()]\n total_params += np.prod(size)\n logger.info('Total number of parameters: %s', total_params)\n trainer.train(args.epochs)\n\n _, _, state_dict, _, _ = 
load_checkpoint(args.model_outfile)\n\n for k, tensor in state_dict.items():\n state_dict[k] = tensor.to(device)\n\n model.load_state_dict(state_dict)\n if dev_loader:\n evaluate_dataset('dev', dataset_cls, model, embedding, dev_loader, args.batch_size, args.device)\n evaluate_dataset('test', dataset_cls, model, embedding, test_loader, args.batch_size, args.device, args.keep_results)\n","sub_path":"decatt/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":7814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"344119581","text":"'''\r\nTasks to create and run a BMI controlling a point mass\r\n'''\r\nimport numpy as np\r\nfrom riglib.bmi.state_space_models import State, StateSpace, offset_state\r\nfrom riglib.bmi.assist import FeedbackControllerAssist\r\nfrom riglib.bmi.goal_calculators import ZeroVelocityAccelGoal\r\nfrom riglib.bmi.bmi import Decoder\r\nfrom riglib.bmi.clda import OFCLearner, Learner, RegexKeyDict\r\nfrom riglib.bmi import feedback_controllers, clda\r\n\r\nfrom riglib import plants\r\nfrom riglib.plants import CursorPlant\r\nfrom passivetasks import EndPostureFeedbackController, MachineOnlyFilter\r\nfrom bmimultitasks import BMIControlMulti\r\nfrom cursor_clda_tasks import CLDAControlMulti\r\nimport os\r\nfrom riglib.bmi.extractor import DummyExtractor\r\n\r\nfrom features import simulation_features\r\nfrom features.bmi_task_features import LinearlyDecreasingAssist, LinearlyDecreasingHalfLife\r\nfrom riglib.experiment import traits\r\n\r\n\r\nclass CursorPlantWithMass(CursorPlant):\r\n Delta = 1./60 # call rate\r\n hdf_attrs = [('cursor_pos', 'f8', (3,)), ('cursor_vel', 'f8', (3,))]\r\n def __init__(self, *args, **kwargs):\r\n super(CursorPlantWithMass, self).__init__(*args, **kwargs)\r\n self.velocity = np.zeros(3)\r\n self.acceleration = np.zeros(3)\r\n self.mass = 1 # kg\r\n\r\n def drive(self, decoder):\r\n # decoder supplies 3-D force vector\r\n force = decoder['force_x', 'force_y', 'force_z']\r\n\r\n # run kinematics\r\n acceleration = 1./self.mass * force\r\n self.velocity += self.Delta * acceleration\r\n self.position += self.Delta * self.velocity + 0.5*self.Delta**2 * acceleration\r\n\r\n # bound position and velocity\r\n self.position, self.velocity = self._bound(self.position, self.velocity)\r\n decoder['q'] = self.position\r\n decoder['qdot'] = self.velocity\r\n decoder['hand_ax', 'hand_ay', 'hand_az'] = self.acceleration\r\n self.draw()\r\n\r\n def get_data_to_save(self):\r\n return dict(cursor_pos=self.position, cursor_vel=self.velocity)\r\n\r\n\r\n###################################\r\n##### State space model declaration\r\n###################################\r\nclass PointForceStateSpace(StateSpace):\r\n def __init__(self):\r\n self.states = [\r\n State('hand_px', stochastic=False, drives_obs=False, order=0, aux=True),\r\n State('hand_py', stochastic=False, drives_obs=False, order=0, aux=True),\r\n State('hand_pz', stochastic=False, drives_obs=False, order=0, aux=True),\r\n\r\n State('hand_vx', stochastic=False, drives_obs=False, order=1, aux=True),\r\n State('hand_vy', stochastic=False, drives_obs=False, order=1, aux=True),\r\n State('hand_vz', stochastic=False, drives_obs=False, order=1, aux=True),\r\n\r\n State('hand_ax', stochastic=False, drives_obs=False, order=2, aux=True),\r\n State('hand_ay', stochastic=False, drives_obs=False, order=2, aux=True),\r\n State('hand_az', stochastic=False, drives_obs=False, order=2, aux=True),\r\n\r\n State('force_x', stochastic=True, 
drives_obs=True, order=2),\r\n State('force_y', stochastic=False, drives_obs=False, order=2),\r\n State('force_z', stochastic=True, drives_obs=True, order=2),\r\n offset_state]\r\n\r\n self.mass = 1 # kg\r\n\r\n def get_ssm_matrices(self, update_rate=0.1):\r\n I = np.mat(np.eye(3))\r\n Delta = update_rate\r\n zero_vec = np.zeros([3,1])\r\n D60 = 1./60\r\n pos_vel_int_gain = D60 + D60**2 + D60**3 + D60**4 + D60**5 + D60**6\r\n A = np.vstack([np.hstack([I, Delta*I, 0.5*Delta**2*I, 0*I, zero_vec]), \r\n np.hstack([0*I, I, Delta*I, 0*I, zero_vec]), \r\n np.hstack([0*I, 0*I, 0*I, 0*I, zero_vec]),\r\n np.hstack([0*I, 0*I, 0*I, 0*I, zero_vec]),\r\n np.hstack([zero_vec.T, zero_vec.T, zero_vec.T, zero_vec.T, np.ones([1,1])]),\r\n ])\r\n\r\n B = np.vstack([0*I, \r\n 0*I, \r\n Delta*1000*I, \r\n self.mass*Delta*1000*I, \r\n zero_vec.T])\r\n\r\n W = np.vstack([np.hstack([0*I, 0*I, 0*I, 0*I, zero_vec]), \r\n np.hstack([0*I, 0*I, 0*I, 0*I, zero_vec]), \r\n np.hstack([0*I, 0*I, 70*I, 0*I, zero_vec]),\r\n np.hstack([0*I, 0*I, 0*I, 70*I, zero_vec]),\r\n np.hstack([zero_vec.T, zero_vec.T, zero_vec.T, zero_vec.T, np.zeros([1,1])]),\r\n ]) \r\n\r\n return A, B, W\r\n\r\n###################################\r\n##### Ideal feedback control policy\r\n###################################\r\nssm = PointForceStateSpace()\r\nclass PointMassFBController(feedback_controllers.LQRController):\r\n def __init__(self):\r\n I = np.mat(np.eye(3))\r\n Delta = 1./60\r\n ssm = PointForceStateSpace()\r\n A, B, _ = ssm.get_ssm_matrices(update_rate=0.1)\r\n Q = np.mat(np.diag(np.hstack([np.ones(3), np.zeros(3), np.zeros(3), np.zeros(3), 0])))\r\n R = np.mat(np.eye(3)) * 1000\r\n super(PointMassFBController, self).__init__(A, B, Q, R)\r\n\r\n\r\nclass BMIPointMassCursor(BMIControlMulti):\r\n exclude_parent_traits = ['plant_type', 'plant_visible', 'plant_hide_rate']\r\n def __init__(self, *args, **kwargs):\r\n self.plant = CursorPlantWithMass(endpt_bounds=(-14, 14, 0., 0., -14, 14))\r\n super(BMIPointMassCursor, self).__init__(*args, **kwargs)\r\n\r\n def init_decoder_state(self):\r\n pass\r\n\r\n def create_assister(self):\r\n fb_ctrl = PointMassFBController()\r\n self.assister = FeedbackControllerAssist(fb_ctrl, style='mixing')\r\n\r\n def create_goal_calculator(self):\r\n pass\r\n\r\n def get_target_BMI_state(self, *args):\r\n '''\r\n Run the goal calculator to determine the target state of the task\r\n '''\r\n target_state = np.array(np.hstack([self.target_location, np.zeros(3), np.zeros(3), np.zeros(3), 1]))\r\n return np.tile(np.array(target_state).reshape(-1,1), [1, self.decoder.n_subbins]) \r\n\r\n\r\nclass PointMassVisualFeedback(BMIPointMassCursor):\r\n exclude_parent_traits = []\r\n assist_level = (1, 1)\r\n is_bmi_seed = True\r\n\r\n def load_decoder(self):\r\n self.ssm = PointForceStateSpace()\r\n A, B, W = self.ssm.get_ssm_matrices()\r\n filt = MachineOnlyFilter(A, W)\r\n units = []\r\n self.decoder = Decoder(filt, units, self.ssm, binlen=0.1)\r\n self.decoder.n_features = 1\r\n\r\n def create_assister(self):\r\n fb_ctrl = PointMassFBController()\r\n self.assister = FeedbackControllerAssist(fb_ctrl, style='mixing')\r\n\r\n def create_feature_extractor(self):\r\n self.extractor = DummyExtractor()\r\n self._add_feature_extractor_dtype()\r\n\r\n\r\nclass CLDAPointMassCursor(BMIPointMassCursor, LinearlyDecreasingHalfLife):\r\n batch_time = traits.Float(0.1, desc='The length of the batch in seconds')\r\n decoder_sequence = traits.String('test', desc='signifier to group together sequences of decoders')\r\n\r\n def 
create_learner(self):\r\n self.batch_size = int(self.batch_time/self.decoder.binlen)\r\n fb_ctrl = PointMassFBController()\r\n self.learner = clda.FeedbackControllerLearner(self.batch_size, fb_ctrl)\r\n self.learn_flag = True\r\n\r\n def create_updater(self):\r\n self.updater = clda.KFRML(self.batch_time, self.half_life[0])\r\n\r\n def _cycle(self):\r\n super(CLDAPointMassCursor, self)._cycle()\r\n if self.calc_state_occurrences('reward') > 16:\r\n self.learner.batch_size = np.inf\r\n\r\n def call_decoder(self, *args, **kwargs):\r\n kwargs['half_life'] = self.current_half_life\r\n return super(CLDAPointMassCursor, self).call_decoder(*args, **kwargs)\r\n\r\n\r\nclass CLDABaselinePointMassCursor(CLDAPointMassCursor):\r\n '''\r\n Only re-estimate baseline firing rates\r\n '''\r\n def create_updater(self):\r\n self.updater = clda.KFRML_baseline(self.batch_time, self.half_life[0])\r\n \r\n\r\nclass SimCLDAPointMassCursor(simulation_features.SimKalmanEnc, simulation_features.SimKFDecoderShuffled, CLDAPointMassCursor):\r\n assist_level = (0, 0)\r\n assist_level_time = 60.\r\n half_life = (10., 450.)\r\n half_life_time = 300\r\n def __init__(self, *args, **kwargs):\r\n kwargs['fb_ctrl'] = PointMassFBController()\r\n kwargs['ssm'] = PointForceStateSpace()\r\n super(SimCLDAPointMassCursor, self).__init__(*args, **kwargs)\r\n\r\n\r\n","sub_path":"tasks/point_mass_cursor.py","file_name":"point_mass_cursor.py","file_ext":"py","file_size_in_byte":8569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614231932","text":"from ..utils import *\n\n__all__ = [\"ADMACRegs\", \"ADMAC\"]\n\n\nclass R_RING(Register32):\n # overflow/underflow counter\n OF_UF = 31, 16\n\n # goes through 0, 1, 2, 3 as the pieces of a report/descriptor\n # are being read/written through REPORT_READ/DESC_WRITE\n READOUT_PROGRESS = 13, 12\n\n # when READ_SLOT==WRITE_SLOT one of the two is set\n EMPTY = 8\n FULL = 9\n\n ERR = 10\n\n # next slot to read\n READ_SLOT = 5, 4\n\n # next slot to be written to\n WRITE_SLOT = 1, 0\n\nclass R_CHAN_STATUS(Register32):\n # only raised if the descriptor had NOTIFY set\n DESC_DONE = 1\n\n DESC_RING_EMPTY = 4\n REPORT_RING_FULL = 5\n\n # cleared by writing ERR=1 either to TX_DESC_RING or TX_REPORT_RING\n ERR = 7\n\n UNK3 = 8\n UNK4 = 9\n UNK5 = 10\n\nclass R_CHAN_CONTROL(Register32):\n RESET_RINGS = 0\n CLEAR_OF_UF_COUNTERS = 1\n UNK1 = 3\n\nclass ADMACRegs(RegMap):\n TX_EN = 0x0, Register32 # one bit per channel\n TX_EN_CLR = 0x4, Register32\n\n RX_EN = 0x8, Register32\n RX_EN_CLR = 0xc, Register32\n\n UNK_CTL = 0x10, Register32\n\n # each of the four registers represents an internal interrupt line,\n # bits represent DMA channels which at the moment raise that particular line\n #\n # the irq-destination-index prop in ADT maybe selects the line which\n # is actually wired out\n #\n TX_INTSTATE = irange(0x30, 4, 0x4), Register32\n\n # a 24 MHz always-running counter, top bit is always set\n COUNTER = 0x70, Register64\n\n # -- per-channel registers --\n\n TX_CTL = (irange(0x8000, 16, 0x400)), R_CHAN_CONTROL\n\n TX_DESC_RING = irange(0x8070, 16, 0x400), R_RING\n TX_REPORT_RING = irange(0x8074, 16, 0x400), R_RING\n\n TX_DESC_WRITE = irange(0x10000, 16, 4), Register32\n TX_REPORT_READ = irange(0x10100, 16, 4), Register32\n\n # per-channel, per-internal-line\n TX_STATUS = (irange(0x8010, 16, 0x400), irange(0x0, 4, 0x4)), R_CHAN_STATUS\n TX_INTMASK = (irange(0x8010, 16, 0x400), irange(0x0, 4, 0x4)), R_CHAN_STATUS\n\n # missing: RX variety of registers 
shifted by +0x200\n\n\nclass ADMACDescriptorFlags(Register32):\n    # whether to raise DESC_DONE in TX_STATUS\n    NOTIFY = 16\n\n    # arbitrary ID propagated into reports\n    DESC_ID = 7, 0\n\nclass ADMACDescriptor(Reloadable):\n    def __init__(self, addr, length, **flags):\n        self.addr = addr\n        self.length = length\n        self.flags = ADMACDescriptorFlags(**flags)\n\n    def __repr__(self):\n        # repr text reconstructed from the fields; exact original string unknown\n        return f\"<descriptor addr=0x{self.addr:x} len=0x{self.length:x} flags={self.flags}>\"\n\n    def ser(self):\n        return [\n            self.addr & (1<<32)-1,\n            self.addr>>32 & (1<<32)-1,\n            self.length & (1<<32)-1,\n            int(self.flags)\n        ]\n\n    @classmethod\n    def deser(cls, seq):\n        if not len(seq) == 4:\n            raise ValueError\n        # __init__ only accepts flags as keywords, so construct from\n        # addr/length and attach the raw flags word afterwards\n        desc = cls(\n            seq[0] | seq[1] << 32, # addr\n            seq[2]                 # length (in bytes)\n        )\n        desc.flags = ADMACDescriptorFlags(seq[3]) # flags\n        return desc\n\n\nclass ADMACReportFlags(Register32):\n    UNK1 = 24\n    UNK2 = 25\n    UNK3 = 27\n    DESC_ID = 7, 0\n\nclass ADMACReport(Reloadable):\n    def __init__(self, countval, unk1, flags):\n        self.countval, self.unk1, self.flags = countval, unk1, ADMACReportFlags(flags)\n\n    def __repr__(self):\n        # repr text reconstructed from the fields; exact original string unknown\n        return f\"<report countval=0x{self.countval:x} unk1=0x{self.unk1:x} flags={self.flags}>\"\n\n    def ser(self):\n        return [\n            self.countval & (1<<32)-1,\n            self.countval>>32 & (1<<32)-1,\n            self.unk1 & (1<<32)-1,\n            int(self.flags)\n        ]\n\n    @classmethod\n    def deser(cls, seq):\n        if not len(seq) == 4:\n            raise ValueError\n        return ADMACReport(\n            seq[0] | seq[1] << 32, # countval\n            seq[2], # unk1\n            seq[3] # flags\n        )\n\n\nclass ADMACTXChannel(Reloadable):\n    def __init__(self, parent, channo):\n        self.p = parent\n        self.iface = parent.p.iface\n        self.dart = parent.dart\n        self.regs = parent.regs\n        self.ch = channo\n        self.desc_id = 0\n\n    def reset(self):\n        self.regs.TX_CTL[self.ch].set(RESET_RINGS=1)\n        self.regs.TX_CTL[self.ch].set(RESET_RINGS=0)\n\n    def enable(self):\n        self.regs.TX_EN.val = 1 << self.ch\n\n    def disable(self):\n        self.regs.TX_EN_CLR.val = 1 << self.ch\n\n    def can_submit(self):\n        return not self.regs.TX_DESC_RING[self.ch].reg.FULL\n\n    def submit_desc(self, desc):\n        if self.regs.TX_DESC_RING[self.ch].reg.FULL:\n            raise Exception(f\"ch{self.ch} descriptor ring full\")\n\n        if self.p.debug:\n            print(f\"admac: submitting (ch{self.ch}): {desc}\")\n\n        for piece in desc.ser():\n            self.regs.TX_DESC_WRITE[self.ch].val = piece\n\n    def submit(self, data):\n        assert self.dart is not None\n\n        self.poll()\n\n        buf, iova = self.p._get_buffer(len(data))\n        self.iface.writemem(buf, data)\n        self.submit_desc(ADMACDescriptor(\n            iova, len(data), DESC_ID=self.desc_id, NOTIFY=1,\n        ))\n        self.desc_id += 1\n\n    def poll(self):\n        if self.regs.TX_STATUS[self.ch, 1].reg.ERR:\n            if self.p.debug:\n                print(f\"TX_STATUS={self.regs.TX_STATUS[self.ch,1].reg} \" + \\\n                      f\"REPORT_RING={self.regs.TX_REPORT_RING[self.ch]} \" + \\\n                      f\"DESC_RING={self.regs.TX_DESC_RING[self.ch]}\")\n            self.regs.TX_DESC_RING[self.ch].set(ERR=1)\n            self.regs.TX_REPORT_RING[self.ch].set(ERR=1)\n\n        while not self.regs.TX_REPORT_RING[self.ch].reg.EMPTY:\n            pieces = []\n            for _ in range(4):\n                pieces.append(self.regs.TX_REPORT_READ[self.ch].val)\n            report = ADMACReport.deser(pieces)\n\n            if self.p.debug:\n                print(f\"admac: picked up (ch{self.ch}): {report}\")\n\n\nclass ADMAC(Reloadable):\n    def __init__(self, u, devpath, dart=None, dart_stream=2, nchans=12,\n                 reserved_size=4*1024*1024, debug=False):\n        self.u = u\n        self.p = u.proxy\n        self.debug = debug\n\n        self.base, _ = u.adt[devpath].get_reg(0)\n        self.regs = ADMACRegs(u, self.base)\n        self.dart = dart\n\n        if dart is not None:\n            self.resmem_base = u.heap.memalign(128*1024, reserved_size)\n            self.resmem_size = reserved_size\n            self.resmem_pos = self.resmem_base\n            self.iova_base = self.dart.iomap(dart_stream, 
self.resmem_base, self.resmem_size)\n self.dart.invalidate_streams(1 << dart_stream)\n\n self.tx = [ADMACTXChannel(self, no) for no in range(nchans)]\n\n def _get_buffer(self, size):\n assert size < self.resmem_size\n\n if self.resmem_pos + size > self.resmem_base + self.resmem_size:\n self.resmem_pos = self.resmem_base\n\n bufptr = self.resmem_pos\n self.resmem_pos += size\n return bufptr, bufptr - self.resmem_base + self.iova_base\n","sub_path":"proxyclient/m1n1/hw/admac.py","file_name":"admac.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"406717179","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass GameItemNew(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, nombre: str=None, descripcion: str=None, localizacion: str=None, dificultad: str=None, jugadoresmin: str=None, jugadoresmax: str=None): # noqa: E501\n \"\"\"GameItemNew - a model defined in Swagger\n\n :param nombre: The nombre of this GameItemNew. # noqa: E501\n :type nombre: str\n :param descripcion: The descripcion of this GameItemNew. # noqa: E501\n :type descripcion: str\n :param localizacion: The localizacion of this GameItemNew. # noqa: E501\n :type localizacion: str\n :param dificultad: The dificultad of this GameItemNew. # noqa: E501\n :type dificultad: str\n :param jugadoresmin: The jugadoresmin of this GameItemNew. # noqa: E501\n :type jugadoresmin: str\n :param jugadoresmax: The jugadoresmax of this GameItemNew. # noqa: E501\n :type jugadoresmax: str\n \"\"\"\n self.swagger_types = {\n 'nombre': str,\n 'descripcion': str,\n 'localizacion': str,\n 'dificultad': str,\n 'jugadoresmin': str,\n 'jugadoresmax': str\n }\n\n self.attribute_map = {\n 'nombre': 'nombre',\n 'descripcion': 'descripcion',\n 'localizacion': 'localizacion',\n 'dificultad': 'dificultad',\n 'jugadoresmin': 'jugadoresmin',\n 'jugadoresmax': 'jugadoresmax'\n }\n\n self._nombre = nombre\n self._descripcion = descripcion\n self._localizacion = localizacion\n self._dificultad = dificultad\n self._jugadoresmin = jugadoresmin\n self._jugadoresmax = jugadoresmax\n\n @classmethod\n def from_dict(cls, dikt) -> 'GameItemNew':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The GameItemNew of this GameItemNew. 
# noqa: E501\n :rtype: GameItemNew\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def nombre(self) -> str:\n \"\"\"Gets the nombre of this GameItemNew.\n\n\n :return: The nombre of this GameItemNew.\n :rtype: str\n \"\"\"\n return self._nombre\n\n @nombre.setter\n def nombre(self, nombre: str):\n \"\"\"Sets the nombre of this GameItemNew.\n\n\n :param nombre: The nombre of this GameItemNew.\n :type nombre: str\n \"\"\"\n if nombre is None:\n raise ValueError(\"Invalid value for `nombre`, must not be `None`\") # noqa: E501\n\n self._nombre = nombre\n\n @property\n def descripcion(self) -> str:\n \"\"\"Gets the descripcion of this GameItemNew.\n\n\n :return: The descripcion of this GameItemNew.\n :rtype: str\n \"\"\"\n return self._descripcion\n\n @descripcion.setter\n def descripcion(self, descripcion: str):\n \"\"\"Sets the descripcion of this GameItemNew.\n\n\n :param descripcion: The descripcion of this GameItemNew.\n :type descripcion: str\n \"\"\"\n if descripcion is None:\n raise ValueError(\"Invalid value for `descripcion`, must not be `None`\") # noqa: E501\n\n self._descripcion = descripcion\n\n @property\n def localizacion(self) -> str:\n \"\"\"Gets the localizacion of this GameItemNew.\n\n\n :return: The localizacion of this GameItemNew.\n :rtype: str\n \"\"\"\n return self._localizacion\n\n @localizacion.setter\n def localizacion(self, localizacion: str):\n \"\"\"Sets the localizacion of this GameItemNew.\n\n\n :param localizacion: The localizacion of this GameItemNew.\n :type localizacion: str\n \"\"\"\n if localizacion is None:\n raise ValueError(\"Invalid value for `localizacion`, must not be `None`\") # noqa: E501\n\n self._localizacion = localizacion\n\n @property\n def dificultad(self) -> str:\n \"\"\"Gets the dificultad of this GameItemNew.\n\n\n :return: The dificultad of this GameItemNew.\n :rtype: str\n \"\"\"\n return self._dificultad\n\n @dificultad.setter\n def dificultad(self, dificultad: str):\n \"\"\"Sets the dificultad of this GameItemNew.\n\n\n :param dificultad: The dificultad of this GameItemNew.\n :type dificultad: str\n \"\"\"\n if dificultad is None:\n raise ValueError(\"Invalid value for `dificultad`, must not be `None`\") # noqa: E501\n\n self._dificultad = dificultad\n\n @property\n def jugadoresmin(self) -> str:\n \"\"\"Gets the jugadoresmin of this GameItemNew.\n\n\n :return: The jugadoresmin of this GameItemNew.\n :rtype: str\n \"\"\"\n return self._jugadoresmin\n\n @jugadoresmin.setter\n def jugadoresmin(self, jugadoresmin: str):\n \"\"\"Sets the jugadoresmin of this GameItemNew.\n\n\n :param jugadoresmin: The jugadoresmin of this GameItemNew.\n :type jugadoresmin: str\n \"\"\"\n if jugadoresmin is None:\n raise ValueError(\"Invalid value for `jugadoresmin`, must not be `None`\") # noqa: E501\n\n self._jugadoresmin = jugadoresmin\n\n @property\n def jugadoresmax(self) -> str:\n \"\"\"Gets the jugadoresmax of this GameItemNew.\n\n\n :return: The jugadoresmax of this GameItemNew.\n :rtype: str\n \"\"\"\n return self._jugadoresmax\n\n @jugadoresmax.setter\n def jugadoresmax(self, jugadoresmax: str):\n \"\"\"Sets the jugadoresmax of this GameItemNew.\n\n\n :param jugadoresmax: The jugadoresmax of this GameItemNew.\n :type jugadoresmax: str\n \"\"\"\n if jugadoresmax is None:\n raise ValueError(\"Invalid value for `jugadoresmax`, must not be `None`\") # noqa: E501\n\n self._jugadoresmax = 
jugadoresmax","sub_path":"Back-end/Code/Servidor/swagger_server/models/game_item_new.py","file_name":"game_item_new.py","file_ext":"py","file_size_in_byte":6058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77691048","text":"#import the neccessary sklearn packages\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\n#import the packages that we have defined\nfrom preprocessing.simplepreprocessor import SimplePreprocessor\nfrom preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor\nfrom datasets.simpleDatasetLoader import SimpleDatasetLoader\nfrom nn.conv.shallownet import ShallowNet\n\n#import the correct SGD optimizer, the other one gives errors smh\nfrom tensorflow.keras.optimizers import SGD\nfrom imutils import paths\nimport matplotlib.pyplot as plt \nimport numpy as np \nimport argparse\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True, help=\"path to input dataset\")\n#path to where we would like to save the network after training is complete\nap.add_argument(\"-m\", \"--model\", required=True, help=\"path to output model\")\nargs = vars(ap.parse_args())\n\n# grab the list of images that we’ll be describing\nprint(\"[INFO] loading images...\")\nimagePaths = list(paths.list_images(args[\"dataset\"]))\n\n# initialize the image preprocessors\nsp = SimplePreprocessor(32, 32)\niap = ImageToArrayPreprocessor()\n\n# load the dataset from disk then scale the raw pixel intensities\n# to the range [0, 1]\nsdl = SimpleDatasetLoader(preprocessors=[sp, iap])\n(data, labels) = sdl.load(imagePaths, verbose=500)\ndata = data.astype(\"float\") / 255.0\n\n\n# partition the data into training and testing splits using 75% of\n# the data for training and the remaining 25% for testing\n(trainX, testX, trainY, testY) = train_test_split(data, labels,test_size=0.25, random_state=42)\n\nlb=LabelBinarizer()\n#convert the labels from integers to vectors\ntrainY=lb.fit_transform(trainY)\ntestY=lb.transform(testY)\n#this is me forcing them to take the softmax format\n#If its a binary ouuput \n\nif(trainY.shape[1]==1):\n print(\"[INFO] converting binary output to softmax\")\n trainY=np.hstack((trainY, 1 - trainY))\n testY=np.hstack((testY, 1 - testY))\n\n# initialize the optimizer and model\nprint(\"[INFO] compiling model...\")\nopt = SGD(lr=0.005)\nmodel = ShallowNet.build(width=32, height=32, depth=3, classes=trainY.shape[1])\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt,\nmetrics=[\"accuracy\"])\n\n# train the network\nprint(\"[INFO] training network...\")\nH = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)\n\n#save the network to disk\nprint(\"[INFO] serializing network...\")\n\n#This .save method takes the weights and state of the optimizer and serializes them to disk in HDF5\nmodel.save(args[\"model\"])\n\n#evaluate the network \nprint(\"[INFO] evaluating the network...\")\npredictions=model.predict(testX,batch_size=256)\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1),target_names=lb.classes_))\n\n# plot the training loss and accuracy\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, 100), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, 100), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, 
100), H.history[\"accuracy\"], label=\"train_acc\")\nplt.plot(np.arange(0, 100), H.history[\"val_accuracy\"], label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.show()\n\n","sub_path":"Networks/shallownet_train.py","file_name":"shallownet_train.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"393555860","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2019 Naran Inc. All rights reserved.\n# __ _ _______ ______ _______ __ _\n# | | | | _ | _ | | _ | | | |\n# | |_| | |_| | | || | |_| | |_| |\n# | | | |_||_| | |\n# | _ | | __ | | _ |\n# | | | | _ | | | | _ | | | |\n# |_| |__|__| |__|___| |_|__| |__|_| |__|\n\n\nimport os\nimport json\nimport datetime\nimport logging\nimport logging.handlers\nimport uuid\nimport requests\nimport time\nimport asyncio\n\nimport reserve\nimport constant\nimport rdb_helper\n\nfrom constant import GROUP, ST_GRP, RES\n\n\nALARM_ADD = '''alarm.added'''\nALARM_UPDATE = '''alarm.changed'''\nALARM_REMOVE = '''alarm.removed'''\nGROUP_ADD = '''group.added'''\nGROUP_UPDATE = '''group.changed'''\nGROUP_REMOVE = '''group.removed'''\nRESERVE_ADD = '''reserve.added'''\nRESERVE_UPDATE = '''reserve.changed'''\nRESERVE_REMOVE = '''reserve.removed'''\nSTREAMING_UPDATE = '''streaming.changed'''\nFAILED_LIST = '''failed.list'''\nADD_PLAYLIST = '''list.add'''\nREMOVE_PLAYLIST = '''list.remove'''\nFILE_NOT_EXIST = '''file.none'''\n\n\nHEADERS = {\n 'Content-Type': 'application/json'\n}\n# RES is reservation sound. reservation sound is only group.\nGROUP_TYPE = [GROUP, ST_GRP, RES]\n\n\nasync def send_streaming_update(value, cast_type, cast_id):\n _data = {\n \"status\": value,\n \"groupIdList\": []\n }\n if cast_type in GROUP_TYPE:\n _data[\"groupIdList\"] = cast_id\n # TODO: remove\n if cast_type == RES:\n _data[\"castType\"] = cast_type\n logging.info(\"## Send streaming update event. value : %s\", _data)\n _ret = await send_request(STREAMING_UPDATE, _data)\n return _ret\n\n\ndef send_streaming_now_res(group_id_list):\n _data = {\n \"status\": True,\n \"groupIdList\": group_id_list\n }\n asyncio.ensure_future(send_request(STREAMING_UPDATE, _data))\n\n\nasync def send_failed_list(value):\n await send_request(FAILED_LIST, value)\n\n\nasync def send_request(topic, value):\n url = \"{}/paserver/event\".format(constant.API_SERVER)\n data = [\n {\n \"topic\": topic,\n \"value\": value,\n \"_t\": time.time()\n }\n ]\n try:\n logging.info(\"send request. topic : %s, value : %s\", topic, value)\n resp = requests.post(url, headers=HEADERS,\n data=json.dumps(data, default=default),\n verify=False)\n if resp.ok:\n return True\n else:\n logging.warning(\"Failed to requests event. Topic : %s, Code : %s, Text : %s\",\n topic, resp.status_code, resp.text)\n return False\n except:\n logging.exception(\"Failed to requests event. topic : %s, url : %s, data : %s\",\n topic, url, data)\n return False\n\n\nasync def delete_alarm(id_list):\n try:\n remove_list = []\n for pa_alarm_id in id_list:\n pa_alarm_data = rdb_helper.get_alarm_list_data(pa_alarm_id)\n if os.path.exists(pa_alarm_data['file_path']):\n os.remove(pa_alarm_data['file_path'])\n else:\n logging.warning(\"File is not exists. 
Name : %s, Path : %s\",\n pa_alarm_data['name'], pa_alarm_data['file_path'])\n rdb_helper.delete_alarm_data(pa_alarm_id)\n remove_list.append(pa_alarm_id)\n await send_request(ALARM_REMOVE, remove_list)\n await remove_playlist(remove_list)\n return True\n except:\n logging.exception(\"Failed to remove alarm file.\")\n return False\n\n\nasync def get_alarm_list():\n pa_alarm_list = rdb_helper.get_all_alarm_list_data()\n new_list = []\n for k, v in pa_alarm_list.items():\n new_list.append(v)\n return new_list\n\n\ndef default(obj):\n \"\"\"Default JSON serializer.\"\"\"\n import calendar, datetime\n\n if isinstance(obj, datetime.datetime):\n if obj.utcoffset() is not None:\n obj = obj - obj.utcoffset()\n millis = int(\n calendar.timegm(obj.timetuple()) * 1000 +\n obj.microsecond / 1000\n )\n return millis\n raise TypeError('Not sure how to serialize %s' % (obj,))\n\n\ndef _get_group_to_dict(group_list):\n new_list = []\n for group in group_list:\n new_list.append({\n 'id': group.id,\n 'name': group.name,\n 'speaker_id_list': json.loads(group.speaker_id_list)\n })\n return new_list\n\n\nasync def create_group(pa_group_name):\n try:\n pa_group_id = uuid.uuid4().hex\n _data = {\n 'id': pa_group_id,\n 'name': pa_group_name,\n 'speaker_id_list': json.dumps([])\n }\n rdb_helper.set_pa_groups_data(pa_group_id, _data)\n _data['speaker_id_list'] = []\n _send_list = [_data]\n await send_request(GROUP_ADD, _send_list)\n return True\n except:\n logging.exception(\"Failed to create group.\")\n return False\n\n\nasync def update_group_name(pa_group_id, pa_group_name):\n try:\n _data = rdb_helper.get_pa_groups_data(pa_group_id)\n _data['name'] = pa_group_name\n rdb_helper.set_pa_groups_data(pa_group_id, _data)\n _send_list = [_data]\n await send_request(GROUP_UPDATE, _send_list)\n return True\n except:\n logging.exception(\"Failed to update group name.\")\n return False\n\n\nasync def update_group_speaker(pa_group_id, pa_speaker_id_list):\n try:\n _data = rdb_helper.get_pa_groups_data(pa_group_id)\n _data['speaker_id_list'] = json.dumps(pa_speaker_id_list)\n _ret = rdb_helper.set_pa_groups_data(pa_group_id, _data)\n #TODO: send each speark\n _send_list = [_data]\n await send_request(GROUP_UPDATE, _send_list)\n return True\n except:\n logging.exception(\"Failed to update speaker id in group.\")\n return False\n\n\nasync def delete_group(pa_group_id_list):\n try:\n for pa_group_id in pa_group_id_list:\n rdb_helper.delete_pa_groups_data(pa_group_id)\n # TODO: speaker update eveent\n await send_request(GROUP_REMOVE, pa_group_id_list)\n return True\n except:\n logging.exception(\"Failed to delete group.\")\n return False\n\n\nasync def get_group_list():\n _pa_group_list = []\n pa_group_id_list = rdb_helper.get_all_pa_groups_data()\n for k, v in pa_group_id_list.items():\n data = {\n \"id\": k,\n \"name\": v['name'],\n \"speaker_id_list\": json.loads(v['speaker_id_list'])\n }\n _pa_group_list.append(data)\n return _pa_group_list\n\n\nasync def create_reserve(data):\n try:\n typ = data['type']\n name = data['name']\n hour = data['hour']\n minute = data['minute']\n pause = data['pause']\n repeat = data['repeat']\n alarm_id = data['alarm_id']\n group_id_list = data['group_id_list']\n\n ret = await reserve.add_reserve(int(typ), int(hour), int(minute),\n group_id_list, alarm_id, int(repeat),\n name, int(pause))\n # TODO:\n #await send_event(int(hour), int(minute), group_id_list)\n _send_list = [ret]\n await send_request(RESERVE_ADD, _send_list)\n return True\n except:\n logging.exception(\"Failed to create 
reserve.\")\n return False\n\n\nasync def send_event(hour, minute, group_id_list):\n cur_date = datetime.datetime.now()\n target_time = datetime.datetime(cur_date.year, cur_date.month, cur_date.day,\n hour, minute).timestamp()\n gap = target_time - cur_date.timestamp()\n asyncio.get_event_loop().call_later(gap, send_streaming_now_res,\n group_id_list)\n\n\nasync def delete_reserve(id_list):\n try:\n for _id in id_list:\n await reserve.remove_reserve(_id)\n await send_request(RESERVE_REMOVE, id_list)\n return True\n except:\n logging.exception(\"Failed to remove reserve.\")\n return False\n\n\nasync def delete_reserve_cache(_id):\n try:\n await send_request(RESERVE_REMOVE, [_id])\n return True\n except:\n logging.exception(\"Failed to remove reserve cache.\")\n return False\n\n\nasync def stop_reserve(_id):\n logging.info(\"## stop reserve : %s\", _id)\n ret = await reserve.get_reserve(_id)\n if ret:\n logging.info(\"## stop reserve send requestt : %s. ret : %s\", _id, ret)\n await send_request(RESERVE_UPDATE, [ret])\n return True\n return False\n\n\nasync def clear_reserve():\n key_list = await reserve.get_reserve_key_list()\n ret = await reserve.clear_reserve()\n if ret:\n await send_request(RESERVE_REMOVE, key_list)\n return ret\n\n\nasync def pause_reserve(_id, is_pause):\n ret = await reserve.pause_reserve(_id, is_pause)\n if ret:\n await send_request(RESERVE_UPDATE, [ret])\n return True\n return False\n\n\nasync def get_reserve_list():\n reserve_list = await reserve.get_reserve_list()\n return reserve_list\n\n\nasync def add_playlist(id_list):\n try:\n add_list = []\n for playlist_id in id_list:\n if not rdb_helper.has_play_list(playlist_id):\n rdb_helper.add_play_list(playlist_id, \"\")\n add_list.append(playlist_id)\n await send_request(ADD_PLAYLIST, add_list)\n return True\n except:\n logging.exception(\"Failed to add playlist. List : %s\", id_list)\n return False\n\n\nasync def remove_playlist(id_list):\n try:\n remove_list = []\n for playlist_id in id_list:\n rdb_helper.remove_play_list(playlist_id)\n remove_list.append(playlist_id)\n await send_request(REMOVE_PLAYLIST, remove_list)\n return True\n except:\n logging.exception(\"Failed to remove playlist. 
List : %s.\", id_list)\n return False\n\n\nasync def get_all_play_list():\n play_list = rdb_helper.get_all_play_list()\n new_list = []\n for k, v in play_list.items():\n new_list.append(k)\n return new_list\n\n","sub_path":"src/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490160109","text":"from keras.models import Model, load_model\nfrom keras.layers import Dense, Dropout, Flatten, Input, BatchNormalization, Conv2D, MaxPool2D\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import Callback, ModelCheckpoint\n# from keras.losses import SparseCategoricalCrossentropy\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\nfrom My_Model import create_model\n\nimport numpy as np\nimport cv2\nfrom preprocess import BGR_to_Binary\n\ntest_percen = 10\nIMAGE_SIZE = (60,60)\n\n# columns = [\"FileName\",\"Class0\",\"Class1\",\"Class2\",\"Class3\",\"Class4\",\"Class\"]\ndataframe = pd.read_csv('Data_Set/ANS.csv', delimiter=',', header=0)\n\nin_train , out_train = [] , []\nin_test , out_test = [] , []\n\n# in_train\nfor i in dataframe['FileName'] :\n in_train.append( BGR_to_Binary( cv2.imread(\"Data Set/\"+i) ) )\n# out_train\nfor i in dataframe[\"Class\"] :\n split = i.split(\",\")\n out_train.append(np.array([float(split[0]),float(split[1]),float(split[2]),float(split[3]),float(split[4])]))\n\n# in_test and out_test\nfor i in range( int(( len(in_train) / 100 ) * test_percen )):\n in_test.append(in_train.pop())\n out_test.append(out_train.pop())\n\n# print(\"Data train: \", len(in_train) , \"Data test\", len(in_test) ,\" sum : \",len(in_train)+len(in_test))\n\nin_train = np.uint16(in_train)\nout_train = np.uint16(out_train)\nin_test = np.uint16(in_test)\nout_test = np.uint16(out_test)\n\nin_train = in_train / 255.\nin_test = in_test / 255.\n\nprint(\"Data train: \", len(in_train) , \"Data test\", len(in_test) ,\" sum : \",len(in_train)+len(in_test))\n\n\n\n# create Model\nmodel = create_model(IMAGE_SIZE[0],IMAGE_SIZE[1],1)\nmodel.summary()\n\n# >>> !!! TRAIN START !!! 
<<<\ncheckpoint = ModelCheckpoint('model_weights', verbose=1, save_weights_only=True,monitor='val_accuracy',save_best_only=True, mode='max')\nmodel.fit(\n in_train,\n out_train,\n epochs = 60, \n validation_data = (in_test , out_test),\n callbacks = [checkpoint])\n","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248075308","text":"from django import template\n\nfrom Main.models import Profile\n\nregister = template.Library()\n\n\ndef human_readable(preference, choices):\n for choice in choices:\n if choice[0] == preference:\n return choice\n\n\n@register.filter\ndef get_preferences(user):\n exceptions = [\"id\", \"user_id\", \"temp_profile\", \"_state\"]\n current_profile = Profile.objects.get(user=user).__dict__\n fields = Profile.__dict__\n current_preferences = {}\n for preference in current_profile:\n if preference not in exceptions:\n current_preferences[preference] = {\"selected\": current_profile[preference],\n\n \"options\": fields[preference].field.choices\n }\n return current_preferences\n\n\n@register.filter\ndef beautify(text):\n # _ = \n # Capitalizes text\n text = text.replace(\"_\", \" \")\n return \" \".join([i.capitalize() for i in text.split()])\n\n\n@register.filter\ndef get_item(dic, key):\n return dic[key]\n","sub_path":"Settings/templatetags/settings_extras.py","file_name":"settings_extras.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"239074608","text":"'''\nCreated on 2016 máj. 24\n\n@author: Balázs\n'''\n\nfrom typing import Iterable\nfrom hu.farago.eum2.dto.Player import Player\nfrom hu.farago.eum2.dto.Model import Model\n\nclass ProbabilityOfStatusQuoCalculator(object):\n\n def __init__(self, model:Model):\n self.model = model\n\n def calculate(self):\n \n for i, playerI in enumerate(self.model.players):\n for j, playerJ in enumerate(self.model.players):\n multiplication = 1.0\n for k, playerK in enumerate(self.model.players):\n if k != i and k != j:\n multiplication *= (playerI.probabilityOfSuccess[playerK.name] + (1 - playerK.salience))\n playerI.probabilityOfStatusQuo[playerJ.name] = multiplication","sub_path":"hu/farago/eum2/calculator/ProbabilityOfStatusQuoCalculator.py","file_name":"ProbabilityOfStatusQuoCalculator.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424892292","text":"# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration\n\nfrom TriggerMenuMT.HLTMenuConfig.Menu.ChainDictTools import splitChainDict\nfrom TriggerMenuMT.HLTMenuConfig.MinBias.MinBiasChainConfiguration import MinBiasChainConfig as MinBiasChainConfig\nfrom TriggerMenuMT.HLTMenuConfig.Menu.ChainMerging import mergeChainDefs\n\n\nfrom AthenaCommon.Logging import logging\nlog = logging.getLogger( __name__ )\nlog.info(\"Importing %s\",__name__)\n\n\n\ndef generateChainConfigs( chainDict ):\n \n listOfChainDicts = splitChainDict(chainDict)\n log.debug(\"Implement case for minbias chain with %d legs \",len(listOfChainDicts))\n\n listOfChainDefs = []\n\n for subChainDict in listOfChainDicts:\n\n MinBias = MinBiasChainConfig(subChainDict).assembleChain()\n\n listOfChainDefs += [MinBias]\n log.debug('length of chaindefs %s', len(listOfChainDefs) )\n\n\n if len(listOfChainDefs)>1:\n log.debug(\"Implement case for mulit-leg minbias 
chain\")\n theChainDef = mergeChainDefs(listOfChainDefs, chainDict)\n else:\n theChainDef = listOfChainDefs[0]\n\n log.debug(\"theChainDef %s\" , theChainDef)\n\n return theChainDef\n","sub_path":"Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/MinBias/GenerateMinBiasChainDefs.py","file_name":"GenerateMinBiasChainDefs.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126888782","text":"from weather import app\nfrom weather.models import Station\nfrom flask import jsonify, request\nfrom datetime import datetime, timedelta\n\nLIST_OBS_FIELDS = ('issued', 'gust_kt', 'wind_spd_kt', 'wind_dir')\nDETAIL_OBS_FIELDS = ('issued', 'gust_kt', 'wind_spd_kt', 'wind_dir')\n\n@app.route(\"/latest\")\ndef latest():\n \"\"\"\n Get latest observations for Vic BOM stations\n \"\"\"\n\n stations = Station.query.all()\n jdict = {}\n for station in stations:\n jdict[station.name] = {\n \"lat\": station.lat,\n \"lng\": station.lng,\n \"station_id\": station.id,\n \"observation\": station.latest(LIST_OBS_FIELDS),\n }\n return jsonify(latest_observations=jdict)\n\n\n@app.route(\"/detail/\")\ndef detail(station):\n \"\"\"\n Get history of observations for a station. Default is last 24 hours.\n Takes GET params from and to and returns object with a 'next' attr\n to help UI extend series.\n \"\"\"\n\n ## get request bounds, default to last 24 hours\n args = request.args\n if \"from\" not in args or \"to\" not in args:\n to = datetime.now()\n frm = to - timedelta(days=1)\n else:\n to = datetime(args['to'])\n frm = datetime(args['from'])\n\n ## retrieve or error\n station_obj = Station.query.get(station)\n if station_obj is None:\n return jsonify(error=\"No station exists with id {}\".format(station))\n\n observations = station_obj.get_range(frm, to, DETAIL_OBS_FIELDS)\n nxt = \"/detail/{}?from={}&to={}\".format(station, to, to - timedelta(days=1))\n\n return jsonify(next=nxt, observations=observations)\n\n\n","sub_path":"api/weather/weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"446049895","text":"S = input('Введите строку S без пробелов! 
> ')\nn = int(input('Введите кол-во строк F > '))\nst = ''\nfor i in range(n):\n\tst += input('Введите строку F > ') + ' '\nst = st.split()\nprint(S)\nfor x in st:\n\tS1 = S\n\tx1 = x\n\tfor y in S1:\n\t\tif y in x:\n\t\t\tS1 = S1.replace(y, '')\n\t\t\tx = x.replace(y, '')\n\tprint(x, ' , ', S1,x == '')\n\tif x == '':\n\t\tprint('Строку ', x1, ' можно')\n\telse:\n\t\tprint('Строку ', x1, 'нельзя')\n\nd = 'bbb'\ng = d.replace('b','a')\nprint(g)","sub_path":"1st_semester/!РК/Sklifasovsky/РК 2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219981871","text":"from flask_restful import Resource\r\n\r\nfrom models.company import Company\r\nfrom models.people import People\r\n\r\nimport json\r\n\r\nclass CompanyView(Resource):\r\n def get(self, company):\r\n company_qs = Company.objects(company=company) \r\n if company_qs:\r\n people_list = []\r\n for c in company_qs:\r\n people_qs = People.objects(company_id=c.index)\r\n for p in people_qs:\r\n people_list.append(json.loads(p.to_json()))\r\n\r\n return { \"employees\": people_list }\r\n\r\n else:\r\n return { \"error\": \"Company not found.\" }, 404","sub_path":"app/views/CompanyView.py","file_name":"CompanyView.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606744347","text":"#!/usr/bin/env python3\nimport argparse\nimport os\n\nfrom shlex import quote\n\n\nEXT = '.mp4'\n\n\ndef run(command):\n print(command)\n os.system(command)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Generate GIF-like MP4 video for Telegram/Twitter/etc.')\n parser.add_argument('video', type=str, help='The input video path.')\n parser.add_argument('--span', nargs=2, type=str, metavar=('START', 'END'), help='Specify cut time.')\n parser.add_argument('--frame', nargs=2, type=int, metavar=('START', 'END'), help='Specify cut frame.')\n parser.add_argument('--crop', nargs=4, type=str, metavar=('WIDTH', 'HEIGHT', 'X', 'Y'), help='Crop input video.')\n parser.add_argument('--crf', type=int, default=23, help='Specify the Constant Rate Factor of output video.')\n parser.add_argument('--resize', action='store_true', help='Auto resize input video to 720P.')\n parser.add_argument('--audio', action='store_true', help=\"Don't remove input video audio track.\")\n parser.add_argument('--scale', type=str, nargs=2, metavar=('W', 'H'), help='ffmepg -vf scale=W:H')\n parser.add_argument('--filter', type=str, nargs='+', help='ffmepg -vf ...')\n parser.add_argument('--other', type=str, default='', help='Other ffmepg arguments.')\n parser.add_argument('-o', '--output', metavar='output', type=str)\n args = parser.parse_args()\n\n if not os.path.isfile(args.video):\n raise RuntimeError('Wrong input video path.')\n\n fmt = {\n 'crf': args.crf,\n 'other': args.other,\n }\n vf = []\n\n if args.output:\n fmt['output'] = args.output\n else:\n # Auto name output file.\n fmt['output'] = '[GIF]{}'.format(os.path.basename(os.path.splitext(args.video)[0]))\n prefix = 1\n new_name = fmt['output']\n while os.path.exists(new_name+EXT):\n prefix += 1\n new_name = '{} - {}'.format(fmt['output'], prefix)\n fmt['output'] = new_name\n\n fmt['output'] += EXT\n fmt['output'] = quote(fmt['output'])\n\n fmt['time'] = ''\n\n if args.span and args.frame:\n raise RuntimeError(\"--span and --frame can't specified at the same time.\")\n else:\n if args.span:\n fmt['time'] = '-ss {} 
-to {}'.format(*args.span)\n        if args.frame:\n            vf.append('select=between(n\\,{}\\,{}),setpts=PTS-STARTPTS'.format(*args.frame))\n\n    if args.crop:\n        vf.append('crop={}:{}:{}:{}'.format(*args.crop))\n\n    fmt['an'] = '-an'\n    if args.audio:\n        fmt['an'] = ''\n\n    if args.scale:\n        vf.append('scale={}:{}'.format(*args.scale))\n    elif args.resize:\n        vf.append('scale=-1:720')\n    if args.filter:\n        vf.extend(args.filter)\n    if len(vf) != 0:\n        fmt['vf'] = '-vf {}'.format(quote(','.join(vf)))\n    else:\n        fmt['vf'] = ''\n\n    run('ffmpeg -i {} -c:v libx264 -crf {crf} {vf} {time} -pix_fmt yuv420p -preset veryslow {other} {an} {output}'\n        .format(quote(args.video), **fmt))\n\n\nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"yibasuo.py","file_name":"yibasuo.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166780056","text":"# Create unique name and url list from given id's list\n\nfrom functions import *\nimport argparse\nimport signal\n\n\ndef signal_handler(signal, frame):\n    print('You pressed Ctrl+C!')\n    exit()\n\n\ndef argumentParser():\n    parser = argparse.ArgumentParser(description='Fetch Url')\n    parser.add_argument('-u', action=\"store\", default=False, dest='url', help=\"story url to be downloaded\")\n    parser.add_argument('-a', action=\"store\", default=False, dest='allStory', help=\"fetch all stories\")\n    parser.add_argument('-x', action=\"store\", default=False, dest='author', help=\"save in author directory\")\n    parser.add_argument('-b', action=\"store\", default=0, dest='batch', help=\"get batch number\", type=int)\n    arguments = {}\n    arguments[URL_ARGUMENT] = parser.parse_args().url\n    arguments[ALLSTORY_ARGUMENT] = parser.parse_args().allStory\n    arguments[AUTHOR_ARGUMENT] = parser.parse_args().author\n    arguments[BATCH_ARGUMENT] = parser.parse_args().batch\n    return arguments\n\n\ndef getStoriesFromUrls(urls, arguments):\n    for url in urls:\n        content = fetchContent(url)\n        if content is None:\n            continue\n        details = fetchDetails(content, url)\n        if details is None:\n            continue\n        story = \"\"\n        lenLink = len(details[LINK])\n        if lenLink == 1:\n            story = fetchStory(details[LINK][0])\n        else:\n            for i in range(0, lenLink):\n                tempStory = fetchStory(details[LINK][i])\n                if tempStory is None:\n                    continue\n                story += \"Section: {sectionName}\\n--------\\n\\n{story}\\n\\n\\n\\n\\n\\n\".format(sectionName=details[SECTION_LIST][i], story=tempStory)\n        story = story.strip()\n        if story is None:\n            continue\n        details[STORY] = story\n        if arguments[AUTHOR_ARGUMENT]:\n            saveStoryToDiskWithAuthor(details)\n        else:\n            saveStoryToDisk(details)\n\n\ndef fetchAllStories(arguments):\n    mainUrl = arguments[URL_ARGUMENT]\n    content = fetchContent(mainUrl)\n    if content is None:\n        return None\n    html = parseHtml(content)\n    aLinkArray = html.xpath(LINK_TABLE_XPATH)\n    if len(aLinkArray) == 0 or aLinkArray is None:\n        logging.warn(\"Wrong xpath for Link\")\n        return None\n    urls = []\n    for urlElement in aLinkArray:\n        url = urlElement.get(\"href\")\n        url = urljoin(mainUrl, url)\n        url = url.strip()\n        urls.append(url)\n    urls.sort()\n\n    flag = True\n    batchInit = arguments[BATCH_ARGUMENT]\n    listLen = len(urls)\n    increment = 0 + (batchInit * BATCH_SIZE)\n    while flag:\n        strBatch = \"Current Batch: {}\\tCurrent Increment: {}\".format(batchInit, increment)\n        logging.info(strBatch)\n        tempList = []\n        while increment < listLen:\n            tempList.append(urls[increment])\n            increment += 1\n            if increment % BATCH_SIZE == 0:\n                batchInit += 1\n                break\n        
getStoriesFromUrls(tempList, arguments)\n        if increment >= listLen:\n            flag = False\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\narguments = argumentParser()\n\nif arguments[ALLSTORY_ARGUMENT]:\n    fetchAllStories(arguments)\nelse:\n    if not arguments[URL_ARGUMENT]:\n        url = URLS\n    else:\n        # wrap the single user-supplied url in a list\n        url = [arguments[URL_ARGUMENT]]\n\n    if not os.path.exists(BASE_PATH):\n        logging.error(\"BASE PATH Doesn't exist\")\n        exit()\n\n    getStoriesFromUrls(url, arguments)\n","sub_path":"mcstories/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427639881","text":"# Rita\n\n\"\"\"\n\nWeb server main entry\n\nHow to run:\n$python scripts/rita.py\n\n\"\"\"\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\n\nfrom rita.www import assets\n\nclass PingHandler(tornado.web.RequestHandler):\n\tdef get(self):\n\t\tself.write('IPong')\n\n\nclass PageNotFoundHandler(tornado.web.RequestHandler):\n\tdef get(self):\n\t\tself.render('404.html', title='Page Not Found!',\n\t\t\t\t\tinfo='We can\\'t find the thing you were looking for.')\n\nhandlers=[(r\"/iping\", PingHandler),\n\t\t  (r'/.*', PageNotFoundHandler)]\n\nsettings={\n'template_path': assets.ResourceManager.Instance().template_path,\n'static_path': assets.ResourceManager.Instance().static_path,\n'debug': True\n}\n\ndef StartWWWServer(port=8080):\n\tapp = tornado.web.Application(handlers, **settings)\n\thttp_server = tornado.httpserver.HTTPServer(app)\n\thttp_server.listen(port)\n\ttornado.ioloop.IOLoop.instance().start()\n\ndef StopWWWServer():\n\tpass\n\n\n\n","sub_path":"backend/www/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"364410476","text":"import logging\nimport os\n\nimport giphypop\n\nfrom twitterpibot.logic import fsh\n\nlogger = logging.getLogger(__name__)\n\ng = None\n\n\ndef _init(screen_name):\n    global g\n    if not g:\n        g = giphypop.Giphy()\n\n    folder = fsh.root + \"temp\" + os.sep + \"gif\" + os.sep + screen_name + os.sep\n    fsh.ensure_directory_exists(folder)\n    return folder\n\n\ndef get_random_gif(screen_name, text=None):\n    folder = _init(screen_name)\n    gif = None\n    if text:\n        gif = g.translate(text)\n    if not gif:\n        gif = g.random_gif()\n\n    return _download_gif(folder, gif)\n\n\ndef get_gif(screen_name, text):\n    folder = _init(screen_name)\n    return _download_gif(folder, g.translate(text))\n\n\ndef _download_gif(folder, gif):\n    if gif:\n        return fsh.download_file(folder=folder, url=gif.media_url)\n    else:\n        return None\n","sub_path":"twitterpibot/logic/giphyhelper.py","file_name":"giphyhelper.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401465889","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" \"\"\"\n\nfrom __future__ import print_function\nimport sys\nsys.path.append('../..')\nimport cv2\nimport argparse\nimport subprocess\nimport numpy as np\nfrom os.path import basename, join, splitext\nfrom common import make_noisy\n\n__author__ = 'Kyosuke Yamamoto (kyon)'\n__date__ = '01 Jan 2017'\n\n\ndef set_images(images, dtype):\n    ''' Copy train/val images '''\n\n    #assert\n    if dtype not in ('train', 'val'):\n        raise Exception('unknown dtype ' + dtype)\n\n    #copy images\n    for src in images:\n        src = join(args.base, src)\n        fn, ext = splitext(basename(src))\n        im = cv2.imread(src, 
3)\n\n #noisy image\n noisy = make_noisy(im)\n blocks = blockshaped(noisy, size, size)\n for i, block in enumerate(blocks):\n dst = join(args.dst, 'A', dtype, '{}_{}{}'.format(fn, i, ext))\n cv2.imwrite(dst, block)\n print(src, '->', dst)\n\n #original image\n blocks = blockshaped(im, size, size)\n for i, block in enumerate(blocks):\n dst = join(args.dst, 'B', dtype, '{}_{}{}'.format(fn, i, ext))\n cv2.imwrite(dst, block)\n print(src, '->', dst)\n\n\ndef blockshaped(arr, nrows, ncols):\n ''' Split arr into blocks with same shapes\n http://stackoverflow.com/questions/16856788/slice-2d-array-into-smaller-2d-arrays '''\n\n h, w, d = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols, 3)\n .swapaxes(1, 2)\n .reshape(-1, nrows, ncols, 3))\n\n\nif __name__ == '__main__':\n\n #argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', '-t', default='../../dataset/row/20_512/train.csv', help='path to list of training images')\n parser.add_argument('--eval', '-e', default='../../dataset/row/20_512/eval.csv', help='path to list of evaluation images')\n parser.add_argument('--base', default='../../', help='path to image dir')\n parser.add_argument('--dst', default='datasets/row')\n parser.add_argument('--epoch', type=int, default=500)\n parser.add_argument('--no_setimg', action='store_true')\n parser.add_argument('--no_train', action='store_true')\n parser.add_argument('--no_eval', action='store_true')\n args = parser.parse_args()\n\n #param\n size = 256 # input size to pix2pix\n\n #images for training/validation\n with open(args.train) as f:\n train_images = f.read().splitlines()\n with open(args.eval) as f:\n val_images = f.read().splitlines()\n\n #set images\n if not args.no_setimg:\n\n #init\n for t1 in ('A', 'B'):\n for t2 in ('train', 'val'):\n subprocess.check_call('mkdir -p {}/{}/{}'.format(args.dst, t1, t2), shell=True)\n\n #copy train images\n set_images(train_images, 'train')\n set_images(train_images + val_images, 'val')\n\n #combine\n cmd = 'python scripts/combine_A_and_B.py \\\n --fold_A {} --fold_B {} --fold_AB {}'.format(join(args.dst, 'A'), join(args.dst, 'B'), args.dst)\n subprocess.check_call(cmd, shell=True)\n\n #train\n if not args.no_train:\n cmd = 'DATA_ROOT={} name=row which_direction=AtoB niter={} th train.lua'.format(args.dst, args.epoch)\n print('running', cmd)\n subprocess.check_call(cmd, shell=True)\n\n #eval\n if not args.no_eval:\n cmd = 'DATA_ROOT={} name=row which_direction=AtoB which_epoch={} th test.lua'.format(args.dst, args.epoch)\n print('running', cmd)\n subprocess.check_call(cmd, shell=True)\n\n #combine blocks (todo: totally fixed)\n for fp in train_images + val_images:\n resdir = 'results/row/{}_net_G_val/images/output'.format(args.epoch)\n fp = join(resdir, basename(fp))\n im0 = cv2.imread(fp.replace('.png', '_0.png'))\n im1 = cv2.imread(fp.replace('.png', '_1.png'))\n im2 = cv2.imread(fp.replace('.png', '_2.png'))\n im3 = cv2.imread(fp.replace('.png', '_3.png'))\n im = np.vstack((np.hstack((im0, im1)),\n np.hstack((im2, im3))))\n out = join(resdir, basename(fp))\n cv2.imwrite(out, im)\n print('saved as', out)\n","sub_path":"my_train_eval.py","file_name":"my_train_eval.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255555490","text":"# -*- coding: utf-8 -*-\n# 3.0\n\n# \n\nimport psutil\nimport os\nimport time\nfrom collections import deque\nfrom threading import Thread\nimport json\nimport flask\nimport numpy as np\nimport 
json\nfrom flask import abort\nfrom flask import request, jsonify\nimport chartkick\nimport datetime\nimport logging\n\n# \nASSETS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../static')\n\napp = flask.Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n\t\n\treturn flask.render_template(\"index.html\")\n\ndef worker():\n\titer=10\n\tcurrentstate=0\n\twhile True:\n\t\tif len(serverinfo)<60:\n\t\t\tinfo={}\n\t\t\tnow = datetime.datetime.now()\n\t\t\tinfo['Time']=str(now.hour) + \":\" + str(now.minute) + \":\" + str(now.second)\n\t\t\tinfo['Load']=os.getloadavg()[0]\n\t\t\tinfo['CPU']=psutil.cpu_percent(interval=1)\n\t\t\tinfo['Memory']=psutil.virtual_memory()[2]\n\t\t\tinfo['Disk_usage']=psutil.disk_usage('/')\n\t\t\tserverinfo.append(info)\n\t\telse:\n\t\t\tinfo={}\n\t\t\tnow = datetime.datetime.now()\n\t\t\tinfo['Time']=str(now.hour) + \":\" + str(now.minute) + \":\" + str(now.second)\n\t\t\tinfo['Load']=os.getloadavg()[0]\n\t\t\tinfo['CPU']=psutil.cpu_percent(interval=1)\n\t\t\tinfo['Memory']=psutil.virtual_memory()[2]\n\t\t\tinfo['Disk_usage']=psutil.disk_usage('/')\n\t\t\tserverinfo.popleft()\n\t\t\tserverinfo.append(info)\n\t\tif iter==10:\n\t\t\ti = serverinfo[0]['Load']\n\t\t\tif i>1:\n\t\t\t\tcurrentstate=1\n\t\t\t\tnow = datetime.datetime.now()\n\t\t\t\tinfo['Time']=str(now.hour) + \":\" + str(now.minute) + \":\" + str(now.second)\n\t\t\t\tinfo['State']='High'\n\t\t\telse:\n\t\t\t\tcurrentstate=0\n\t\t\t\tnow = datetime.datetime.now()\n\t\t\t\tinfo['Time']=str(now.hour) + \":\" + str(now.minute) + \":\" + str(now.second)\n\t\t\t\tinfo['State']='Low'\n\t\t\tloadstates.append(info)\n\n\t\tif (iter%60)==0:\n\t\t\tsum = 0 \n\t\t\tcount =0\n\t\t\tfor info in reversed(serverinfo):\n\t\t\t\tif(count>=12):\n\t\t\t\t\tbreak\n\t\t\t\tsum=info['Load']+sum\n\t\t\t\tcount=count+1\n\t\t\t\n\t\t\tmean = (sum/count);\n\t\t\tapp.logger.info('mean {}'.format(mean%120))\n\t\t\tif mean>1:\n\t\t\t\tif(currentstate!=1):\n\t\t\t\t\tcurrentstate=1\n\t\t\t\t\tnow = datetime.datetime.now()\n\t\t\t\t\tinfo['Time']=str(now.hour) + \":\" + str(now.minute) + \":\" + str(now.second)\n\t\t\t\t\tinfo['State']='High'\n\t\t\t\t\tloadstates.append(info)\n\t\t\telse:\n\t\t\t\tif(currentstate!=0):\n\t\t\t\t\tcurrentstate=0\n\t\t\t\t\tnow = datetime.datetime.now()\n\t\t\t\t\tinfo['Time']=str(now.hour) + \":\" + str(now.minute) + \":\" + str(now.second)\n\t\t\t\t\tinfo['State']='Low'\n\t\t\t\t\tloadstates.append(info)\n\t\titer=iter+10\n\t\ttime.sleep(10)\n@app.route(\"/serverinfo\")\ndef server_load():\n\treturn json.dumps(list(serverinfo))\n\n@app.route(\"/loadstates\")\ndef load_states():\n\treturn json.dumps(list(loadstates))\n\nif __name__ == \"__main__\":\n\t\n\tserverinfo = deque()\n\tloadstates = deque()\n\tt = Thread(target=worker)\n\tt.daemon = True\n\tt.start()\n\t\n\tport = 5002\n\n\t# Set up the development server on port 8000.\n\tapp.debug = True\n\tapp.run(port=port)\n\n# \n\n\n","sub_path":"datadog.py","file_name":"datadog.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436744663","text":"\nfrom ClassTagRfid import RfidTag\nimport math\nclass RfIDCategory(object):\n \n def __init__(self, nameID):\n self.nameID=nameID\n self.RFIDList=[]\n self.XCM=0\n self.YCM=0\n self.AVGDIST=0\n self.MAXDIST=0\n \n def GETCM(self):\n SumX=0\n SumY=0\n \n for rfid in self.RFIDList:\n SumX=rfid.xpos+SumX\n SumY=rfid.ypos+SumY\n try: \n self.XCM=SumX/len(self.RFIDList)\n 
self.YCM=SumY/len(self.RFIDList)\n        except:\n            self.XCM=0\n            self.YCM=0\n    \n    def GetAvgDistCM(self):\n        DistSum=0\n        for rfid in self.RFIDList:\n            DistSum=DistSum+math.sqrt(((rfid.xpos-self.XCM)*(rfid.xpos-self.XCM))+((rfid.ypos-self.YCM)*(rfid.ypos-self.YCM)))\n        try:\n            self.AVGDIST=DistSum/len(self.RFIDList)\n        except:\n            self.AVGDIST=0\n    \n    \n    def GetMaxDistCM(self):\n        MaxDist=0\n        for rfid in self.RFIDList:\n            Dist=math.sqrt(((rfid.xpos-self.XCM)*(rfid.xpos-self.XCM))+((rfid.ypos-self.YCM)*(rfid.ypos-self.YCM)))\n            if (MaxDist 0.01 &'\n                              'jetID.fHPD < 0.98 &'\n                              'jetID.n90Hits > 1'\n                              )\n\nprocess.trackCountingHighEffBJets = process.selectedPatJets.clone(src = 'goodJets',\n                                                                  cut = 'bDiscriminator(\\\"trackCountingHighEffBJetTags\\\") > 1.9'\n                                                                  )\n\nprocess.simpleSecondaryVertexBJets = process.selectedPatJets.clone(src = 'goodJets',\n                                                                   cut = 'bDiscriminator(\\\"simpleSecondaryVertexBJetTags\\\") > 2.02'\n                                                                   )\n\n## define ordered jets\nuds0 = cms.PSet(index = cms.int32(0), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\nuds1 = cms.PSet(index = cms.int32(1), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\nuds2 = cms.PSet(index = cms.int32(2), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\nuds3 = cms.PSet(index = cms.int32(3), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\nuds4 = cms.PSet(index = cms.int32(4), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\nuds5 = cms.PSet(index = cms.int32(5), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\nbottom0 = cms.PSet(index = cms.int32(0), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\nbottom1 = cms.PSet(index = cms.int32(1), correctionLevel = cms.string('L3Absolute'), useTree = cms.bool(False))\n\n## ---\n## MONITOR JET KINEMATICS\n## ---\n\n## jet kinematics analyzer\nprocess.load(\"TopAnalysis.TopAnalyzer.JetKinematics_cfi\")\n\n## collect kinematics analyzers\nprocess.leadingJetKinematics = process.analyzeJetKinematics.clone (src = 'goodJets')\nprocess.lead_0_JetKinematics = process.analyzeJetKinematics.clone (src = 'goodJets', analyze = uds0 )\nprocess.lead_1_JetKinematics = process.analyzeJetKinematics.clone (src = 'goodJets', analyze = uds1 )\nprocess.lead_2_JetKinematics = process.analyzeJetKinematics.clone (src = 'goodJets', analyze = uds2 )\nprocess.lead_3_JetKinematics = process.analyzeJetKinematics.clone (src = 'goodJets', analyze = uds3 )\nprocess.lead_4_JetKinematics = process.analyzeJetKinematics.clone (src = 'goodJets', analyze = uds4 )\nprocess.lead_5_JetKinematics = process.analyzeJetKinematics.clone (src = 'goodJets', analyze = uds5 )\n\nprocess.bTCHEJetKinematics = process.analyzeJetKinematics.clone (src = 'trackCountingHighEffBJets' )\nprocess.bTCHE_0_JetKinematics = process.analyzeJetKinematics.clone (src = 'trackCountingHighEffBJets' , analyze = bottom0)\nprocess.bTCHE_1_JetKinematics = process.analyzeJetKinematics.clone (src = 'trackCountingHighEffBJets' , analyze = bottom1)\n\nprocess.bSSVJetKinematics = process.analyzeJetKinematics.clone (src = 'simpleSecondaryVertexBJets' )\nprocess.bSSV_0_JetKinematics = process.analyzeJetKinematics.clone (src = 'simpleSecondaryVertexBJets' , analyze = bottom0)\nprocess.bSSV_1_JetKinematics = process.analyzeJetKinematics.clone (src = 'simpleSecondaryVertexBJets' , analyze = bottom1)\n## analyze the kinematics of the jets in the event\nprocess.p1 = cms.Path(process.goodJets *\n                      process.trackCountingHighEffBJets *\n                      
process.simpleSecondaryVertexBJets *\n process.bTCHEJetKinematics +\n process.bTCHE_0_JetKinematics +\n process.bTCHE_1_JetKinematics +\n process.bSSVJetKinematics +\n process.bSSV_0_JetKinematics +\n process.bSSV_1_JetKinematics +\n process.leadingJetKinematics +\n process.lead_0_JetKinematics +\n process.lead_1_JetKinematics +\n process.lead_2_JetKinematics +\n process.lead_3_JetKinematics +\n process.lead_4_JetKinematics +\n process.lead_5_JetKinematics \n )\n","sub_path":"TopAnalyzer/test/analyzeJetKinematics_cfg.py","file_name":"analyzeJetKinematics_cfg.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183312951","text":"####################################################################################################\ndef ElementFromURL(url):\n \"\"\"setup requests html\"\"\"\n\n match = False\n name = Hash.MD5(url)\n path = Core.storage.data_item_path(URL_CACHE_DIR)\n Core.storage.ensure_dirs(path)\n files = [f for f in Core.storage.list_dir(path) if not Core.storage.dir_exists(Core.storage.join_path(path, f))]\n\n for filename in files:\n if filename == name:\n match = True\n if (Datetime.FromTimestamp(Core.storage.last_modified(Core.storage.join_path(path, filename))) + TIMEOUT) <= Datetime.Now():\n Log.Debug('* Re-Caching \\'{}\\' to {}'.format(url, URL_CACHE_DIR))\n html = get_element_from_url(url, name)\n break\n else:\n Log.Debug('* Fetching \\'{}\\' from {}'.format(url, URL_CACHE_DIR))\n html = HTML.ElementFromString(Data.Load(Core.storage.join_path(URL_CACHE_DIR, filename)))\n break\n\n if not match:\n Log.Debug('* Caching \\'{}\\' to {}'.format(url, URL_CACHE_DIR))\n html = get_element_from_url(url, name)\n\n return html\n\n####################################################################################################\ndef get_element_from_url(url, name, count=0):\n \"\"\"error handling for URL requests\"\"\"\n\n try:\n page = requests.get(url, headers=Headers.get_headers_for_url(url))\n if (int(page.status_code) == 503) or (len(page.history) > 0):\n if count <= 1:\n count += 1\n if len(page.history) > 0:\n type_title = Common.GetTypeTitle(url)\n req_base_url = Regex(r'(https?\\:\\/\\/(?:www\\.)?\\w+\\.\\w+)').search(page.url).group(1)\n base_url = Regex(r'(https?\\:\\/\\/(?:www\\.)?\\w+\\.\\w+)').search(url).group(1)\n if req_base_url == base_url:\n page = requests.get(page.url, headers=Headers.get_headers_for_url(req_base_url))\n if Regex(r'(^The service is unavailable.$)').search(page.text):\n Log.Warn('* The service is unavailable. Not caching \\'{}\\''.format(page.url))\n elif Regex(r'\\/recaptcha\\/api\\.js').search(page.text):\n Log.Error(u'* Human Verification needed for \\'{}\\''.format(page.url))\n Log.Warn(str(page.text))\n return HTML.Element('head', 'Error')\n else:\n Data.Save(Core.storage.join_path(URL_CACHE_DIR, name), page.text)\n return HTML.ElementFromString(page.text)\n else:\n Log.Warn('* get_element_from_url Error: HTTP 301 Redirect Error. Refreshing {} Domain'.format(type_title))\n Log.Warn('* get_element_from_url Error: page history {} | {}'.format(url, page.history))\n Domain.UpdateDomain(type_title, True)\n url = Common.CorrectURL(url)\n else:\n Log.Warn('* get_element_from_url Error: HTTP 503 Site Error. 
Refreshing site cookies')\n Headers.get_headers_for_url(url, update=True)\n return get_element_from_url(url, name, count)\n else:\n Log.Error('* get_element_from_url Error: HTTP 503 Site error, tried refreshing cookies but that did not fix the issue')\n if Data.Exists(Core.storage.join_path(URL_CACHE_DIR, name)):\n Log.Warn('* Using old cached page')\n return HTML.ElementFromString(page.text)\n else:\n try:\n page.raise_for_status()\n if Regex(r'(^The service is unavailable.$)').search(page.text):\n Log.Warn('* The service is unavailable. Not caching \\'{}\\''.format(page.url))\n elif Regex(r'\\/recaptcha\\/api\\.js').search(page.text):\n Log.Error(u'* Human Verification needed for \\'{}\\''.format(page.url))\n Log.Warn(str(page.text))\n return HTML.Element('head', 'Error')\n else:\n Data.Save(Core.storage.join_path(URL_CACHE_DIR, name), page.text)\n return HTML.ElementFromString(page.text)\n except:\n if (int(page.status_code) == 522):\n Log.Exception('* get_element_from_url Error: HTTP 522 Site error, site is currently offline')\n elif (int(page.status_code) == 524):\n Log.Exception('* get_element_from_url Error: HTTP 524 Site Error, A timeout occurred')\n if count < 1:\n Log.Debug('* ReTrying \\'{}\\''.format(page.url))\n count += 1\n return get_element_from_url(url, name, count)\n else:\n Log.Exception('* get_element_from_url Error: Unknown Site Error, check output below.')\n except:\n Log.Exception('* get_element_from_url Error: Failed to load {}'.format(url))\n\n return HTML.Element('head', 'Error')\n","sub_path":"Contents/Code/rhtml.py","file_name":"rhtml.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222240100","text":"from ..token import Token\nfrom .node import Node\nfrom .identifier_list import IdentifierList\n\n\nclass ReadStatement(Node):\n \"\"\"\n A read statement.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the read statement.\n \"\"\"\n\n self.identifiers = IdentifierList()\n\n @classmethod\n def parse(cls, tokenizer, identifiers):\n \"\"\"\n Parse the read statement.\n \"\"\"\n\n statement = ReadStatement()\n cls.extract_token(tokenizer, Token.READ)\n statement.identifiers = IdentifierList.parse(tokenizer, identifiers)\n cls.extract_token(tokenizer, Token.SEMICOLON)\n return statement\n\n def execute(self, identifiers, data):\n \"\"\"\n Execute the statement.\n \"\"\"\n\n if len(data) < len(self.identifiers):\n raise RuntimeError(\n \"not enough data to read {0}\".format(self.identifiers))\n\n for identifier in self.identifiers:\n identifiers.set_value(identifier.name, data.popleft())\n\n def __str__(self):\n \"\"\"\n Human-readable string representation.\n \"\"\"\n\n return \"{0} {1}{2}\".format(\n Token.READ.value[1],\n self.identifiers,\n Token.SEMICOLON.value[1])\n","sub_path":"Core/cse3341/pt/read_statement.py","file_name":"read_statement.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44208341","text":"\"\"\"orders\n\nRevision ID: 839020a2f1db\nRevises: 0c1502e5ff59\nCreate Date: 2020-07-11 13:23:32.146431\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '839020a2f1db'\ndown_revision = '0c1502e5ff59'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('orders',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('thunderservice_id', sa.Integer(), nullable=True),\n sa.Column('placeOrderTime', sa.BigInteger(), nullable=True),\n sa.Column('paymentMethod', sa.String(length=20), nullable=False),\n sa.Column('paymentTime', sa.BigInteger(), nullable=True),\n sa.Column('paymentSN', sa.String(length=100), nullable=True),\n sa.Column('emailNotification', sa.Boolean(), nullable=True),\n sa.Column('amount', sa.Float(), nullable=True),\n sa.Column('paymentStatus', sa.String(length=20), nullable=False),\n sa.Column('thunderserviceStatus', sa.String(length=20), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('orders')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/839020a2f1db_orders.py","file_name":"839020a2f1db_orders.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516228124","text":"from docx import Document\r\nimport shutil\r\nimport os,stat\r\nimport datetime\r\nimport pyexcel\r\nimport openpyxl\r\nfrom openpyxl import Workbook\r\nfrom openpyxl.styles import Alignment, Border, Side, PatternFill\r\nimport ctypes # An included library with Python install.\r\n\r\n\r\ndef message_box(title, message):\r\n ctypes.windll.user32.MessageBoxA(None, message, title, 0)\r\n\r\nfiles = os.listdir(os.curdir)\r\nindus_folder_name = os.getcwd().split(\"\\\\\")[-1]\r\n\r\nerror_count = 0\r\nkey_position = 0\r\n\r\nselected_indus_number = 358\r\n\r\nsource_xl = 'EMS_Meeting_Minutes_2018.xls'\r\ndestination_xl = 'Follow-up_CR_CN_ECU_Daimler.xlsx'\r\ndestination_sheet_save_as = 'Follow-up_CR_CN_ECU_Daimler.xlsx'\r\ndestination_sheet_name = 'DAIMLER 651'\r\n\r\nsource_sheet = pyexcel.get_sheet(file_name=source_xl)\r\nsource_sheet.name_columns_by_row(0)\r\n\r\ndestination_book = openpyxl.load_workbook(destination_xl)\r\ndestination_sheet = destination_book[destination_sheet_name]\r\n\r\nmy_border = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'),\r\n bottom=Side(style='thin'))\r\nblank_row_color = PatternFill(start_color='a6a6a6', end_color='a6a6a6', fill_type='solid')\r\ndelivery_color = PatternFill(start_color='fde9d9', end_color='a6a6a6', fill_type='solid')\r\n\r\nitem_quantity = 0\r\nvalues_to_xl = []\r\nblank_row = (' ', ' ', ' ', ' ', ' ')\r\n\r\nos.chmod('Follow-up_CR_CN_ECU_Daimler.xlsx', stat.S_IWRITE)\r\n\r\nif indus_folder_name.startswith(\"ME\"):\r\n selected_indus_number = indus_folder_name\r\nelif indus_folder_name.startswith(\"Indus\"):\r\n selected_indus_number = indus_folder_name.split(\"Indus\")[-1]\r\n\r\nfound_in_xl = 0\r\nappend_before = 0\r\n\r\nfor row in source_sheet:\r\n if row[3] == int(selected_indus_number):\r\n found_in_xl = 1\r\n codes_with_delivery = row[2]\r\n ecu_type = row[4][3:]\r\n sw_version = row[6]\r\n cr_number = row[8]\r\n cr_creation = row[10]\r\n break\r\n\r\nfor i in range(1, destination_sheet.max_row):\r\n if destination_sheet.cell(row=i, column=5).value == sw_version and destination_sheet.cell(row=i, column=6).value == int(selected_indus_number):\r\n append_before = 1\r\n break\r\n\r\nif found_in_xl == 0:\r\n message_box(\"Error\", \"Indus number couldnt find in Excel Sheet.\")\r\nelif append_before == 1:\r\n message_box(\"Error\", 
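For orientation, the `orders` table that the Alembic revision above creates corresponds roughly to the following declarative model — a sketch and an assumption on my part, not code from the original repository:

```python
# SQLAlchemy >= 1.4; older versions import declarative_base from sqlalchemy.ext.declarative
from sqlalchemy import Column, Integer, BigInteger, String, Boolean, Float
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Order(Base):
    __tablename__ = 'orders'

    # column names and types mirror the op.create_table() call above
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(Integer, nullable=True)
    thunderservice_id = Column(Integer, nullable=True)
    placeOrderTime = Column(BigInteger, nullable=True)
    paymentMethod = Column(String(20), nullable=False)
    paymentTime = Column(BigInteger, nullable=True)
    paymentSN = Column(String(100), nullable=True)
    emailNotification = Column(Boolean, nullable=True)
    amount = Column(Float, nullable=True)
    paymentStatus = Column(String(20), nullable=False)
    thunderserviceStatus = Column(String(20), nullable=False)
```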
\"Appended Before\")\r\nelse:\r\n if codes_with_delivery == \"\" or ecu_type == \"\" or sw_version == \"\" or cr_number == \"\" or cr_creation == \"\":\r\n message_box(\"Error\",\"This indus number has missing values.\")\r\n else:\r\n for f in files:\r\n if f.endswith('.docx'):\r\n file_name = f\r\n\r\n document = Document(file_name)\r\n tables = document.tables\r\n\r\n if not tables[0].rows[31].cells[2].text:\r\n error_count += 1\r\n\r\n if not tables[0].rows[32].cells[2].text:\r\n\r\n if not tables[0].rows[33].cells[2].text:\r\n\r\n if not tables[0].rows[34].cells[2].text:\r\n key_position = 0\r\n with open(\"Error-Logs.txt\", \"a\") as text_file:\r\n text_file.write(str(error_count) + \"- \" + file_name + \" file has no code.\\n\")\r\n text_file.write(\r\n \"-----------------------------------------------------------------------------------\\n\")\r\n else:\r\n unique_key = tables[0].rows[34].cells[2].text\r\n key_position = 4\r\n else:\r\n unique_key = tables[0].rows[33].cells[2].text\r\n key_position = 3\r\n else:\r\n unique_key = tables[0].rows[32].cells[2].text\r\n key_position = 2\r\n\r\n if key_position != 0:\r\n with open(\"Error-Logs.txt\", \"a\") as text_file:\r\n text_file.write(\r\n str(error_count) + \"- \" + file_name + \" 's code is in \" + str(\r\n key_position) + \". line.\\n\")\r\n text_file.write(\r\n \"------------------------------------------------------------------------------ \\n\")\r\n\r\n\r\n else:\r\n unique_key = tables[0].rows[31].cells[2].text\r\n key_position = 1\r\n\r\n if key_position != 0:\r\n\r\n unique_key_file_name = unique_key + '.s19'\r\n sw_update_code = tables[0].rows[6].cells[3].text\r\n\r\n daimler_part_number = file_name.split(\"_\")[1]\r\n file_name_first = \"-\".join(unique_key.split(\"-\")[3:-1])\r\n file_name_sec = unique_key.split(\"-\")[-1].split(\"_\")[0]\r\n folder_name = file_name_first + '-' + file_name_sec + '-' + daimler_part_number\r\n\r\n title = \"-\".join(unique_key.split(\"-\")[0:3]) + '-' + daimler_part_number\r\n ref_daimler = daimler_part_number\r\n item_quantity += 1\r\n comment = unique_key_file_name\r\n\r\n sw_title = \"-\".join(unique_key.split(\"-\")[0:3]) + '-' + sw_update_code.replace(\" \", \"\")\r\n sw_ref_daimler = sw_update_code.replace(\" \", \"\")\r\n sw_item_quantity = 1\r\n sw_comment = \"CODE\"\r\n\r\n if os.path.exists(unique_key_file_name):\r\n if not os.path.exists(folder_name):\r\n os.makedirs(folder_name)\r\n\r\n shutil.move(unique_key_file_name, folder_name)\r\n shutil.move(file_name, folder_name)\r\n\r\n values_to_xl.append(\r\n (cr_number, cr_creation, '', '', sw_version, int(selected_indus_number), ecu_type, title,\r\n ref_daimler, '', item_quantity, '', '', '', '', '', '', '', '', '', comment))\r\n\r\n\r\n\r\n else:\r\n error_count += 1\r\n if not os.path.exists(\"Error-Logs.txt\"):\r\n with open(\"Error-Logs.txt\", \"w\") as text_file:\r\n text_file.write(str(error_count) + \"- \" + file_name + \" 's code dont match with\\n \"\r\n + unique_key_file_name + \"\\n\")\r\n text_file.write(\r\n \"---------------------------------------------------------------------------\\n\")\r\n else:\r\n with open(\"Error-Logs.txt\", \"a\") as text_file:\r\n text_file.write(str(error_count) + \"- \" + file_name + \" 's code dont match with\\n \"\r\n + unique_key_file_name + \"\\n\")\r\n text_file.write(\r\n \"------------------------------------------------------------------------- \\n\")\r\n if codes_with_delivery == 1:\r\n values_to_xl.append((cr_number, cr_creation, '', '', sw_version, int(selected_indus_number), 
ecu_type, sw_title,\r\n sw_ref_daimler, '', sw_item_quantity, '', '', '', '', '', '', '', '', '', sw_comment))\r\n\r\n last_row = destination_sheet.max_row\r\n i = 1\r\n\r\n for row in values_to_xl:\r\n destination_sheet.append(row)\r\n for col in range(1, 12):\r\n destination_sheet.cell(row=last_row + i, column=col).alignment = openpyxl.styles.Alignment(\r\n horizontal='center',\r\n vertical='center')\r\n destination_sheet.cell(row=last_row + i, column=col).border = my_border\r\n if codes_with_delivery == 1:\r\n destination_sheet.cell(row=last_row + i, column=col).fill = delivery_color\r\n\r\n destination_sheet.cell(row=last_row + i, column=2).number_format = 'DD.MM.YYYY'\r\n\r\n destination_sheet.cell(row=last_row + i, column=21).alignment = openpyxl.styles.Alignment(\r\n horizontal='center',\r\n vertical='center')\r\n destination_sheet.cell(row=last_row + i, column=21).border = my_border\r\n if codes_with_delivery == 1:\r\n destination_sheet.cell(row=last_row + i, column=21).fill = delivery_color\r\n\r\n i += 1\r\n\r\n destination_sheet.append(blank_row)\r\n\r\n for col in range(1, 12):\r\n destination_sheet.cell(row=last_row + i, column=col).fill = blank_row_color\r\n destination_sheet.cell(row=last_row + i, column=col).border = my_border\r\n\r\n destination_sheet.cell(row=last_row + i, column=21).fill = blank_row_color\r\n destination_sheet.cell(row=last_row + i, column=21).border = my_border\r\n\r\n destination_book.save(destination_sheet_save_as)\r\n\r\n message_box('Complete', 'Process Completed.')\r\n\r\nwith open(\"Error-Logs.txt\", \"a\") as text_file:\r\n text_file.write(\"Total Error:\"+str(error_count)+\" Time:\" + str(datetime.datetime.now().time()) + \"\\n\")\r\n text_file.write(\"-----------------------------------------------------------------------------------------------\\n\")\r\n\r\n\r\n","sub_path":"tool_old_versions/daimler_tool_v1.py","file_name":"daimler_tool_v1.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619969790","text":"import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom graphs.group import game_length, attempts, winners\n\ndef body(data):\n return html.Center(dbc.Container([\n dbc.Row([\n dbc.Col([\n html.Br(),\n html.H1(\"Card Game Data (From {date})\"\n .format(date=data[0].start_date)),\n html.Hr()\n ])\n ]),\n dbc.Row([\n dbc.Col([\n html.Br(),\n html.H2(\"Average Game Length\"),\n dcc.Graph(\n figure=game_length.get_graph(data)\n )\n ]),\n ]),\n dbc.Row([\n dbc.Col([\n html.Br(),\n html.H2(\"Game Winners\"),\n dcc.Graph(\n figure=winners.get_graph(data)\n )\n ])\n ]),\n dbc.Row([\n dbc.Col([\n html.Br(),\n html.H3(\"Individual Guess Data\"),\n dcc.Graph(\n figure=attempts.get_graph(data)\n )\n ])\n ])\n ]))\n\ndef GroupData(data):\n return html.Div([\n body(data)\n ])","sub_path":"pages/card_group_data_page.py","file_name":"card_group_data_page.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598497888","text":"'''\r\nlinear classification for image\r\n---created by Z.Zhang 3/21/2018\r\n'''\r\n# import the necessary packages\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.model_selection import train_test_split\r\nfrom imutils import paths\r\nimport numpy as 
np\r\nimport imutils\r\nimport cv2\r\nimport os\r\n\r\ndataPath = \"D:\\\\umkc\\\\2018Spring\\\\Big_data_analytics\\\\deep-learning-visual-eCommerce-master\\\\fashion-item-dataset\\\\data4\"\r\n\r\ndef extract_color_histogram(image, bins=(8, 8, 8)):\r\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\r\n hist = cv2.calcHist([hsv], [0, 1, 2], None, bins,\r\n [0, 180, 0, 256, 0, 256])\r\n if imutils.is_cv2():\r\n hist = cv2.normalize(hist)\r\n else:\r\n cv2.normalize(hist, hist)\r\n return hist.flatten()\r\n\r\n# initialize the data matrix and labels list\r\ndata_train = []\r\nlabels_train = []\r\ndata_test = []\r\nlabels_test = []\r\n\r\n# loop over the input images\r\nfor stage in ['train4', 'test4']:\r\n classList = os.listdir(os.path.join(dataPath, stage))\r\n for className in classList:\r\n imagePaths = os.path.join(dataPath, stage, className)\r\n for imageName in list(paths.list_images(imagePaths)):\r\n imagePath = os.path.join(dataPath, stage, className, imageName)\r\n image = cv2.imread(imagePath)\r\n label = className\r\n hist = extract_color_histogram(image)\r\n if stage is 'train4':\r\n data_train.append(hist)\r\n labels_train.append(label)\r\n elif stage is 'test4':\r\n data_test.append(hist)\r\n labels_test.append(label)\r\n\r\n# encode the labels, converting them from strings to integers\r\nle = LabelEncoder()\r\nlabels_train = le.fit_transform(labels_train)\r\nlabels_test = le.fit_transform(labels_test)\r\n\r\n# train the linear regression clasifier\r\nprint(\"[INFO] training Linear SVM classifier...\")\r\nmodel = LinearSVC()\r\nmodel.fit(data_train, labels_train)\r\n\r\n# evaluate the classifier\r\nprint(\"[INFO] evaluating classifier...\")\r\npredictions = model.predict(data_test)\r\nprint(classification_report(labels_test, predictions,\r\n target_names=le.classes_))","sub_path":"Lab-4/source/svm_classifier.py","file_name":"svm_classifier.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485908050","text":"# Load the required libraries\nimport numpy as np\nimport cv2\n\n# Make a dummy function to be used with 'Taskbar' creation\ndef dummy(val):\n\tpass\n\n# Define all the kernels to be used with the filter functionality\nidentity_kernal=np.array([[0,0,0],[0,1,0],[0,0,0]])\nsharpen_kernel=np.array([[0,-1,0],[-1,4,-1],[0,-1,0]])\n#gaussian_kernel=np.array([[1,2,1],[2,4,2],[1,2,1]],np.float32)/16\ngaussian_kernel_1=cv2.getGaussianKernel(3,0)\ngaussian_kernel_2=cv2.getGaussianKernel(5,0)\nbox_kernel=np.array([[1,1,1],[1,1,1],[1,1,1]],np.float32)/9\n\n# Make a list of all the kernels so that they can be accessed using indices\nkernals=[identity_kernal,sharpen_kernel,gaussian_kernel_1,gaussian_kernel_2,box_kernel,sharpen_kernel]\n\n# Load the original image and make a copy of itso that we do not modify the original one.\ncolor_original=cv2.imread('Ironman.jpg')\ncolor_modified=color_original.copy()\n\n# Similarly create a copy of the original gray scale image \ngray_original=cv2.cvtColor(color_original,cv2.COLOR_BGR2GRAY)\ngray_modified=gray_original.copy()\n\n# Create a window to add all the buttons and taskbars. 
Set the range over which taskbar values can be varied\ncv2.namedWindow('app')\ncv2.createTrackbar('contrast','app',1,100,dummy)\ncv2.createTrackbar('brightness','app',50,100,dummy)\ncv2.createTrackbar('filter','app',0,len(kernals)-1,dummy)\ncv2.createTrackbar('grayscale','app',0,1,dummy)\n\n\ncount=1\t# Counter to save modified images\n\n# Create the loop over which the program will monitor any change to the taskbars made by the user\nwhile True:\n# Show the original image and make modifications on the original image if grayscale is 0 else use the grayscale image\n\tgrayscale=cv2.getTrackbarPos('grayscale','app')\n\tif grayscale==0:\n\t\tcv2.imshow('app',color_modified)\n\telse:\t\n\t\tcv2.imshow('app',gray_modified)\t\n\n# Wait for any key to be pressed and if the key matches the expected input, then perform the required operations\t\n\tk=cv2.waitKey(1) & 0xFF\n\tif k == ord('q'):\t# Press 'q' to quit\n\t\tbreak\n\telif k==ord('s'):\t# Press 's' to save\n\t\tif grayscale==0:\n\t\t\tcv2.imwrite('Ironman_modified%d.png'%count,color_modified)\t\n\t\telse:\n\t\t\tcv2.imwrite('Ironman_modified%d.png'%count,gray_modified)\n\t\tcount=count+1\t\n# Get the trackbar position updates made by the user\n\tcontrast=cv2.getTrackbarPos('contrast','app') \n\tbrightness=cv2.getTrackbarPos('brightness','app')\n\tkernel=cv2.getTrackbarPos('filter','app')\n\t\t\n# Apply the filters to the color image as well as the gray image\n\tcolor_modified=cv2.filter2D(color_original,-1,kernals[kernel])\t\n\tgray_modified=cv2.filter2D(gray_original,-1,kernals[kernel])\t\n\t\t\n# Apply the contrast and brightness to the original and gray image \n\tcolor_modified=cv2.addWeighted(color_modified,contrast,np.zeros(color_original.shape,dtype=color_original.dtype),0,brightness-50)\n\tgray_modified=cv2.addWeighted(gray_modified,contrast,np.zeros(gray_original.shape,dtype=gray_original.dtype),0,brightness-50)\n\n# Once everything is performed, close all the windows\ncv2.destroyAllWindows()\n\n","sub_path":"Filter_App.py","file_name":"Filter_App.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192666456","text":"from math import sqrt\n\n\ndef is_prime(number):\n '''\n Check whether the number is prime.\n\n Keyword arguments:\n number -- the number that will be checked\n\n Return: bool\n '''\n return all([number % divisor for divisor\n in range(2, int(sqrt(number)) + 1)])\n\n\ndef primes():\n '''\n Generator for prime numbers.\n '''\n number = 2\n while True:\n if is_prime(number):\n yield number\n number += 1\n\n\ndef prime_divisors(number):\n '''\n Gets all prime divisors of the number.\n\n Keyword arguments:\n number -- the number whose prime divisors will be searched\n\n Return: list\n '''\n divisors = []\n for prime in primes():\n while number % prime == 0:\n divisors.append(prime)\n number /= prime\n if number == 1:\n return divisors\n\n\ndef semiprimes():\n '''\n Generator for semiprime numbers.\n (semi-primes - numbers with exactly 2 prime divisors)\n '''\n number = 4\n while True:\n if len(prime_divisors(number)) == 2:\n yield number\n number += 1\n","sub_path":"2012/04/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"366177776","text":"import pytest\r\nimport requests\r\nimport json\r\n\r\n\r\nurls = [\"https://dog.ceo/api/breeds/list/all\",\r\n
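A quick usage check for the prime-number generators in the solution.py record above (assuming its functions are importable as `solution`; `islice` caps the infinite generators). Note that `prime_divisors` returns factors with multiplicity, so "exactly 2 prime divisors" means 2 counted with repetition:

```python
from itertools import islice

from solution import primes, prime_divisors, semiprimes  # assumption: run next to solution.py

print(list(islice(primes(), 8)))      # [2, 3, 5, 7, 11, 13, 17, 19]
print(prime_divisors(12))             # [2, 2, 3] -- factors with multiplicity
print(list(islice(semiprimes(), 6)))  # [4, 6, 9, 10, 14, 15]
```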
\"https://dog.ceo/api/breed/affenpinscher/images/random\",\r\n \"https://dog.ceo/api/breeds/image/random\"]\r\n\r\n\r\n@pytest.fixture(params=urls)\r\ndef response(request):\r\n r = requests.get(request.param)\r\n return r\r\n\r\n\r\n@pytest.mark.usefixtures(\"response\")\r\ndef test_request(response):\r\n assert json.loads(response.text)['status'] == \"success\"\r\n","sub_path":"Homework/Test_API_Text_Status.py","file_name":"Test_API_Text_Status.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299821779","text":"class Nodo:\n\tdef __init__(self, valor):\n\t\tself.valor = valor\n\t\tself.izq = self.der = None\n\t\tself.padre = None\n\t\tself.altura = 0\n\nclass ArbolAVL:\n\tdef __init__(self):\n\t\tself.raiz=None\n\t\t\n\tdef agregar(self,valor):\n\t\tif self.raiz==None:\n\t\t\tself.raiz=Nodo(valor)\n\t\telse:\n\t\t\tself._agregar(valor, self.raiz)\n\n\tdef _agregar(self,valor,tmp):\n\t\tif valortmp.valor and tmp.der!=None:\n\t\t\treturn self._buscar(valor,tmp.der)\n\n\tdef eliminar(self,valor):\n\t\treturn self._eliminar(self.buscar(valor))\n\n\tdef _eliminar(self,tmp):\n\n\n\t\tif tmp==None or self.buscar(tmp.valor)==None:\n\t\t\tprint(\"No se encuentra el valor\")\n\t\t\treturn None \n\n\t\tdef minimValor(tmp):\n\t\t\tactual=tmp\n\t\t\twhile actual.izq!=None:\n\t\t\t\tactual=actual.izq\n\t\t\treturn actual\n\n\t\tdef numHijos(tmp):\n\t\t\tnumHijos=0\n\t\t\tif tmp.izq!=None: \n\t\t\t\tnumHijos+=1\n\t\t\tif tmp.der!=None:\n\t\t\t\tnumHijos+=1\n\t\t\treturn numHijos\n\n\t\tnodoPadre=tmp.padre\n\n\t\tnodoHijo=numHijos(tmp)\n\n\t\tif nodoHijo==0:\n\n\t\t\tif nodoPadre!=None:\n\t\t\t\tif nodoPadre.izq==tmp:\n\t\t\t\t\tnodoPadre.izq=None\n\t\t\t\telse:\n\t\t\t\t\tnodoPadre.der=None\n\t\t\telse:\n\t\t\t\tself.raiz=None\n\n\t\tif nodoHijo==1:\n\n\t\t\tif tmp.izq!=None:\n\t\t\t\thijo=tmp.izq\n\t\t\telse:\n\t\t\t\thijo=tmp.der\n\n\t\t\tif nodoPadre!=None:\n\t\t\t\tif nodoPadre.izq==tmp:\n\t\t\t\t\tnodoPadre.izq=hijo\n\t\t\t\telse:\n\t\t\t\t\tnodoPadre.der=hijo\n\t\t\telse:\n\t\t\t\tself.raiz=hijo\n\n\t\t\t# correct the padre pointer in node\n\t\t\thijo.padre=nodoPadre\n\n\t\tif nodoHijo==2:\n\n\t\t\tsiguiente=minimValor(tmp.der)\n\n\t\t\ttmp.valor=siguiente.valor\n\n\t\t\tself._eliminar(siguiente)\n\n\t\t\treturn\n\n\t\tif nodoPadre!=None:\n\t\t\tnodoPadre.altura=1+max(self.altura(nodoPadre.izq),self.altura(nodoPadre.der))\n\t\t\tself.validaEliminacion(nodoPadre)\n\n\tdef confirmaAgre(self,tmp,ajuste=[]):\n\t\tif tmp.padre==None: return\n\t\tajuste=[tmp]+ajuste\n\n\t\talturaIzq =self.altura(tmp.padre.izq)\n\t\talturaDer=self.altura(tmp.padre.der)\n\n\t\tif abs(alturaIzq-alturaDer)>1:\n\t\t\tajuste=[tmp.padre]+ajuste\n\t\t\tself.balanceo(ajuste[0],ajuste[1],ajuste[2])\n\t\t\treturn\n\n\t\tnuevaAltura=1+tmp.altura \n\t\tif nuevaAltura>tmp.padre.altura:\n\t\t\ttmp.padre.altura=nuevaAltura\n\n\t\tself.confirmaAgre(tmp.padre,ajuste)\n\n\tdef validaEliminacion(self,tmp):\n\t\tif tmp==None: return\n\n\t\tif abs(self.altura(tmp.izq)-self.altura(tmp.der))>1:\n\t\t\ty=self.hijoMayor(tmp)\n\t\t\tx=self.hijoMayor(y)\n\t\t\tself.balanceo(tmp,y,x)\n\n\t\tself.validaEliminacion(tmp.padre)\n\n\tdef balanceo(self,z,y,x):\n\t\tif y==z.izq and x==y.izq:\n\t\t\tself.rotDer(z)\n\t\telif y==z.izq and x==y.der:\n\t\t\tself.rotIzq(y)\n\t\t\tself.rotDer(z)\n\t\telif y==z.der and x==y.der:\n\t\t\tself.rotIzq(z)\n\t\telif y==z.der and x==y.izq:\n\t\t\tself.rotDer(y)\n\t\t\tself.rotIzq(z)\n\n\tdef 
rotDer(self,z):\n\t\tsub_raiz=z.padre \n\t\ty=z.izq\n\t\tt3=y.der\n\t\ty.der=z\n\t\tz.padre=y\n\t\tz.izq=t3\n\t\tif t3!=None: t3.padre=z\n\t\ty.padre=sub_raiz\n\t\tif y.padre==None:\n\t\t\t\tself.raiz=y\n\t\telse:\n\t\t\tif y.padre.izq==z:\n\t\t\t\ty.padre.izq=y\n\t\t\telse:\n\t\t\t\ty.padre.der=y\t\t\n\t\tz.altura=1+max(self.altura(z.izq),\n\t\t\tself.altura(z.der))\n\t\ty.altura=1+max(self.altura(y.izq),\n\t\t\tself.altura(y.der))\n\n\tdef rotIzq(self,z):\n\t\tsub_raiz=z.padre \n\t\ty=z.der\n\t\tt2=y.izq\n\t\ty.izq=z\n\t\tz.padre=y\n\t\tz.der=t2\n\t\tif t2!=None: t2.padre=z\n\t\ty.padre=sub_raiz\n\t\tif y.padre==None: \n\t\t\tself.raiz=y\n\t\telse:\n\t\t\tif y.padre.izq==z:\n\t\t\t\ty.padre.izq=y\n\t\t\telse:\n\t\t\t\ty.padre.der=y\n\t\tz.altura=1+max(self.altura(z.izq),\n\t\t\tself.altura(z.der))\n\t\ty.altura=1+max(self.altura(y.izq),\n\t\t\tself.altura(y.der))\n\n\tdef altura(self,tmp):\n\t\tif tmp==None: \n\t\t\treturn 0\n\t\treturn tmp.altura\n\n\tdef hijoMayor(self,tmp):\n\t\tizq=self.altura(tmp.izq)\n\t\tder=self.altura(tmp.der)\n\t\treturn tmp.izq if izq>=der else tmp.der\n\n\nt = ArbolAVL()\nt.agregar(1)\nt.agregar(23)\nt.agregar(45)\nt.agregar(86)\nt.agregar(89)\nt.agregar(5)\n\nt.preorden()\nt.enorden()\n\nt.eliminar(23)\n\nt.preorden()\nt.enorden()","sub_path":"storage/team06/Arquitectura árbol.py","file_name":"Arquitectura árbol.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534389096","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 8 12:54:26 2018\r\n\r\n@author: Shyam\r\nAll models files\r\n\"\"\"\r\nimport torch\r\nimport torch.nn.functional as F\r\nclass SimpleCNN(torch.nn.Module):\r\n\r\n def __init__(self):\r\n super(SimpleCNN, self).__init__()\r\n self.features = torch.nn.Sequential(torch.nn.Conv2d(3, 18, kernel_size=3, stride=1, padding=1), \r\n torch.nn.ReLU(inplace = True), \r\n torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0),\r\n torch.nn.Conv2d(18, 20, kernel_size=3, stride=1, padding=1), \r\n torch.nn.ReLU(inplace = True), \r\n torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0),\r\n torch.nn.Conv2d(20, 32, kernel_size=3, stride=1, padding=1), \r\n torch.nn.ReLU(inplace = True), \r\n torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0),)\r\n \r\n self.classifier = torch.nn.Sequential(torch.nn.Linear(32 * 28 * 28, 64), \r\n torch.nn.ReLU(inplace = True), \r\n torch.nn.Linear(64, 1), \r\n torch.nn.Sigmoid())\r\n\r\n def forward(self, x):\r\n #Computes the activation of the first convolution\r\n #Size changes from (3, 224, 224) to (18, 224, 24)\r\n x = self.features(x)\r\n \r\n #Size changes from (3, 224, 224) to (18, 112, 112)\r\n \r\n #Reshape data to input to the input layer of the neural net\r\n #Size changes from (18, 16, 16) to (1, 4608)\r\n #Recall that the -1 infers this dimension from the other given dimension\r\n x = x.view(-1, 32 * 28 * 28)\r\n \r\n #Computes the activation of the first fully connected layer\r\n #Size changes from (1, 4608) to (1, 64)\r\n x = self.classifier(x)\r\n \r\n #Computes the second fully connected layer (activation applied later)\r\n #Size changes from (1, 64) to (1, 10)\r\n return(x)\r\n\r\n def num_flat_features(self, x):\r\n size = x.size()[1:] # all dimensions except the batch dimension\r\n num_features = 1\r\n for s in size:\r\n num_features *= s\r\n return 
num_features\r\n","sub_path":"own_cnn_model.py","file_name":"own_cnn_model.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"350934946","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\n# socket 接口请求, 只能查 yizhu, jianchabaogao, jianyanbaogao,\n# 检查,检验要加入入院时间,不要就诊次,医嘱只需要就诊次\n\nimport os\nimport sys\ncur_path = os.path.abspath(os.path.dirname(__file__))\nroot_path = os.path.split(cur_path)[0]\nsys.path.append(root_path) # 项目路径添加到系统路径\nimport json\nimport socket\nimport traceback\nfrom Utils.loadingConfigure import Properties\nfrom Utils.LogUtils import LogUtils\n\n\nclass SocketConnect(object):\n # 是否初始化\n IS_INIT = False\n\n def __init__(self):\n if not SocketConnect.IS_INIT:\n self.parameters = Properties()\n self.logger = LogUtils().getLogger('socket_conn')\n self.host = self.parameters.properties.get('socket_host', '0.0.0.0')\n self.port = self.parameters.properties.get('socket_port', 0)\n if self.port:\n self.port = int(self.port)\n SocketConnect.IS_INIT = True\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n 单例模式\n \"\"\"\n if not hasattr(cls, 'instance'):\n cls.instance = super(SocketConnect, cls).__new__(cls)\n return cls.instance\n\n def gainEmr(self, patient_id, visit_id, record_name, start_time='', end_time=''):\n res = dict()\n data = ''.encode()\n try:\n requests_add = (self.host, self.port)\n requests_info = '{}#{}#{}#{}#{}\\n'.format(patient_id, visit_id, start_time, end_time, record_name)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(requests_add)\n s.settimeout(5) # 超时\n s.sendall(requests_info.encode())\n while True:\n x = s.recv(1024, socket.MSG_WAITALL)\n data += x\n if not len(x):\n break\n s.close()\n res = json.loads(data)\n if not res.get(record_name):\n res['res_flag'] = False\n res['error_info'] = 'no result in socket request.'\n res['error_source'] = 'socket'\n res['patient_id'] = patient_id\n res['visit_id'] = visit_id\n res['record_name'] = record_name\n info = '\\npatient_id: {}\\nvisit_id: {}\\nstart_time: {}\\nend_time: {}\\nrecord_name: {}\\nhost: {}\\nport: {}\\ndata: {}'.format(\n patient_id, visit_id, start_time, end_time, record_name, self.host, self.port, data)\n self.logger.info(info)\n return res\n res['res_flag'] = True\n except:\n info = '\\npatient_id: {}\\nvisit_id: {}\\nstart_time: {}\\nend_time: {}\\nrecord_name: {}\\nhost: {}\\nport: {}\\ndata: {}'.format(\n patient_id, visit_id, start_time, end_time, record_name, self.host, self.port, data)\n self.logger.info(info)\n self.logger.error(traceback.format_exc())\n exc_type, exc_value, exc_traceback_obj = sys.exc_info()\n res['res_flag'] = False\n res['patient_id'] = patient_id\n res['visit_id'] = visit_id\n res['record_name'] = record_name\n res['error_source'] = 'socket'\n res['error_type'] = exc_type.__name__\n res['error_info'] = '.'.join(exc_value.args)\n res['abnormal_info'] = ''.join(traceback.format_tb(exc_traceback_obj))\n return res\n\n def process(self, patient_id, visit_id, record_name, start_time='', end_time=''):\n socket_res = self.gainEmr(patient_id, visit_id, record_name, start_time, end_time)\n if not socket_res.get('res_flag'):\n return socket_res\n res = {'res_flag': True}\n if record_name in ['shouyeshoushu', 'shouyezhenduan', 'yizhu', 'jianchabaogao', 'jianyanbaogao']:\n res[record_name] = list()\n for value in socket_res.get(record_name, list()):\n tmp = dict()\n try:\n assert isinstance(value, dict)\n except AssertionError:\n info = 
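The SimpleCNN record that closes above hard-codes a `32 * 28 * 28` flatten, which pins the input size to 3x224x224 (three stride-2 poolings: 224 -> 112 -> 56 -> 28). A minimal shape sanity check — a sketch assuming torch is installed and the class above is in scope:

```python
import torch

model = SimpleCNN()                       # class from the record above
out = model(torch.randn(1, 3, 224, 224))  # any other spatial size breaks the view()
print(out.shape)                          # torch.Size([1, 1]) -- sigmoid score
```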
'\\npatient_id: {}\\nvisit_id: {}\\nstart_time: {}\\nend_time: {}\\nrecord_name: {}\\nhost: {}\\nport: {}\\nvalue: {}'.format(\n patient_id, visit_id, start_time, end_time, record_name, self.host, self.port, value)\n self.logger.error(info)\n continue\n for k, v in value.items():\n if v:\n key = k.lower()\n if k == 'OPER_NO':\n key = 'operation_num'\n elif k == 'DRUG_AMOUNT_UNIT':\n key = 'drug_amount_value_unit'\n tmp[key] = v\n res[record_name].append(tmp)\n elif record_name == 'binganshouye':\n res[record_name] = dict()\n res[record_name]['pat_info'] = dict()\n res[record_name]['pat_visit'] = dict()\n if not socket_res.get(record_name, list()):\n return res\n value = socket_res.get(record_name, list())[0]\n for k, v in value.items():\n if not v:\n continue\n key = k.lower()\n if k == 'PATIENT_IDVISIT':\n key = 'patient_id'\n elif k == 'NAME':\n key = 'person_name'\n elif k == 'SEX':\n key = 'sex_name'\n if k in ['PATIENT_ID', 'PATIENT_IDVISIT', 'NAME', 'SEX', 'DATE_OF_BIRTH', 'NATION_NAME',\n 'NATIONALITY_NAME', 'ID_CARD_TYPE', 'ID_CARD_NO', 'BIRTH_ADDRESS', 'BLOOD_TYPE_NAME',\n 'RH_BLOOD_NAME', 'IDENTITY_NAME', 'FAMILY_ADDR_PROVINCE_NAME', 'BABY_AGE', 'BABY_BIRTH_WEIGHT',\n 'BABY_ADMIN_WEIGHT']:\n res[record_name]['pat_info'][key] = v\n else:\n res[record_name]['pat_visit'][key] = v\n if not res.get(record_name):\n res['res_flag'] = False\n res['record_name'] = record_name\n res['patient_id'] = patient_id\n res['visit_id'] = visit_id\n res['error_source'] = 'socket'\n res['error_info'] = 'no process result'\n return res\n\n\nif __name__ == '__main__':\n app = SocketConnect()\n r = app.process('000724189200', '6', 'yizhu') # siwangjilu ruyuanjilu shoushujilu shouyeshoushu binganshouye shouyezhenduan\n print(json.dumps(r, ensure_ascii=False, indent=2))\n # res = app.gainHiss('0009583961', '1', 'jianchabaogao') # yizhu jianchabaogao\n # print(json.dumps(res, ensure_ascii=False, indent=2))\n # import requests\n # add = 'http://192.168.8.20:8801/document/examreport'\n # for i in r.get('jianchabaogao', list()):\n # i['examine_class_name'] = i.get('exam_class_name', '')\n # i['examine_item_name'] = i.get('exam_item_name', '')\n # i['examine_diag'] = i.get('exam_diag', '')\n # i['examine_feature'] = i.get('exam_feature', '')\n # res = requests.post(add, data=json.dumps(i))\n # print(json.dumps(json.loads(res.text), ensure_ascii=False, indent=2))\n","sub_path":"Utils/socketConnect.py","file_name":"socketConnect.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440173071","text":"import numpy as np\n''' wir packen 3 Klassifizieren Algorithmen in unsere Klassenpool '''\nimport ClassiferPool.FischerDiskriminante as fischer\nimport ClassiferPool.Perzeptron as perzeptron\nimport ClassiferPool.KNN as knn\n\n\ndef initial_weight_data(data):\n return np.ones((len(data), 1))\n\n\ndef initial_weight_class(MaxInter):\n return np.zeros((MaxInter, 1))\n\n\ndef sigmoid(x):\n tmp = 1.0 + np.exp(-x)\n result = 1.0 / tmp\n return result\n\n''' hier berechnen wir das exponential Error von den Gewicht '''\ndef cal_error_weight(data, dataWeightVector, resultVector):\n sum = 0\n errorIndex = []\n for i in range(0, len(data)):\n if (data[i][-1] != resultVector[i]):\n sum += dataWeightVector[i]\n errorIndex.append(i)\n return sum, errorIndex\n\n''' hier wählen wir die Klasse aus dem Klassenpool, die das exponentiale Error minimiert,\n gleichzeitig merken wir die Hit Dateien und Miss Dateien, damit wir den Datensatz 
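The socket client in the record above frames a request as '#'-joined fields terminated by '\n' and reads the reply until the peer closes the connection. `socket.socketpair()` lets that framing be demonstrated without the real EMR server (illustration only; the field values echo the record's own test call):

```python
import socket

client, server = socket.socketpair()
# patient_id # visit_id # start_time # end_time # record_name (empty times allowed)
client.sendall('000724189200#6###yizhu\n'.encode())
client.shutdown(socket.SHUT_WR)   # signal end-of-request by closing the write side

request = b''
while True:
    chunk = server.recv(1024)
    if not chunk:                 # peer closed: message complete
        break
    request += chunk
print(request.decode().strip().split('#'))  # ['000724189200', '6', '', '', 'yizhu']
client.close()
server.close()
```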
später passend \n aktualisieren '''\ndef get_next_class(data, dataWeight):\n arrayClass = [fischer, knn, perzeptron]\n arrayError = []\n errorIndex = []\n #print('ja')\n result1 = fischer.prediction(data)\n #print('hello')\n arrayError.append(cal_error_weight(data, dataWeight, result1)[0])\n errorIndex.append(cal_error_weight(data, dataWeight, result1)[1])\n\n result2 = knn.prediction(data)\n #print('hi')\n arrayError.append(cal_error_weight(data, dataWeight, result2)[0])\n errorIndex.append(cal_error_weight(data, dataWeight, result2)[1])\n\n result3 = perzeptron.prediction(data)\n #print('hey')\n arrayError.append(cal_error_weight(data, dataWeight, result3)[0])\n errorIndex.append(cal_error_weight(data, dataWeight, result3)[1])\n\n index = np.argmin(arrayError)\n error = np.amin(arrayError)\n\n return arrayClass[index], error, errorIndex\n\n\ndef adaBoosting(data, MaxInter):\n classWeight = initial_weight_class(MaxInter)\n dataWeight = initial_weight_data(data)\n classPool = []\n\n for i in range(0, MaxInter):\n result = get_next_class(data, dataWeight)\n #print(result[1])\n classPool.append(result[0]) # füge neue Klasse hinzu\n\n e = result[1] / np.sum(dataWeight)\n right = 0.5 * (np.log((1-e)/e))\n classWeight[i] = right # aktualisiere das Gewicht von der neuen Klasse\n\n errorIndex = result[2]\n update_data_weight(dataWeight, errorIndex, right) # aktualisiere das Gewicht des Datensätzes\n\n return classPool, classWeight # wir bekommen unsere Committee und deren \"speak right\"\n\n''' aktualisieren den Datensatz, je nach ob es richtig geschätzt wird'''\ndef update_data_weight(dataWeight, errorIndex, right):\n j = 0\n for i in range(0, len(dataWeight)):\n if (i != errorIndex[j]):\n dataWeight[i] = dataWeight[i] * np.exp(right)\n else:\n dataWeight[i] = dataWeight[i] * np.exp(0-right)\n j += 1\n return dataWeight\n\n\ndef prediction(data, testsample, MaxInter):\n adaBoost = adaBoosting(data, MaxInter)\n classes = adaBoost[0]\n weights = adaBoost[1]\n resultArray = []\n\n for i in range(0, len(classes)):\n result = classes[i].prediction_all(data, testsample)\n resultArray.append(result)\n\n C = np.dot(np.transpose(weights), np.transpose(np.matrix(resultArray)))\n\n if (sigmoid(C) >= 0.5):\n return 1\n else: return 0\n\n\ndef error_rate(data, test, MaxInter):\n error = 0\n\n for i in range(0, len(test)):\n result = prediction(data, test[i], MaxInter)\n print(result)\n if (result != test[i][-1]):\n error += 1\n print('error rate mit', MaxInter, 'Iterationen ist', error / len(test))\n\n\n\n\n","sub_path":"ada-boost/AdaBoost.py","file_name":"AdaBoost.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444428908","text":"\"\"\"\n剑指 Offer 31. 
Validate Stack Push/Pop Sequences\nGiven two integer sequences, where the first represents the order in which values are pushed onto a stack, determine whether the second sequence could be the pop order of that stack. Assume all values pushed onto the stack are distinct. For example, the sequence {1,2,3,4,5} is a push sequence of some stack, and {4,5,3,2,1} is a valid corresponding pop sequence, but {4,3,5,1,2} cannot be a pop sequence of that push sequence.\nExample 1:\nInput: pushed = [1,2,3,4,5], popped = [4,5,3,2,1]\nOutput: true\nExplanation: we can execute in the following order:\npush(1), push(2), push(3), push(4), pop() -> 4,\npush(5), pop() -> 5, pop() -> 3, pop() -> 2, pop() -> 1\n\nExample 2:\nInput: pushed = [1,2,3,4,5], popped = [4,3,5,1,2]\nOutput: false\nExplanation: 1 cannot be popped before 2.\nConstraints:\n0 <= pushed.length == popped.length <= 1000\n0 <= pushed[i], popped[i] < 1000\npushed is a permutation of popped.\nNote: this problem is the same as problem 946 on the main site\n\"\"\"\n# simulate directly with a stack\nfrom typing import List\nclass Solution:\n def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:\n if not pushed:\n return True\n stack, p = [], 0\n for num in pushed:\n stack.append(num)\n while stack and stack[-1] == popped[p]:\n stack.pop()\n p += 1\n return not stack\n\n","sub_path":"剑指offer_二刷/剑指offer31_栈的压入弹出序列.py","file_name":"剑指offer31_栈的压入弹出序列.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105887140","text":"#January 13th 2016, Mads Poulsen\r\n\r\n#DESCRIPTION: Plots a global map of the surface fluid particle speed for a three-day average field, averaged over the top 100m water column.\r\n\r\nimport matplotlib #load matplotlib\r\nmatplotlib.use('Agg') #allows plotting without display\r\nimport numpy as np, matplotlib.pyplot as plt, seaborn\r\nfrom netCDF4 import Dataset as ncread \r\nfrom matplotlib.mlab import griddata #Load griddata function that is able to regrid data to regular grid\r\nfrom mpl_toolkits.basemap import Basemap, addcyclic #Basemap module for plotting\r\nplt.style.use('/home/nsz465/Scripts/python/mapproj.mplstyle') #Load my style of the plot\r\n\r\nroot = ncread('/home/nsz465/erda_ro/Output/ctrl.g.e11.G.T62_t12.002.pop.h.VEL.0026-01-01.zmean.nc') #Open the .nc file\r\nvel = np.array(root.variables['VEL']) #Load the surface speed field\r\nULAT = np.array(root.variables['ULAT']) #Load the latitude grid relevant to the speed field\r\nULONG = np.array(root.variables['ULONG']) #Load the longitude grid relevant to the speed field\r\nmask = np.array(root.variables['REGION_MASK']) #Load land mask\r\nfillvalue = getattr(root.variables['VEL'],'_FillValue')\r\n\r\nULAT = ULAT.flatten() #Turn ULAT from 2D to 1D array (analogous to MATLAB's reshape)\r\nULONG = ULONG.flatten()\r\nvel = vel.flatten()\r\nmask = mask.flatten()\r\n\r\ni = np.squeeze(np.where(vel != fillvalue)) #find indices of the entries in the velocity field that are not equal to the 'missing value'\r\nULAT = ULAT[i] #Keep only these entries\r\nULONG = ULONG[i]\r\nvel = vel[i]\r\nmask = mask[i]\r\n\r\ni = np.squeeze(np.where(mask != -1)) #find entries that are not land (land equal to -1)\r\nULAT = ULAT[i] #remove land from array\r\nULONG = ULONG[i]\r\nvel = vel[i]\r\n\r\n#We need to wrap the field around 180 degrees west/east such that the grid interpolation gets the field right at the edge of the plot. 
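The two examples from the translated docstring, exercised directly (assumes the `Solution` class above is in scope):

```python
s = Solution()
# push 1..4, pop 4, push 5, then pop 5, 3, 2, 1 -- a valid order
assert s.validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])
# 1 is buried under 2 and can never be popped before it
assert not s.validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])
```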
Some longitudes will get outside the usual 180w-->0-->180E range, but it doesn't matter as the new longitude/latitude axes will be restricted to the correct span\r\ni = np.squeeze(np.where(ULONG < -170)) #Where ULONG is less than 170 west\r\nULONG = np.append(ULONG, ULONG[i] + 360) #Append to the array\r\nULAT = np.append(ULAT, ULAT[i])\r\nvel = np.append(vel, vel[i])\r\ni = np.squeeze(np.where(ULONG > 170)) #Where ULONG is more than 170 east\r\nULONG = np.append(ULONG, ULONG[i] - 360)\r\nULAT = np.append(ULAT, ULAT[i])\r\nvel = np.append(vel, vel[i]) \r\n\r\nULATnew = np.arange(1800) * 0.1 - 90.0 #Define new longitude axis\r\nULONGnew = np.arange(3600) * 0.1 - 180.0 #Define new latitude axis\r\nvelregrid = griddata(ULONG, ULAT, vel, ULONGnew, ULATnew, interp='linear') #Regrid using linear interpolation\r\nvelregrid, ULONGnew = addcyclic(velregrid, ULONGnew) #Add information to regridded fields such that it spans 360 degrees\r\nvelregrid = np.array(velregrid) \r\n\r\nplt.close('all')\r\nm = Basemap(projection = 'robin', llcrnrlon = -180, urcrnrlon = 180, llcrnrlat = -90, urcrnrlat = 90, lon_0 = 0, lat_0 = 0) #Constructs a Robinson projection at central longitude 0. Resolution 'c' means crude coastlines\r\nm.drawparallels(np.arange(-90.,91.,30.), labels = [1, 0, 0, 0], dashes = [1,1], color = 'white') #Draw lines of constant latitude\r\nm.drawmeridians(np.arange(-180.,180.,90.), labels = [0, 0, 0, 1], dashes = [1,1], color = 'white') #Draw lines of constant longitude\r\nULONGnew, ULATnew = np.meshgrid(ULONGnew,ULATnew)\r\nclevs = np.arange(0.,51.,1.) #Color levels\r\ncticks = np.arange(0.,51.,5.) #Colorbar levels\r\ncs = m.contourf(ULONGnew,ULATnew,velregrid, clevs, cmap='viridis', latlon = 'True', extend = 'max')\r\ncs.cmap.set_over('w')\r\ncbar = m.colorbar(cs, 'right', size = '5%', pad = '5%', ticks = cticks, label = 'Surface speed [cm/s]', drawedges = 'True', extendrect = 'False')\r\ncbar.outline.set_edgecolor('black')\r\nplt.savefig('/home/nsz465/plots/surfacespeedglobal.png', dpi = 300)\r\n#plt.savefig('/home/nsz465/plots/surfacespeedglobal.eps')\r\n\r\n","sub_path":"notebooks/NwAC-plots/x010plot.py","file_name":"x010plot.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533105713","text":"import logging\nimport tweepy\nfrom envparse import Env\nfrom bot import TwitterForwarderBot, FetchAndSendTweetsJob\nfrom basebot import JobQueue\n\"\"\"\nenv = Env(\n TWITTER_CONSUMER_KEY=str,\n TWITTER_CONSUMER_SECRET=str,\n TWITTER_ACCESS_TOKEN=str,\n TWITTER_ACCESS_TOKEN_SECRET=str,\n TELEGRAM_BOT_TOKEN=str,\n)\n\"\"\"\nTWITTER_CONSUMER_KEY = 'XgUWuwYMwyI1uR21i32XaCuAf'\nTWITTER_CONSUMER_SECRET = 'wLxicbkey8OWl5c5LAD4UBxdgWQhlHZevGkt74QMs21yzpndur'\nTWITTER_ACCESS_TOKEN = '307895514-KA9tPLeex9jzaqCjUfBvO86uobPLD1Wb5N1gbo0n'\nTWITTER_ACCESS_TOKEN_SECRET = 'GwZNrfrYSEqB2HdzIZtI8yXbke6qj3HaNNcJT8p1Zz4LM'\nTELEGRAM_BOT_TOKEN = '208047699:AAG_TEqzH_Zc3jLxW8FmZEUEqGfS5knh3Eo'\n\nif __name__ == '__main__':\n\n logging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.WARNING)\n\n logging.getLogger(TwitterForwarderBot.__name__).setLevel(logging.DEBUG)\n logging.getLogger(JobQueue.__name__).setLevel(logging.DEBUG)\n logging.getLogger(FetchAndSendTweetsJob.__name__).setLevel(logging.DEBUG)\n\n #auth = tweepy.OAuthHandler(env('TWITTER_CONSUMER_KEY'), env('TWITTER_CONSUMER_SECRET'))\n auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)\n\n 
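The plotting script that closes above pads points near the dateline with +/-360-shifted copies before interpolating, so the regridded field has support at the map edges. A tiny numpy illustration of that padding idea (synthetic data; a simplification of the script's actual control flow, not a reproduction of it):

```python
import numpy as np

lon = np.array([-179.0, -100.0, 0.0, 100.0, 179.0])
val = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

west = lon < -170   # points hugging the western edge
east = lon > 170    # points hugging the eastern edge

# duplicate edge points on the opposite side so an interpolator sees
# neighbours beyond the nominal -180..180 span
lon_wrapped = np.concatenate([lon, lon[west] + 360.0, lon[east] - 360.0])
val_wrapped = np.concatenate([val, val[west], val[east]])
print(lon_wrapped)  # dateline points now exist on both sides of the map
```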
try:\n #auth.set_access_token(env('TWITTER_ACCESS_TOKEN'), env('TWITTER_ACCESS_TOKEN_SECRET'))\n auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)\n except KeyError:\n print(\"Either TWITTER_ACCESS_TOKEN or TWITTER_ACCESS_TOKEN_SECRET \"\n \"environment variables are missing. \"\n \"Tweepy will be initialized in 'app-only' mode\")\n\n twapi = tweepy.API(auth)\n\n #bot = TwitterForwarderBot(env('TELEGRAM_BOT_TOKEN'), twapi)\n bot = TwitterForwarderBot(TELEGRAM_BOT_TOKEN, twapi)\n\n bot.kb_interruptable_loop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"159144220","text":"import json\n\nfrom django.core.exceptions import ValidationError\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\n\nfrom account.models import Profile\nfrom .models import FriendshipRequest\nfrom account.models import Relation\n# Create your views here.\n@login_required\ndef friend_list(request, tag=None):\n\n # Friend List\n friend_list = request.user.profile.get_following\n # Already asking Friendship List\n qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(\n from_user=request.user.profile).all()\n asking_friends = [u.to_user for u in qs]\n\n querylist = Profile.objects.exclude(\n follower_user__from_user=request.user.profile)\n recommend_list = querylist.exclude(follower_user__to_user__in=asking_friends)\n\n # Have received asking Friendship List\n qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(\n to_user=request.user.profile, rejected=False).all()\n received_list = [u.from_user for u in qs]\n\n qs = FriendshipRequest.objects.filter(to_user=request.user.profile, rejected=True) \n rejected_list = [u.from_user for u in qs]\n\n context = {'friend_list': friend_list, 'recommend_list': recommend_list, \n 'received_list': received_list, 'rejected_list': rejected_list}\n return render(request, 'friend/friend.html', context)\n\n\n@login_required\ndef ask_friend(request):\n from_user = request.user.profile\n id = request.POST.get('pk')\n if id:\n to_user = Profile.objects.get(id=id)\n\n if from_user == to_user:\n raise ValidationError(\"Users cannot be friends with themselves\")\n if to_user in from_user.get_following:\n message = \"Users are already friends\"\n\n request, created = FriendshipRequest.objects.get_or_create(\n from_user=from_user,\n to_user=to_user,\n )\n\n if created is False:\n message = \"You already requested friendship to {}\".format(to_user)\n else:\n message = \"You have requested friendship to {} successfully\".format(to_user)\n context = { 'message' : message }\n return HttpResponse(json.dumps(context), content_type=\"application/json\")\n\n\n@login_required\ndef accept_friend(request):\n from_user = request.user.profile\n id = request.POST.get('pk')\n if id:\n to_user = Profile.objects.get(id=id)\n\n relation1 = Relation.objects.create(\n from_user=from_user,\n to_user=to_user,\n status='F'\n )\n relation2 = Relation.objects.create(\n from_user=to_user,\n to_user=from_user,\n status='F'\n )\n\n FriendshipRequest.objects.get(from_user=to_user, to_user=from_user).delete()\n\n friend_list = request.user.profile.get_following\n return render(request, \"friend/ajax_friend_list.html\", { 'friend_list': friend_list})\n\n@login_required\ndef reject_friend(request):\n to_user = request.user.profile\n id 
= request.POST.get('pk')\n if id:\n from_user = Profile.objects.get(id=id)\n\n fsr = FriendshipRequest.objects.get(from_user=from_user, to_user=to_user)\n fsr.rejected = True\n fsr.save()\n\n qs = FriendshipRequest.objects.filter(to_user=to_user, rejected=True) \n rejected_list = [u.from_user for u in qs]\n return render(request, \"friend/ajax_rejected_list.html\", { 'rejected_list': rejected_list})\n\n","sub_path":"friend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369502330","text":"from cnn_rnn_text_clf import ConvLSTMClassifier\nimport tensorflow as tf\n\n\nvocab_size = 20000\n\n\nif __name__ == '__main__':\n (X_train, y_train), (X_test, y_test) = tf.contrib.keras.datasets.imdb.load_data(num_words=vocab_size)\n\n clf = ConvLSTMClassifier(vocab_size)\n clf.fit(X_train, y_train, n_epoch=2)\n clf.evaluate(X_test, y_test)\n","sub_path":"nlp-models/pytorch/cnn_rnn_text_clf_imdb_test.py","file_name":"cnn_rnn_text_clf_imdb_test.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565054697","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 4 19:50:33 2018\r\n\r\n@author: Ben\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom matplotlib import pyplot\r\nfrom rng_functions import rng\r\n\r\nngroups = 16\r\n\r\nI = np.zeros(ngroups)\r\nN = np.zeros(ngroups)\r\nDev = np.zeros(ngroups)\r\n\r\nn0 = 100\r\nfor i in range(ngroups):\r\n\r\n N[i] = n0\r\n x = rng(n0)\r\n y = rng(n0)\r\n I[i] = 0.\r\n Nin = 0\r\n for j in range(n0):\r\n if(y[j] < np.sqrt(1-x[j]**2)):\r\n Nin += 1\r\n \r\n I[i] = 4.*float(Nin)/float(n0)\r\n Dev[i] = abs(I[i]-np.pi)\r\n print (n0,Nin,I[i],Dev[i])\r\n n0 *= 2\r\n \r\n \r\npyplot.plot(N,Dev,ls='-',c='red',lw=3,label='Deviation');\r\npyplot.plot(N,1/np.sqrt(N),ls='-',c='blue',lw=3, label='1/sqrt(N)');\r\npyplot.xscale('log')\r\npyplot.yscale('log')\r\npyplot.legend()\r\npyplot.title('RNG w/ a = 5, M = 11')","sub_path":"rng_a=5_m=11.py","file_name":"rng_a=5_m=11.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"72070530","text":"import unittest\n#import random\n#import datetime\n#from ca_omikuji.entitiy.entity_uranai import UranaiEntity\n#from ca_omikuji_python.ca_omikuji.entitiy.entity_uranai import UranaiEntity\nfrom ca_omikuji.entity.entity_uranai import UranaiEntity\n\nclass UranaiEntityTest(unittest.TestCase):\n\n #def test_get_uranai_result(self):\n\n #uranai_result = {}\n #uranai_result['user_name'] = self.user_name\n #print(uranai_result['user_name'])\n # date when uranai was done\n #now_date = datetime.datetime.today()\n #uranai_result['date'] = now_date\n #print(uranai_result['date'])\n #unsei_result = self.get_unsei()\n #uranai_result['unsei'] = unsei_result\n #print(uranai_result['unsei'])\n\n\n def test_get_unsei(self):\n mock_user_name = 'obitwo'\n mock_unsei_list = [\n {'unsei_name': 'daikichi', 'unsei_point': 1, 'unsei_present': '大吉'},\n {'unsei_name': 'chukichi', 'unsei_point': 1, 'unsei_present': '中吉'},\n {'unsei_name': 'kichi', 'unsei_point': 1, 'unsei_present': '吉'},\n {'unsei_name': 'shokichi', 'unsei_point': 1, 'unsei_present': '小吉'},\n {'unsei_name': 'kyo', 'unsei_point': 1, 'unsei_present': '凶'}\n ]\n #mock_unsei_result = {'unsei_name': 'kichi', 'unsei_point': 1, 'unsei_present': '吉'}\n #random.shuffle(self.unsei_list)\n 
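The rng plotting record above estimates pi by quarter-circle hit counting and plots the deviation against 1/sqrt(N). A compact vectorized restatement of the same experiment (numpy's generator replaces the script's custom `rng` module — an assumption on my part):

```python
import numpy as np

rng = np.random.default_rng(0)

for n in [100, 1_000, 10_000, 100_000]:
    x, y = rng.random(n), rng.random(n)
    inside = np.count_nonzero(x**2 + y**2 < 1.0)   # hits inside the quarter circle
    estimate = 4.0 * inside / n
    # the deviation shrinks roughly like 1/sqrt(n), as the log-log plot shows
    print(n, estimate, abs(estimate - np.pi))
```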
#unsei_result = self.unsei_list[0]\n uranai = UranaiEntity\n test_uranai = uranai(mock_user_name, mock_unsei_list).get_uranai_result()\n assert test_uranai != False\n\n#print(test_urania['date'])\n","sub_path":"ca_omikuji_python/ca_omikuji/test/test_entity_uranai.py","file_name":"test_entity_uranai.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242694385","text":"'''\nCreated on 16 июн. 2019 г.\n\n@author: alex\n\nСкрипт для создания отдельного исполняемого файла с использованием библиотеки cx_Freeze\n'''\n\n\nfrom cx_Freeze import setup, Executable\nimport sys\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n \nexecutables = [Executable('XL.py', base = base)]\n\nsetup(name='XL',\n version='0.0.1',\n description='Автоматизация',\n executables=executables)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"195959446","text":"#!/usr/bin/python3\nimport sys\n\nimport pygame\nfrom pygame import Color\nfrom pygame.math import Vector2\nimport pygame.draw as draw\n\nimport colortable\nfrom util import vector\n\nBASIS_X = vector(1, 0)\nBASIS_Y = vector(0, 1)\n\n\ndef main():\n fps = 120\n\n pygame.init()\n\n screen = pygame.display.set_mode((800, 600))\n screen.fill(colortable.background())\n\n font = pygame.font.SysFont(\"None\", 18)\n\n clock = pygame.time.Clock()\n\n center = Vector2(400, 300)\n a = center\n b = center\n\n while 1:\n\n clock.tick(fps)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n a = vector(event.pos[0], event.pos[1])\n elif event.type == pygame.MOUSEMOTION:\n b = vector(event.pos[0], event.pos[1])\n\n # draw\n screen.fill(colortable.background())\n\n draw.aaline(screen, Color('green'), center, a)\n draw.aaline(screen, Color('red'), center, b)\n\n msg = half_angle(angle_between(a, center, b))\n\n screen.blit(text(msg, font), (10, 10))\n\n pygame.display.flip()\n\n\ndef text(msg, font):\n text = font.render(str(msg), True, Color('white'))\n text.convert_alpha()\n return text\n\n\ndef full_angle(a):\n return a if a >= 0 else 360 + a\n\n\ndef half_angle(a):\n return a if a <= 180 else a - 360\n\n\ndef angle_between(a: Vector2, o: Vector2, b: Vector2):\n angle_a = full_angle((a - o).as_polar()[1])\n angle_b = full_angle((b - o).as_polar()[1])\n return full_angle(angle_b - angle_a)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652922817","text":"from pymongo import MongoClient # El cliente de MongoDB\nfrom Caidos import caidos # La clase Producto\nfrom bson.objectid import ObjectId # Para crear ObjectId, porque _id como cadena no funciona\n\n\ndef obtener_bd():\n client = MongoClient('mongodb://localhost:27017/')\n mydb = client['test']\n return mydb\n\ndef insertar(parcad):\n base_de_datos = obtener_bd()\n caidos = base_de_datos.caidos\n return caidos.insert_one({\n \"codigo\": parcad.codigo,\n \"nombre\": parcad.nombre,\n \"Diasenclase\": parcad.Diasenclase\n }).inserted_id\n\ndef obtener():\n base_de_datos = obtener_bd()\n caidos_list = base_de_datos.caidos.find()\n for c in caidos_list:\n print('{0} {1} 
{2}'.format(c['codigo'],c['nombre'],c['Diasenclase']))\n \n\ndef actualizar(id, caidos):\n base_de_datos = obtener_bd()\n resultado = base_de_datos.caidos.update_one(\n {\n '_id': ObjectId(id)\n }, \n {\n '$set': {\n \"codigo\": caidos.codigo,\n \"nombre\": caidos.nombre,\n \"Diasenclase\": caidos.Diasenclase,\n }\n })\n return resultado.modified_count\n\ndef eliminar(id):\n base_de_datos = obtener_bd()\n resultado = base_de_datos.caidos.delete_one(\n {\n '_id': ObjectId(id)\n })\n return resultado.deleted_count\n\n\n\ncreditos = \"\"\"==========================================================\n\t CRUD de MongoDB y Python\n \n __ __ __ \n\t.-----.---.-.----.-----|__| |--.--.--| |_.-----.\n\t| _ | _ | _|-- __| | _ | | | _| -__|\n\t| __|___._|__| |_____|__|_____|___ |____|_____|\n\t|__| |_____| \n==========================================================\"\"\"\n\nmenu = \"\"\"Bienvenido a la tienda.\n1 - Insertar Caidos\n2 - Ver todos\n3 - Actualizar\n4 - Eliminar\n5 - Salir\n\"\"\"\neleccion = None\nprint(creditos)\nwhile eleccion is not 5:\n print(menu)\n eleccion = int(input(\"Elige: \"))\n if eleccion is 1:\n print(\"Insertar\")\n codigo = input(\"Codigo del caido: \")\n nombre = input(\"nombre del caido: \")\n Diasenclase = float(input(\"Dias en clase: \"))\n Obj_caidos = caidos(codigo,nombre,Diasenclase)\n id = insertar(Obj_caidos)\n print(\"El id del caido insertado es: \", id)\n elif eleccion is 2:\n print(\"Obteniendo caidos...\")\n obtener()\n elif eleccion is 3:\n print(\"Actualizar\")\n id = input(\"Dime el id: \")\n codigo = input(\"Nuevo codigo del caido: \")\n nombre = input(\"Nuevo nombre del caido: \")\n DiasEnClase = float(input(\"Nuevos Dias en clase: \"))\n Obj_caidos = caidos(codigo, nombre, DiasEnClase)\n caidos_actualizados = actualizar(id, Obj_caidos)\n print(\"Número de caidos actualizados: \", caidos_actualizados)\n\n elif eleccion is 4:\n print(\"Eliminar\")\n id = input(\"Dime el id: \")\n caidos_eliminados = eliminar(id)\n print(\"Número de caidos eliminados: \", caidos_eliminados)\n\n \n","sub_path":"crudmongo.py","file_name":"crudmongo.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"58071998","text":"import re\n\nHOMEWORK_FILE = \"homework.txt\"\n\nBRACKET_REGEX_PATTERN = \"\\(\\d+(\\s(\\+|\\*)\\s\\d+)+\\)\"\nOPERATOR_REGEX_PATTERN = \"\\d+\\s(\\+|\\*)\\s\\d+\"\nPLUS_REGEX_PATTERN = \"\\d+\\s\\+\\s\\d+\"\nMULTIPLY_REGEX_PATTERN = \"\\d+\\s\\*\\s\\d+\"\n\ndef get_homework_result_equations(use_operator_precedence=False):\n \"\"\" Return each homework equation result in a list \"\"\"\n homework_result_list = []\n \n with open(HOMEWORK_FILE, 'r') as homeowork_file:\n for equation in homeowork_file:\n result = solve_complete_equation(equation.strip(), use_operator_precedence)\n homework_result_list.append(result)\n\n return homework_result_list\n\ndef solve_complete_equation(equation, use_operator_precedence=False):\n \"\"\"\n Solve the whole given 'equation_str' by first solving each bracket and in the end compute the overall\n result by simply solving the equation from left to right. 
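The Spanish-language CRUD record above opens a new `MongoClient` inside every helper; a common refactor is one shared client plus ObjectId-based lookups. A sketch only (requires a running MongoDB at the record's own connection string; `obtener_por_id` is a hypothetical helper name):

```python
from pymongo import MongoClient
from bson.objectid import ObjectId

client = MongoClient('mongodb://localhost:27017/')  # create once, reuse everywhere
caidos = client['test'].caidos

def obtener_por_id(id_str):
    # _id must be wrapped in ObjectId; a plain string will never match
    return caidos.find_one({'_id': ObjectId(id_str)})
```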
This function will identify all equations\n in brackets and solve them first.\n Depending on the given flag, operator precedence is applied or not.\n \"\"\"\n # find first occurrence of brackets\n bracketed_equation = re.search(BRACKET_REGEX_PATTERN, equation)\n\n solve_equation = solve_equation_with_operator_precedence if use_operator_precedence\\\n else solve_equation_without_operator_precedence\n\n while bracketed_equation is not None:\n # solve equation\n result = solve_equation(bracketed_equation.group())\n\n # replace current match with computed value\n equation = re.sub(BRACKET_REGEX_PATTERN, str(result), equation, 1)\n\n # find next match\n bracketed_equation = re.search(BRACKET_REGEX_PATTERN, equation)\n\n # no brackets left in equation --> solve remaining equation\n equation_result = solve_equation(equation)\n\n return equation_result\n\n# -------------------------- Puzzle 1 --------------------------\n\ndef solve_equation_without_operator_precedence(equation_str):\n \"\"\"\n Solve given 'equation_str' and return the computed value. Given equation will not contain any brackets\n and is hence solvable by only considering each value and operator from left to right.\n \"\"\"\n current_result = 0\n found_match = re.search(OPERATOR_REGEX_PATTERN, equation_str)\n \n while found_match is not None:\n # equation will look exactly like this: value-1 (+ or *) value-2 --> split it at space\n value_1, operator, value_2 = found_match.group().split(' ')\n\n # get result of found equation\n current_result = compute_result(value_1, value_2, operator)\n\n # replace current match with computed value\n equation_str = re.sub(OPERATOR_REGEX_PATTERN, str(current_result), equation_str, 1)\n\n # find next match\n found_match = re.search(OPERATOR_REGEX_PATTERN, equation_str)\n\n return current_result\n\ndef compute_result(a, b, operator):\n if operator == '*':\n return int(a) * int(b)\n elif operator == '+':\n return int(a) + int(b)\n\n# -------------------------- Puzzle 2 --------------------------\n\ndef solve_equation_with_operator_precedence(equation_str):\n \"\"\"\n Solve given 'equation_str' and return the computed value. Given equation will not contain any brackets\n and is hence solvable by only considering each value and operator from left to right.\n However, addition takes precedence over multiplication! Hence, solve these parts first.\n Afterwards only multiplication operations are left. 
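A worked example for the two solvers above, using the widely circulated sample expression for this puzzle; it assumes puzzles.py is importable from the working directory:

```python
from puzzles import (solve_equation_without_operator_precedence,
                     solve_equation_with_operator_precedence)

# left to right: ((((1+2)*3)+4)*5)+6 = 71
assert solve_equation_without_operator_precedence("1 + 2 * 3 + 4 * 5 + 6") == 71
# '+' binds tighter: (1+2) * (3+4) * (5+6) = 3 * 7 * 11 = 231
assert solve_equation_with_operator_precedence("1 + 2 * 3 + 4 * 5 + 6") == 231
```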
Solve them from left to right.\n \"\"\"\n current_result = 0\n\n # solve all plus operations first\n found_match = re.search(PLUS_REGEX_PATTERN, equation_str)\n while found_match is not None:\n # equation will look exactly like this: value-1 + value-2 --> split it at space\n value_1, _, value_2 = found_match.group().split(' ')\n\n # get result of found equation\n current_result = int(value_1) + int(value_2)\n\n # replace current match with computed value\n equation_str = re.sub(PLUS_REGEX_PATTERN, str(current_result), equation_str, 1)\n\n # find next match\n found_match = re.search(PLUS_REGEX_PATTERN, equation_str)\n\n # only multiplication left, solve equation from left to right\n found_match = re.search(MULTIPLY_REGEX_PATTERN, equation_str)\n while found_match is not None:\n # equation will look exactly like this: value-1 * value-2 --> split it at space\n value_1, _, value_2 = found_match.group().split(' ')\n\n # get result of found equation\n current_result = int(value_1) * int(value_2)\n\n # replace current match with computed value\n equation_str = re.sub(MULTIPLY_REGEX_PATTERN, str(current_result), equation_str, 1)\n\n # find next match\n found_match = re.search(MULTIPLY_REGEX_PATTERN, equation_str)\n\n return current_result\n\n# -------------------------- Solution of puzzles 1 and 2 --------------------------\n\ndef compute_solution_of_puzzle():\n \"\"\" Find the sum of all homework equations \"\"\"\n homework_result_list = get_homework_result_equations()\n sum_of_all_equations = sum(homework_result_list)\n print(\"[+] Solution of day18/puzzle1: Sum of all homework equations is {}\".format(sum_of_all_equations))\n\n homework_result_list = get_homework_result_equations(use_operator_precedence=True)\n sum_of_all_equations = sum(homework_result_list)\n print(\"[+] Solution of day18/puzzle2: Sum of all homework equations with operator precedence is {}\"\\\n .format(sum_of_all_equations))\n\nif __name__ == \"__main__\":\n compute_solution_of_puzzle()\n","sub_path":"day_18/puzzles.py","file_name":"puzzles.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"619844146","text":"import os\r\nfrom datetime import datetime\r\nfrom functools import wraps\r\nfrom flask import (\r\n render_template,\r\n redirect,\r\n url_for,\r\n flash,\r\n request,\r\n Markup,\r\n abort,\r\n send_file,\r\n)\r\nfrom flask_login import current_user\r\nfrom .. import blueprint, db\r\nfrom ..forms import (\r\n DemographicsLookupForm,\r\n DemographicsSearchForm,\r\n DemographicsDefineColumnsForm,\r\n DemographicsAdminSearchForm,\r\n)\r\nfrom lbrc_flask.forms import ConfirmForm\r\nfrom identity.demographics.model import (\r\n DemographicsRequest,\r\n DemographicsRequestXlsx,\r\n DemographicsRequestExcel97,\r\n DemographicsRequestCsv,\r\n DemographicsRequestColumn,\r\n DemographicsRequestColumnDefinition,\r\n User,\r\n)\r\nfrom identity.demographics import schedule_lookup_tasks\r\nfrom identity.security import must_be_admin\r\nfrom lbrc_flask.logging import log_exception\r\n\r\n\r\ndef must_be_request_owner():\r\n def decorator(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n dr = DemographicsRequest.query.get_or_404(request.view_args.get(\"id\"))\r\n\r\n if current_user != dr.owner and not current_user.is_admin:\r\n abort(403)\r\n\r\n return f(*args, **kwargs)\r\n\r\n return decorated_function\r\n\r\n return decorator\r\n\r\n\r\n@blueprint.route(\"/demographics/\", methods=['GET', 'POST'])\r\ndef demographics():\r\n form = DemographicsLookupForm()\r\n\r\n if current_user.is_admin:\r\n search_form = DemographicsAdminSearchForm(formdata=request.args)\r\n\r\n submitter_ids = DemographicsRequest.query.with_entities(DemographicsRequest.owner_user_id.distinct()).filter(DemographicsRequest.owner_user_id != current_user.id).subquery()\r\n submitters = sorted(User.query.filter(User.id.in_(submitter_ids)).all(), key=lambda u: u.full_name)\r\n\r\n search_form.owner_user_id.choices = [(current_user.id, current_user.full_name)] + [(u.id, u.full_name) for u in submitters] + [(0, 'All')]\r\n else:\r\n search_form = DemographicsSearchForm(formdata=request.args)\r\n\r\n q = DemographicsRequest.query\r\n\r\n if search_form.search.data:\r\n q = q.filter(DemographicsRequest.filename.like('%{}%'.format(search_form.search.data)))\r\n\r\n if not search_form.show_deleted.data:\r\n q = q.filter(DemographicsRequest.deleted_datetime.is_(None))\r\n\r\n if not search_form.show_downloaded.data:\r\n q = q.filter(DemographicsRequest.result_downloaded_datetime.is_(None))\r\n\r\n if current_user.is_admin:\r\n owner_id = search_form.owner_user_id.data or current_user.id\r\n\r\n if owner_id > 0:\r\n q = q.filter(DemographicsRequest.owner_user_id == owner_id)\r\n else:\r\n q = q.filter(DemographicsRequest.owner == current_user)\r\n\r\n demographics_requests = q.order_by(DemographicsRequest.created_datetime.desc()).paginate(\r\n page=search_form.page.data, per_page=5, error_out=False\r\n )\r\n\r\n if form.validate_on_submit():\r\n _, file_extension = os.path.splitext(form.upload.data.filename)\r\n\r\n if file_extension == '.csv':\r\n d = DemographicsRequestCsv(\r\n owner=current_user,\r\n last_updated_by_user=current_user,\r\n filename=form.upload.data.filename,\r\n skip_pmi=form.skip_pmi.data,\r\n )\r\n elif file_extension == '.xlsx':\r\n d = DemographicsRequestXlsx(\r\n owner=current_user,\r\n last_updated_by_user=current_user,\r\n filename=form.upload.data.filename,\r\n skip_pmi=form.skip_pmi.data,\r\n )\r\n elif file_extension == '.xls':\r\n d = DemographicsRequestExcel97(\r\n owner=current_user,\r\n last_updated_by_user=current_user,\r\n filename=form.upload.data.filename,\r\n skip_pmi=form.skip_pmi.data,\r\n )\r\n else:\r\n # Guard against unsupported file types (otherwise 'd' would be unbound below)\r\n flash('Unsupported file extension', 'error')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n db.session.add(d)\r\n db.session.flush()\r\n\r\n os.makedirs(os.path.dirname(d.filepath), exist_ok=True)\r\n form.upload.data.save(d.filepath)\r\n\r\n try:\r\n for name in d.get_column_names():\r\n c = DemographicsRequestColumn(
demographics_request = d,\r\n name=name,\r\n last_updated_by_user=current_user,\r\n )\r\n db.session.add(c)\r\n \r\n db.session.commit()\r\n\r\n except Exception as e:\r\n db.session.rollback()\r\n os.unlink(d.filepath)\r\n log_exception(e)\r\n flash('File contents are invalid', 'error')\r\n\r\n return redirect(url_for('ui.demographics'))\r\n\r\n return redirect(url_for('ui.demographics_define_columns', id=d.id))\r\n\r\n return render_template(\"ui/demographics/index.html\", form=form, demographics_requests=demographics_requests, search_form=search_form)\r\n\r\n\r\n@blueprint.route(\"/demographics/define_columns/\", methods=['GET', 'POST'])\r\n@must_be_request_owner()\r\ndef demographics_define_columns(id):\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n\r\n if dr.deleted:\r\n flash('Cannot amend a request that is deleted.', 'error')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n if dr.submitted:\r\n flash('Cannot amend a request that has been submitted.', 'error')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n form = DemographicsDefineColumnsForm(obj=dr.column_definition)\r\n\r\n columns = sorted(dr.columns)\r\n\r\n for f in form:\r\n f.choices = [(0, '')] + [(c.id, c.name) for c in columns]\r\n\r\n if not form.uhl_system_number_column_id.data:\r\n form.uhl_system_number_column_id.data = dr.get_most_likely_uhl_system_number_column_id()\r\n if not form.nhs_number_column_id.data:\r\n form.nhs_number_column_id.data = dr.get_most_likely_nhs_number_column_id()\r\n if not form.family_name_column_id.data:\r\n form.family_name_column_id.data = dr.get_most_likely_family_name_column_id()\r\n if not form.given_name_column_id.data:\r\n form.given_name_column_id.data = dr.get_most_likely_given_name_column_id()\r\n if not form.gender_column_id.data:\r\n form.gender_column_id.data = dr.get_most_likely_gender_column_id()\r\n if not form.dob_column_id.data:\r\n form.dob_column_id.data = dr.get_most_likely_dob_column_id()\r\n if not form.postcode_column_id.data:\r\n form.postcode_column_id.data = dr.get_most_likely_postcode_column_id()\r\n\r\n if form.validate_on_submit():\r\n if dr.column_definition is None:\r\n dr.column_definition = DemographicsRequestColumnDefinition()\r\n\r\n dr.column_definition.last_updated_by_user = current_user\r\n\r\n dr.column_definition.uhl_system_number_column_id = form.uhl_system_number_column_id.data if form.uhl_system_number_column_id.data > 0 else None\r\n dr.column_definition.nhs_number_column_id = form.nhs_number_column_id.data if form.nhs_number_column_id.data > 0 else None\r\n dr.column_definition.family_name_column_id = form.family_name_column_id.data if form.family_name_column_id.data > 0 else None\r\n dr.column_definition.given_name_column_id = form.given_name_column_id.data if form.given_name_column_id.data > 0 else None\r\n dr.column_definition.gender_column_id = form.gender_column_id.data if form.gender_column_id.data > 0 else None\r\n dr.column_definition.dob_column_id = form.dob_column_id.data if form.dob_column_id.data > 0 else None\r\n dr.column_definition.postcode_column_id = form.postcode_column_id.data if form.postcode_column_id.data > 0 else None\r\n dr.column_definition.gender_female_value = form.gender_female_value.data\r\n dr.column_definition.gender_male_value = form.gender_male_value.data\r\n\r\n if not dr.column_definition.is_valid:\r\n flash(Markup('Column specification is invalid. 
Please read the help page for more details.'), 'error')\r\n db.session.rollback()\r\n else:\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n return redirect(url_for('ui.demographics_submit', id=dr.id))\r\n\r\n return render_template(\"ui/demographics/define_columns.html\", form=form, demographics_request=dr)\r\n\r\n\r\n@blueprint.route(\"/demographics/submit/\", methods=['GET', 'POST'])\r\n@must_be_request_owner()\r\ndef demographics_submit(id):\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n\r\n if dr.deleted:\r\n flash('Cannot submit a request that is deleted.', 'error')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n if dr.submitted:\r\n flash('Cannot submit a request that has already been submitted.', 'error')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n form = ConfirmForm(obj=dr)\r\n\r\n if form.validate_on_submit():\r\n dr.submitted_datetime = datetime.utcnow()\r\n\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n schedule_lookup_tasks(dr.id)\r\n \r\n flash('Request submitted.')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n return render_template(\"ui/demographics/submit.html\", form=form, demographics_request=dr)\r\n\r\n\r\n@blueprint.route(\"/demographics/resubmit/\")\r\n@must_be_admin()\r\ndef demographics_resubmit(id):\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n dr.paused_datetime = None\r\n\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n schedule_lookup_tasks(id)\r\n \r\n flash('Request resubmitted.')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n\r\n@blueprint.route(\"/demographics/clear_error/\")\r\n@must_be_admin()\r\ndef demographics_clear_error(id):\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n dr.error_datetime = None\r\n dr.error_message = None\r\n\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n schedule_lookup_tasks(id)\r\n \r\n flash('Error cleared and Request resubmitted.')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n\r\n@blueprint.route(\"/demographics/pause/\")\r\n@must_be_admin()\r\ndef demographics_pause(id):\r\n\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n\r\n if dr.deleted:\r\n flash('Request already deleted.', 'error')\r\n elif dr.result_created:\r\n flash('Request result already created.', 'error')\r\n else:\r\n dr.paused_datetime = datetime.utcnow()\r\n\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n flash('Request paused.')\r\n\r\n return redirect(url_for('ui.demographics'))\r\n\r\n\r\n@blueprint.route(\"/demographics/delete/\", methods=['GET', 'POST'])\r\n@must_be_request_owner()\r\ndef demographics_delete(id):\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n\r\n if dr.deleted:\r\n flash('Request already deleted.', 'error')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n form = ConfirmForm(obj=dr)\r\n\r\n if form.validate_on_submit():\r\n dr.deleted_datetime = datetime.utcnow()\r\n\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n flash('Request deleted.')\r\n return redirect(url_for('ui.demographics'))\r\n\r\n return render_template(\"ui/demographics/delete.html\", form=form, demographics_request=dr)\r\n\r\n\r\n@blueprint.route(\"/demographics/download_result/\")\r\n@must_be_request_owner()\r\ndef demographics_download_result(id):\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n\r\n if not dr.result_created:\r\n abort(404)\r\n\r\n dr.result_downloaded_datetime = datetime.utcnow()\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n return send_file(\r\n dr.result_filepath,\r\n as_attachment=True,\r\n 
download_name=dr.result_filename,\r\n )\r\n\r\n\r\n@blueprint.route(\"/demographics/download_request/<int:id>\")\r\n@must_be_request_owner()\r\ndef demographics_download_request(id):\r\n dr = DemographicsRequest.query.get_or_404(id)\r\n\r\n dr.result_downloaded_datetime = datetime.utcnow()\r\n db.session.add(dr)\r\n db.session.commit()\r\n\r\n return send_file(\r\n dr.filepath,\r\n as_attachment=True,\r\n download_name=dr.filename,\r\n )\r\n","sub_path":"identity/ui/views/demographics.py","file_name":"demographics.py","file_ext":"py","file_size_in_byte":12050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"437968431","text":"def rotate(lst, high, length):\n res = [[0] * high for i in range(length)]\n for i in range(high):\n for j in range(length):\n res[j][-i - 1] = str(lst[i][j])\n return res\n\n\nrotate_in = open('rotate.in', 'r')\nrotate_out = open('rotate.out', 'w')\n\nlength, high = map(int, rotate_in.readline().split())\nlst = [[]] * high\nfor i in range(high):\n lst[i] = list(map(int, rotate_in.readline().split()))\nans = rotate(lst, high, length)\nfor i in ans:\n print(' '.join(i), file=rotate_out)\nrotate_in.close()\nrotate_out.close()\n","sub_path":"lKSH/day05/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"254844930","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n# The code for flying back to the starting point is written as below.\n\nimport rospy\nfrom std_msgs.msg import Empty\nfrom geometry_msgs.msg import Twist\nfrom bb2_pkg.bebop_move_by_gps_module_5 import MoveByGPS\n\nif __name__ == '__main__':\n rospy.init_node('FlyBack', anonymous = False)\n pb1 = rospy.Publisher('/bebop/land', Empty, queue_size = 0)\n tw = Twist()\n em = Empty()\n mbg = MoveByGPS()\n\n # As soon as the node starts, read the latitude and longitude of the current starting point. Note that, while running, the fly-back code only operates when the parameter is set to 1.\n target_lad1 = mbg.lati_now\n target_lod1 = mbg.long_now\n\n while not rospy.is_shutdown():\n # Fly back when the parameter is true.\n if rospy.get_param(\"/FlyBackl/param_of_back\") == \"1\":\n rospy.sleep(1)\n # Perform the flight back to the starting point.\n mbg.fly_to_target(target_lad1, target_lod1)\n # Land.\n pb1.publish(em)\n # Terminate the code.\n exit()\n","sub_path":"bb2_pkg/scripts/src/17-2_FlyBack.py","file_name":"17-2_FlyBack.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"74893977","text":"from flask import request\nfrom flask_restful import Resource\nfrom http import HTTPStatus\n\nfrom models.recipe import Recipe\nfrom extensions import db\nfrom flask_jwt_extended import jwt_optional, get_jwt_identity, jwt_required\n\n\nclass RecipeListResource(Resource):\n def get(self):\n data = []\n recipes = Recipe.query.filter_by(is_publish=True).all()\n for i in recipes:\n data.append(i.data)\n return {\"data\": data}, HTTPStatus.OK\n\n @jwt_required\n def post(self):\n data = request.get_json()\n\n current_user = get_jwt_identity()\n\n recipe = Recipe(name=data[\"name\"],\n description=data[\"description\"],\n num_of_servings=data[\"num_of_servings\"],\n cook_time=data[\"cook_time\"],\n directions=data[\"directions\"],\n user_id=current_user\n )\n\n recipe.save()\n\n return recipe.data, HTTPStatus.CREATED\n\n\nclass RecipeResource(Resource):\n\n @jwt_optional\n def get(self, recipe_id):\n recipe = Recipe.get_by_id(recipe_id)\n\n current_user = get_jwt_identity()\n if recipe:\n if current_user != recipe.user_id:\n return {\"message\": \"Access is not allowed\"}, HTTPStatus.FORBIDDEN\n return recipe.data, HTTPStatus.OK\n else:\n return {\"message\": \"recipe not found\"}, HTTPStatus.NOT_FOUND\n\n\n @jwt_required\n def put(self, recipe_id):\n recipe = Recipe.get_by_id(recipe_id)\n\n if recipe:\n\n current_user = get_jwt_identity()\n\n if current_user != recipe.user_id:\n return {\"message\": \"Access is not allowed\"}, HTTPStatus.FORBIDDEN\n\n data = request.get_json()\n\n recipe.name = data[\"name\"]\n recipe.description = data[\"description\"]\n recipe.num_of_servings = data[\"num_of_servings\"]\n recipe.cook_time = data[\"cook_time\"]\n recipe.directions = data[\"directions\"]\n\n recipe.save()\n\n return recipe.data, HTTPStatus.OK\n return {\"message\": \"recipe not found\"}, HTTPStatus.NOT_FOUND\n\n @jwt_required\n def delete(self, recipe_id):\n recipe = Recipe.get_by_id(recipe_id)\n if recipe:\n current_user = get_jwt_identity()\n if current_user != recipe.user_id:\n return {\"message\": \"Access is not allowed\"}, HTTPStatus.FORBIDDEN\n\n recipe.delete()\n return {}, HTTPStatus.OK\n\n return {\"message\": \"recipe not found\"}, HTTPStatus.NOT_FOUND\n\n\n\"\"\"\nclass RecipePublishResource(Resource):\n \n def put(self, recipe_id):\n for recipe in recipe_list:\n if recipe.id == recipe_id:\n recipe.is_publish = True\n return {}, HTTPStatus.OK\n return {\"message\": \"recipe not found\"}, HTTPStatus.NOT_FOUND\n\n \n def delete(self, recipe_id):\n for recipe in recipe_list:\n if recipe.id == recipe_id:\n recipe.is_publish = False\n return {}, HTTPStatus.OK\n return {\"message\": \"recipe not found\"}, HTTPStatus.NOT_FOUND\n\"\"\"","sub_path":"resources/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"495595028","text":"import xml.etree.cElementTree as ET
\nimport pickle\nimport os\nimport re\n\n\n#===================-- SET DIRECTORY NAME --=================================\nfolder_to_data_folder_path='Data'\n#============================================================================\n\n\ndef userid_questions_answers(data_folder_path,filename):\n\t\n\n\t# Each dictionary contains key as User ID and key as respective numbers of Questions / Answers Asked/Given\n\tquestions_asked = {}\n\tanswers_given = {}\n\t# Calculated as per subtraction of Upvotes-Downvotes \n\taccount_votes={}\n\t# Contains User id and list of all its UNIQUE interest of Contribution through TAG ID's\n\tinterest_area={}\n\n\tcontext = ET.iterparse(data_folder_path+'/Posts.xml')\n\tfor event, child in context: #child will contain each row\n\t\tif child.attrib == {}: # This is to tackle with the empty child that appears at last\n\t\t\tcontinue\n\t\telse:\n\t\t\tuser_id=0\n\t\t\tif child.attrib.has_key('OwnerUserId'):\n\t\t\t\tuser_id = int(child.attrib['OwnerUserId'])\n\t\t\tif child.attrib.has_key('PostTypeId'):\n\t\t\t\ttype = child.attrib['PostTypeId']\n\t\t\t\tif type == '1':\n\t\t\t\t\tif questions_asked.has_key(user_id):\n\t\t\t\t\t\tquestions_asked[user_id] += 1 # Adding Questions Added for the Website\n\t\t\t\t\telse:\n\t\t\t\t\t\tquestions_asked[user_id] = 1\n\t\t\t\telif type == '2':\n\t\t\t\t\tif answers_given.has_key(user_id): # Adding Answers Added for the Website\n\t\t\t\t\t\tanswers_given[user_id] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tanswers_given[user_id] = 1\n\t\t\t\t\t\n\t\tchild.clear()\n\n\n\n\t# Calculating User Id Contributed VOTES : UPVOTES + DOWNVOTS\n\tcontext3 = ET.iterparse(data_folder_path+'/Users.xml')\n\n\tfor event, child in context3: #child will contain each row\n\t\tif child.attrib == {}: # This is to tackle with the empty child that appears at last\n\t\t\tcontinue\n\t\telse:\n\t\t\tif child.attrib.has_key('Id'):\n\t\t\t\n\t\t\t\tacc_id = int(child.attrib['Id'])\n\t\t\t\taccount_votes[acc_id] = int(child.attrib['UpVotes'])+int(child.attrib['DownVotes'])\n\t\tchild.clear()\t\n\n\t\n\t# STORED AS userid: [Ques asked, ans posted, votes added ]\n\tuser_con={}\n\tfor key,value in answers_given.items():\n\t\tif user_con.has_key(key):\n\t\t\tuser_con[key]=[user_con[key][0],value,user_con[key][2]]\n\t\telse:\n\t\t\tuser_con[key]=[0,value,0] # In case not found making a new Entry With other two as 0\n\n\tfor key,value in questions_asked.items():\n\t\tif user_con.has_key(key):\n\t\t\tuser_con[key]=[value,user_con[key][1],user_con[key][2]]\n\t\telse:\n\t\t\tuser_con[key]=[value,0,0]\t\n\t\n\tfor key,value in account_votes.items():\n\t\tif user_con.has_key(key):\n\t\t\tuser_con[key]=[user_con[key][0],user_con[key][1],value]\n\t\t\t\n\t\telse:\n\t\t\tuser_con[key]=[0,0,value]\n\n\n\t# Writing Account Summary ie UserId : Questions , Answers , Vote in FIle \n\n\tfilename1='Results/'+filename+'_Result_raw.txt'\t\t\n\tf = open(filename1, 'w')\n\tfor key,value in user_con.items():\n\t\tf.write(\"User_Id:\"+str(key)+'\\t Questions Asked:'+str(value[0])+'\\t Answers Posted:'+str(value[1])+'\\t \tVotes:'+str(value[2])+'\\n')\n\tf.close()\n\n\t# Writing Account Summary ie UserId : Questions , Answers , Vote in Pickle\n\n\tfilename2='Results/'+filename+'_Result_pkl.pkl'\n\twith open(filename2,'wb') as f:\n\t\tpickle.dump(user_con,f)\n\t\n\t\n\t\n\t\n\t# COMPUTING PERCENATGE OF UNI , BI , TRI Speciality in a Website\n\tspeciality={}\n\tfor key,value in user_con.items():\n\t\tspeciality[key]=0\n\t\tfor j in value:\n\t\t\tif 
j>0:\n\t\t\t\tspeciality[key]+=1\n\tuni=0.0\n\tbi=0.0\n\ttri=0.0\n\t\n\tcontributors=0.0\t\t\n\tfor key,value in speciality.items():\n\t\tif value!=0:\n\t\t\tcontributors+=1\n\t\tif value==1:\n\t\t\tuni+=1\n\t\tif value==2:\n\t\t\tbi+=1\n\t\tif value==3:\n\t\t\ttri+=1\t\t\n\tuni=(uni/contributors)*100\n\ttri=(tri/contributors)*100\n\tbi=(bi/contributors)*100\n\t\n\tspec=[uni,tri,bi]\n\n\t# Writing Website Summary ie UNI , BI , TRI in FIle \n\t\n\tfilename1='Results/'+filename+'_Specialist_raw.txt'\n\tf = open(filename1, 'w')\n\tf.write(\"UNI:\"+str(uni)+' %\\t BI:'+str(bi)+' %\\t TRI:'+str(tri)+' %\\n') \n\tf.close()\n\t\n\t# Writing Website Summary ie UNI , BI , TRI in PICKLE \n\n\tfilename2='Results/'+filename+'_Specialist_pkl.pkl'\n\twith open(filename2,'wb') as f:\n\t\tpickle.dump(spec,f)\n\t\n\ndef main():\n\tpath=os.getcwd()+'/'+folder_to_data_folder_path\n\tarr = os.listdir(path)\n\tif((os.path.isdir(os.getcwd()+\"/Results\"))==False):\n\t\tos.mkdir(\"Results\")\n\tfor files in arr:\n\t\tuserid_questions_answers(path+'/'+files,files)\nmain()\n","sub_path":"Generate.py","file_name":"Generate.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610673080","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2016-present, CloudZero, Inc. All rights reserved.\n# Licensed under the BSD-style license. See LICENSE file in the project root for full license information.\n\nimport functools\nimport re\nimport os\n\nimport simplejson as json\n\nimport pyfaaster.aws.configuration as conf\nfrom pyfaaster.aws.exceptions import HTTPResponseException\nimport pyfaaster.aws.publish as publish\nimport pyfaaster.aws.tools as tools\nimport pyfaaster.common.utils as utils\n\n\nlogger = tools.setup_logging('pyfaaster')\n\n\ndef environ_aware(required=None, optional=None, **kwargs):\n \"\"\" Decorator that will add each environment variable in reqs and opts\n to the handler kwargs. 
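A minimal usage sketch (the handler name and the 'DEBUG' variable are hypothetical; 'NAMESPACE' matches the helper below):\n\n @environ_aware(required=['NAMESPACE'], optional=['DEBUG'])\n def handler(event, context, NAMESPACE=None, DEBUG=None, **kwargs):\n ...\n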
The variables in reqs will be checked for existence\n and return immediately if the environmental variable is missing.\n\n Args:\n required (iterable): required environment vars\n optional (iterable): optional environment vars\n\n Returns:\n handler (func): a lambda handler function that is environ aware\n \"\"\"\n def environ_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n for r in required if required else {}:\n value = os.environ.get(r)\n if not value:\n logger.error(f'{r} environment variable missing.')\n return {'statusCode': 500, 'body': f'Invalid {r}.'}\n kwargs[r] = value\n\n for o in optional if optional else {}:\n kwargs[o] = os.environ.get(o)\n\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n return environ_handler\n\n\nnamespace_aware = environ_aware(['NAMESPACE'], [])\n\n\ndef domain_aware(handler):\n \"\"\" Decorator that will check and add event.requestContext.authorizer.domain to the event kwargs.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a lambda handler function that is domain aware\n \"\"\"\n def handler_wrapper(event, context, **kwargs):\n domain = utils.deep_get(event, 'requestContext', 'authorizer', 'domain')\n if not domain:\n logger.error('Domain requestContext variable missing.')\n return {'statusCode': 500, 'body': 'Invalid domain.'}\n\n kwargs['domain'] = domain\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n\ndef allow_origin_response(*origins):\n \"\"\" Decorator that will check that the event.headers.origin is in origins; if the origin\n is valid, this decorator will add it to the response headers.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a lambda handler function that is authorized\n \"\"\"\n def allow_origin_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n logger.debug(f'Checking origin for event: {event}')\n\n # Check Origin\n request_origin = utils.deep_get(event, 'headers', 'origin', ignore_case=True)\n if not any(re.match(o, str(request_origin)) for o in origins):\n logger.warning(f'Invalid request origin: {request_origin}')\n return {'statusCode': 403, 'body': 'Unknown origin.'}\n\n # call handler\n kwargs['request_origin'] = request_origin\n response = handler(event, context, **kwargs)\n\n if not isinstance(response, dict):\n raise Exception(\n f'Unsupported response type {type(response)}; response must be dict for *_response decorators.')\n\n # add origin to response headers\n current_headers = response.get('headers', {})\n cors_headers = {'Access-Control-Allow-Origin': request_origin,\n 'Access-Control-Allow-Credentials': 'true'}\n response['headers'] = {**current_headers, **cors_headers}\n return response\n\n return handler_wrapper\n\n return allow_origin_handler\n\n\ndef parameters(required_querystring=None, optional_querystring=None, path=None, error=None):\n \"\"\" Decorator that will check and add queryStringParameters\n and pathParameters to the event kwargs.\n\n Args:\n required_querystring (iterable): Required queryStringParameters\n optional_querystring (iterable): Optional queryStringParameters\n path (iterable): pathParameters (these are always required)\n\n Returns:\n handler (func): a lambda handler function that is namespace aware\n \"\"\"\n def parameters_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n for param in required_querystring if required_querystring 
else {}:\n value = utils.deep_get(event, 'queryStringParameters', param)\n if not value:\n logger.error(f'queryStringParameter [{param}] missing from event [{event}].')\n return {'statusCode': 400, 'body': error or f'Invalid {param}.'}\n kwargs[param] = value\n for param in optional_querystring if optional_querystring else {}:\n value = utils.deep_get(event, 'queryStringParameters', param)\n if value:\n kwargs[param] = value\n for param in path if path else {}:\n value = utils.deep_get(event, 'pathParameters', param)\n if not value:\n logger.error(f'pathParameter [{param}] missing from event [{event}].')\n return {'statusCode': 400, 'body': error or f'Invalid {param}.'}\n kwargs[param] = value\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n return parameters_handler\n\n\ndef body(required=None, optional=None, error=None):\n \"\"\" Decorator that will check that event.get('body') has keys, then add a map of selected keys\n to kwargs.\n\n Args:\n required (iterable): Required body keys\n optional (iterable): Optional body keys\n\n Returns:\n handler (func): a lambda handler function that is namespace aware\n \"\"\"\n def body_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n try:\n event_body = json.loads(event.get('body'))\n except json.JSONDecodeError:\n return {'statusCode': 400, 'body': error or 'Invalid event.body: cannot decode json.'}\n\n body_required = {k: event_body.get(k) for k in (required if required else {})}\n if not all((v is not None for v in body_required.values())):\n logger.error(f'There is a required key in [{required}] missing from event.body [{event_body}].')\n return {'statusCode': 400, 'body': error or 'Invalid event.body: missing required key.'}\n\n body_optional = {k: event_body.get(k) for k in (optional if optional else {})}\n\n handler_body = {}\n handler_body.update(**body_required, **body_optional)\n kwargs['body'] = handler_body\n\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n return body_handler\n\n\ndef scopes(*scope_list):\n \"\"\" Decorator that will check that event.requestContext.authorizer.scopes has the given\n scopes. This decorator assumes that you have an upstream authorizer putting the scopes from the\n access_token into the event.requestContext.authorizer.scopes. This is a reasonable assumption\n if you are using a custom authorizer, which we are!\n\n Args:\n scope_list (List): List of required access_token scopes. 
Each item must be castable to string.\n\n Returns:\n handler (func): a lambda handler function that is namespace aware\n \"\"\"\n try:\n string_scope_list = [str(s) for s in scope_list]\n except Exception as err:\n logger.exception(err)\n raise TypeError('All scopes must be castable to string.')\n\n def scopes_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n token_scopes = utils.deep_get(event, 'requestContext', 'authorizer', 'scopes')\n\n if not token_scopes:\n return {'statusCode': 500, 'body': 'Invalid token scopes: missing!'}\n\n if not all((s in token_scopes for s in string_scope_list)):\n logger.warning(f'There is a required scope [{scope_list}] missing from token scopes [{token_scopes}].')\n return {'statusCode': 403, 'body': 'access_token has insufficient access.'}\n\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n return scopes_handler\n\n\ndef sub_aware(handler):\n \"\"\" Decorator that will check and add event.requestContext.authorizer.sub to the event kwargs.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a lambda handler function that is sub aware\n \"\"\"\n def handler_wrapper(event, context, **kwargs):\n sub = utils.deep_get(event, 'requestContext', 'authorizer', 'sub')\n if not sub:\n logger.error('Sub requestContext variable missing.')\n return {'statusCode': 500, 'body': 'Invalid sub.'}\n\n kwargs['sub'] = sub\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n\ndef http_response(default_error_message=None):\n \"\"\" Decorator that will wrap handler response in an API Gateway compatible dict with\n statusCode and json serialized body. If handler result has a 'body', this decorator\n will serialize it into the API Gateway body; if the handler result does _not_ have a\n body, this decorator will return statusCode 200 and serialize the entire result.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a lambda handler function that whose result is HTTPGateway compatible.\n \"\"\"\n def http_response_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n try:\n res = handler(event, context, **kwargs)\n if not isinstance(res, dict):\n raise Exception(f'Unsupported return type {type(res)}; response must be dict.')\n return {\n 'headers': res.get('headers', {}),\n 'statusCode': res.get('statusCode', 200),\n 'body': json.dumps(res['body'], iterable_as_array=True) if 'body' in res else None,\n }\n except HTTPResponseException as err:\n return {\n 'statusCode': err.statusCode,\n 'body': json.dumps(err.body, iterable_as_array=True),\n }\n except Exception as err:\n # Try and handle HTTPResponseException like objects\n if hasattr(err, 'statusCode') and hasattr(err, 'body'):\n return {\n 'statusCode': err.statusCode,\n 'body': json.dumps(err.body, iterable_as_array=True),\n }\n else:\n logger.exception(err)\n lambda_function_name = context.function_name.split('.')[-1].replace('_', ' ')\n return {\n 'statusCode': 500,\n 'body': default_error_message or f'Failed to {lambda_function_name}.',\n }\n\n return handler_wrapper\n return http_response_handler\n\n\ndef pausable(handler):\n \"\"\" Decorator that will \"pause', i.e. 
short circuit and return immediately before calling\n the decorated handler, if the PAUSE environment variable is set.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a pausable lambda handler\n \"\"\"\n @environ_aware([], ['PAUSE'])\n def handler_wrapper(event, context, **kwargs):\n if kwargs.get('PAUSE'):\n logger.warning('Function paused')\n return {'statusCode': 503, 'body': 'info: paused'}\n return handler(event, context, **kwargs)\n return handler_wrapper\n\n\ndef pingable(handler):\n \"\"\" Decorator that will short circuit and return immediately before calling\n the decorated handler if the event is a \"ping\" event.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a pingable lambda handler\n \"\"\"\n def handler_wrapper(event, context, **kwargs):\n if event.get('detail-type') == 'Scheduled Event' and event.get('source') == 'aws.events':\n logger.debug('Ping received, keeping function alive')\n return 'info: ping'\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n\ndef publisher(handler):\n \"\"\" Decorator that will publish messages to SNS Topics. This decorator looks for a 'messages'\n key in the result of the wrapper decorator. It expects result['messages'] to be a dict where\n key is Topic Name or ARN and value is the message to be sent. It will publish each message to\n its respective Topic.\n\n For example:\n\n response['messages'] = {\n 'topic-1': 'string message',\n 'topic-2': {'dictionary': 'message'},\n }\n\n Args:\n handler (func): lambda handler whose result will be checked for messages to publish\n\n Returns:\n handler (func): a publishing lambda handler\n \"\"\"\n\n @account_id_aware\n @namespace_aware\n @region_aware\n def handler_wrapper(event, context, **kwargs):\n result = handler(event, context, **kwargs)\n conn = publish.conn(kwargs['region'], kwargs['account_id'], kwargs['NAMESPACE'])\n publish.publish(conn, result.get('messages', {}))\n return result\n\n return handler_wrapper\n\n\ndef subscriber(required_topics=None):\n \"\"\" Decorator that will grab messages from sns location in event body.\n\n Args:\n required_topics (iterable): Handler must be triggered by one of these Topics\n\n Returns:\n handler (func): a lambda handler function that is namespace aware\n \"\"\"\n def subscriber_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n try:\n sns = event['Records'][0]['Sns']\n except Exception:\n raise Exception('Unsupported event format.')\n if required_topics and not any((topic_name in sns['TopicArn'] for topic_name in required_topics)):\n raise Exception('Message received not from expected topic.')\n try:\n message_body = json.loads(sns.get('Message'))\n except Exception as err:\n raise Exception(f'Could not decode message. ({err})')\n\n kwargs['message'] = message_body\n\n return handler(event, context, **kwargs)\n\n return handler_wrapper\n\n return subscriber_handler\n\n\ndef configuration_aware(config_file, create=False):\n \"\"\" Decorator that expects a configuration file in an S3 Bucket specified by the 'CONFIG'\n environment variable and S3 Bucket Key (path) specified by config_file. 
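For illustration (bucket and key names are hypothetical): with CONFIG=my-config-bucket and config_file='app/config.json', settings are loaded from s3://my-config-bucket/app/config.json.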
If create=True, this\n decorator will create an empty configuration file instead of erring.\n\n NOTE: Decorating a lambda with this incurs a performance penalty - S3 is checked on every call.\n This makes sense when writing a lambda function that updates config and is called infrequently,\n but makes far less sense if all one needs to do is read config data.\n TODO: We need a more clear, refined pattern for dealing with uncached writes and cached reads.\n The way the current config features are written, it's not clear how best to use them.\n\n Args:\n config_file (str): key in the 'CONFIG' S3 bucket of expected configuration file\n create (Bool): optionally create configuration file if absent\n\n Returns:\n handler (func): a configuration aware lambda handler\n \"\"\"\n def configuration_handler(handler):\n def handler_wrapper(event, context, **kwargs):\n config_bucket = os.environ['CONFIG']\n encrypt_key_arn = os.environ.get('ENCRYPT_KEY_ARN')\n\n conn = conf.conn(encrypt_key_arn)\n try:\n settings = conf.load_or_create(conn, config_bucket, config_file) if create else conf.load(\n conn, config_bucket, config_file)\n except Exception as err:\n logger.exception(err)\n logger.error('Failed to load or create configuration.')\n return {'statusCode': 503, 'body': 'Failed to load configuration.'}\n\n configuration = {\n 'load': lambda: settings or {},\n 'save': functools.partial(conf.save, conn, config_bucket, config_file),\n }\n return handler(event, context, configuration=configuration, **kwargs)\n\n return handler_wrapper\n\n return configuration_handler\n\n\ndef client_config_aware(handler):\n \"\"\" Decorator that will find the Source IP and Client in the event headers.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a client config aware lambda handler\n \"\"\"\n def handler_wrapper(event, context, **kwargs):\n client_details = tools.get_client_details(event)\n logger.info(f\"{handler.__name__} | {client_details}\")\n logger.debug(f'aws_lambda_wrapper| {event}')\n kwargs['client_details'] = client_details\n return handler(event, context, **kwargs)\n return handler_wrapper\n\n\ndef region_aware(handler):\n \"\"\" Decorator that will find the Account Region in the lambda context.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a region aware lambda handler\n \"\"\"\n def handler_wrapper(event, context, **kwargs):\n region = tools.get_region(context)\n kwargs['region'] = region\n return handler(event, context, **kwargs)\n return handler_wrapper\n\n\ndef account_id_aware(handler):\n \"\"\" Decorator that will find the Account ID in the lambda context.\n\n Args:\n handler (func): a handler function with the signature (event, context) -> result\n\n Returns:\n handler (func): a context aware lambda handler\n \"\"\"\n def handler_wrapper(event, context, **kwargs):\n account_id = tools.get_account_id(context)\n kwargs['account_id'] = account_id\n return handler(event, context, **kwargs)\n return handler_wrapper\n\n\ndef catch_exceptions(handler):\n \"\"\" Decorator that will catch all exceptions. 
Normally bad practice in pure Python programming, but when running\n Python in AWS Lambda, by preventing a Python Lambda from throwing an exception you can prevent a cold start\n the next time the Lambda function is called\n\n Args:\n handler (func): an AWS Lambda handler function\n\n Returns:\n handler (func): an AWS Lambda handler that will catch any exception that occurs in the decorated function\n \"\"\"\n def handler_wrapper(*args, **kwargs):\n try:\n return handler(*args, **kwargs)\n except Exception as e:\n logger.exception(f'Exception caught by catch_exceptions decorator: {e}')\n return handler_wrapper\n\n\ndef default(default_error_message=None):\n \"\"\"\n AWS lambda handler handler. A wrapper with standard boilerplate implementing the\n best practices we've developed\n\n Returns:\n The wrapped lambda function or JSON response function when an error occurs. When called,\n this wrapped function will return the appropriate output\n \"\"\"\n\n def default_handler(handler):\n\n @http_response(default_error_message)\n @account_id_aware\n @client_config_aware\n @configuration_aware('configuration.json', create=True)\n @environ_aware(['NAMESPACE', 'CONFIG'], ['ENCRYPT_KEY_ARN'])\n @pingable\n def handler_wrapper(event, context, **kwargs):\n try:\n return handler(event, context, **kwargs)\n except Exception as err:\n logger.error('Lambda Event : {}'.format(event))\n logger.exception('{}:{}'.format(type(err), err))\n return {'statusCode': 500, 'body': f'Could not complete {handler.__name__}'}\n\n return handler_wrapper\n\n return default_handler\n","sub_path":"pyfaaster/aws/handlers_decorators.py","file_name":"handlers_decorators.py","file_ext":"py","file_size_in_byte":20481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61823897","text":"import unittest\nimport Model\nimport move_source\nfrom move import Move\n\n\nclass DummySource(move_source.MoveSource):\n\n def __init__(self, name):\n self.name = name\n\n def play(self, hand, current_card) -> [Move]:\n move = hand[0]\n return [Move(Move.Type.SINGLE, [move])]\n\n\nclass TestGame(unittest.TestCase):\n\n def test_simple(self):\n dummy1 = DummySource(\"Player 1\")\n player1 = Model.Player(dummy1)\n player1.hand = [Model.Card(\"Hearts\", \"4\"), Model.Card(\"Diamonds\", \"5\")]\n dummy2 = DummySource(\"Player 2\")\n player2 = Model.Player(dummy2)\n player2.hand = [Model.Card(\"Spades\", \"7\")]\n players = [player1, player2]\n results = Model.Game.run_game(players)\n self.assertListEqual([player2, player1], results[0], \"Player 2 should win and player 1 should come second\")\n self.assertEqual(3, results[1], \"The game should have three moves because there are 3 cards\")\n\n def test_player_hand(self):\n dummy = DummySource(\"Dummy\")\n player = Model.Player(dummy)\n player.hand = [Model.Card(\"Hearts\", \"4\"), Model.Card(\"Diamonds\", \"5\")]\n player.play(None)\n self.assertEqual(2, len(player.hand), \"Player should not modify its own hand\")\n\n def test_illegal_moves(self):\n dummy1 = DummySource(\"Dummy1\")\n player1 = Model.Player(dummy1)\n player1.hand = [Model.Card(\"Hearts\", \"4\"), Model.Card(\"Diamonds\", \"5\")]\n dummy2 = DummySource(\"Dummy2\")\n player2 = Model.Player(dummy2)\n player2.hand = [Model.Card(\"Hearts\", \"J\"), Model.Card(\"Hearts\", \"Q\")]\n results = Model.Game.run_game([player1, player2], current_card=Model.Card(\"Hearts\", \"10\"))\n self.assertEqual(4, results[2][dummy1.name], \"Player 1 should have made four illegal moves, two bad cards\"\n \", 
and two move sets with no valid moves\")\n self.assertEqual(0, results[2][dummy2.name], \"Player 2 should have no illegal moves\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"model_tests.py","file_name":"model_tests.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"569722820","text":"import importlib\nimport warnings\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils import six\nfrom django.utils.encoding import force_text, python_2_unicode_compatible\nfrom django.utils.six.moves.urllib.parse import parse_qs, urlparse\n\nfrom redis._compat import unicode\nfrom redis.connection import SSLConnection\n\n\ndef get_servers(location):\n \"\"\"Returns a list of servers given the server argument passed in from\n Django.\n \"\"\"\n if isinstance(location, six.string_types):\n servers = location.split(',')\n elif hasattr(location, '__iter__'):\n servers = location\n else:\n raise ImproperlyConfigured(\n '\"server\" must be an iterable or string'\n )\n return servers\n\n\ndef import_class(path):\n module_name, class_name = path.rsplit('.', 1)\n try:\n module = importlib.import_module(module_name)\n except ImportError:\n raise ImproperlyConfigured('Could not find module \"%s\"' % module_name)\n else:\n try:\n return getattr(module, class_name)\n except AttributeError:\n raise ImproperlyConfigured('Cannot import \"%s\"' % class_name)\n\n\ndef parse_connection_kwargs(server, db=None, **kwargs):\n \"\"\"\n Return a connection pool configured from the given URL.\n\n For example::\n\n redis://[:password]@localhost:6379/0\n rediss://[:password]@localhost:6379/0\n unix://[:password]@/path/to/socket.sock?db=0\n\n Three URL schemes are supported:\n redis:// creates a normal TCP socket connection\n rediss:// creates a SSL wrapped TCP socket connection\n unix:// creates a Unix Domain Socket connection\n\n There are several ways to specify a database number. The parse function\n will return the first specified option:\n 1. A ``db`` querystring option, e.g. redis://localhost?db=0\n 2. If using the redis:// scheme, the path argument of the url, e.g.\n redis://localhost/0\n 3. The ``db`` argument to this function.\n\n If none of these options are specified, db=0 is used.\n\n Any additional querystring arguments and keyword arguments will be\n passed along to the ConnectionPool class's initializer. In the case\n of conflicting arguments, querystring arguments always win.\n\n NOTE: taken from `redis.ConnectionPool.from_url` in redis-py\n \"\"\"\n kwargs['unix_socket_path'] = ''\n if '://' in server:\n url = server\n url_string = url\n url = urlparse(url)\n qs = ''\n\n # in python2.6, custom URL schemes don't recognize querystring values\n # they're left as part of the url.path.\n if '?' in url.path and not url.query:\n # chop the querystring including the ? off the end of the url\n # and reparse it.\n qs = url.path.split('?', 1)[1]\n url = urlparse(url_string[:-(len(qs) + 1)])\n else:\n qs = url.query\n\n url_options = {}\n\n for name, value in parse_qs(qs).items():\n if value and len(value) > 0:\n url_options[name] = value[0]\n\n # We only support redis:// and unix:// schemes.\n if url.scheme == 'unix':\n url_options.update({\n 'password': url.password,\n 'unix_socket_path': url.path,\n })\n\n else:\n url_options.update({\n 'host': url.hostname,\n 'port': int(url.port or 6379),\n 'password': url.password,\n })\n\n # If there's a path argument, use it as the db argument if a\n # querystring value wasn't specified\n if 'db' not in url_options and url.path:\n try:\n url_options['db'] = int(url.path.replace('/', ''))\n except (AttributeError, ValueError):\n pass\n\n if url.scheme == 'rediss':\n url_options['connection_class'] = SSLConnection\n\n # last shot at the db value\n url_options['db'] = int(url_options.get('db', db or 0))\n\n # update the arguments from the URL values\n kwargs.update(url_options)\n\n # backwards compatibility\n if 'charset' in kwargs:\n warnings.warn(DeprecationWarning(\n '\"charset\" is deprecated. Use \"encoding\" instead'))\n kwargs['encoding'] = kwargs.pop('charset')\n if 'errors' in kwargs:\n warnings.warn(DeprecationWarning(\n '\"errors\" is deprecated. Use \"encoding_errors\" instead'))\n kwargs['encoding_errors'] = kwargs.pop('errors')\n else:\n unix_socket_path = None\n if ':' in server:\n host, port = server.rsplit(':', 1)\n try:\n port = int(port)\n except (ValueError, TypeError):\n raise ImproperlyConfigured(\n \"{0} from {1} must be an integer\".format(\n repr(port),\n server\n )\n )\n else:\n host, port = None, None\n unix_socket_path = server\n\n kwargs.update(\n host=host,\n port=port,\n unix_socket_path=unix_socket_path,\n db=db,\n )\n\n return kwargs\n","sub_path":"docker/alpine/kolibri/dist/redis_cache/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"145015362","text":"#!/usr/bin/python3\n# Distributed under MIT License\n# Copyright (c) 2021 Remi BERTHOLET\n\"\"\" Class defining a VT100 text editor.\nThis editor works directly on the board.\nThis allows you to make quick and easy changes directly on the board, without having to use synchronization tools.\nThis editor allows script execution, and displays errors and execution time.\n\nEditor shortcuts :\n
- Exit : Escape\n
- Move cursor : Arrows, Home, End, PageUp, PageDown, Ctrl-Home, Ctrl-End, Ctrl-Left, Ctrl-Right\n
- Selection : Shift-Arrows, Shift-Home, Shift-End, Alt-Shift-Arrows, Ctrl-Shift-Left, Ctrl-Shift-Right\n
- Clipboard : Selection with Ctrl-X(Cut), Ctrl-C(Copy), Ctrl-V(Paste)\n
- Case change : Selection with Ctrl-U(Toggle uppercase, lowercase)\n
- Indent : Selection with Tab(Indent) or Shift-Tab(Unindent)\n
- Comment block : Selection with Ctrl-Q\n
- Save : Ctrl-S\n
- Find : Ctrl-F\n
- Replace : Ctrl-H\n
- Toggle mode : Ctrl-T (Insertion/Replacement)\n
- Delete line : Ctrl-L\n
- Goto line : Ctrl-G\n
- Execute : F5\n\nThis editor also works on linux and osx, and can also be used autonomously,\nyou need to add the useful.py script to its side.\nAll the keyboard shortcuts are at the start of the script.\n\nOn the boards with low memory, it may work, but on very small files, otherwise it may produce an error due to insufficient memory.\n\"\"\"\nimport sys\nsys.path.append(\"lib\")\nsys.path.append(\"lib/tools\")\ntry:\n\tfrom tools import useful\nexcept:\n\timport useful\n\nTABSIZE = 4 # Tabulation size\nHORIZONTAL_MOVE=8 # Scrolling minimal deplacement\n\nESCAPE = \"\\x1B\"\n\n# Move shortcuts\nUP = [\"\\x1B[A\"]\nDOWN = [\"\\x1B[B\"]\nRIGHT = [\"\\x1B[C\"]\nLEFT = [\"\\x1B[D\"]\nHOME = [\"\\x1B[1;3D\", \"\\x1B[H\", \"\\x1B\\x1B[D\", \"\\x1B[1~\", \"\\x1Bb\"]\nEND = [\"\\x1B[1;3C\", \"\\x1B[F\", \"\\x1B\\x1B[C\", \"\\x1B[4~\", \"\\x1Bf\"]\nPAGE_UP = [\"\\x1B[1;3A\", \"\\x1B[A\", \"\\x1B\\x1B[A\", \"\\x1B[5~\"]\nPAGE_DOWN = [\"\\x1B[1;3B\", \"\\x1B[B\", \"\\x1B\\x1B[B\", \"\\x1B[6~\"]\nTOP = [\"\\x1B[1;5H\"]\nBOTTOM = [\"\\x1B[1;5F\"]\nNEXT_WORD = [\"\\x1B[1;5C\"]\nPREVIOUS_WORD = [\"\\x1B[1;5D\"]\n\n# Selection shortcuts\nSELECT_UP = [\"\\x1B[1;2A\"]\nSELECT_DOWN = [\"\\x1B[1;2B\"]\nSELECT_RIGHT = [\"\\x1B[1;2C\"]\nSELECT_LEFT = [\"\\x1B[1;2D\"]\nSELECT_PAGE_UP = [\"\\x1B[1;10A\",\"\\x1B[1;4A\",\"\\x1B[5;2~\"]\nSELECT_PAGE_DOWN = [\"\\x1B[1;10B\",\"\\x1B[1;4B\",\"\\x1B[6;2~\"]\nSELECT_HOME = [\"\\x1B[1;2H\",\"\\x1B[1;10D\"]\nSELECT_END = [\"\\x1B[1;2F\",\"\\x1B[1;10C\"]\nSELECT_ALL = [\"\\x01\"]\nSELECT_NEXT_WORD = [\"\\x1B[1;6C\",\"\\x1B[1;4C\"]\nSELECT_PREV_WORD = [\"\\x1B[1;6D\",\"\\x1B[1;4D\"]\n\n# Clipboard shortcuts\nCUT = [\"\\x18\",\"\\x1Bx\"] # Cut\nCOPY = [\"\\x03\",\"\\x1Bc\"] # Copy\nPASTE = [\"\\x16\",\"\\x1Bv\"] # Paste\n\n# Selection modification shortcut\nINDENT = [\"\\t\"] # Indent\nUNINDENT = [\"\\x1B[Z\"] # Unindent\nCHANGE_CASE = [\"\\x15\"] # Change case\nCOMMENT = [\"\\x11\"] # Comment block\n\nDELETE = [\"\\x1B[3~\"] # Delete pressed\nBACKSPACE = [\"\\x7F\"] # Backspace pressed\nNEW_LINE = [\"\\n\", \"\\r\"] # New line pressed\n\nTOGGLE_MODE = [\"\\x14\"] # Toggle replace/insert mode\nEXIT = [ESCAPE] # Exit\nFIND = [\"\\x06\"] # Find\nFIND_NEXT = [\"\\x1bOR\"] # Find next\nFIND_PREVIOUS = [\"\\x1b[1;2R\"] # Find previous\nGOTO = [\"\\x07\"] # Goto line\nSAVE = [\"\\x13\",\"\\x1Bs\"] # Save\nDELETE_LINE = [\"\\x0C\"] # Delete line\nREPLACE = [\"\\x08\"] # Replace\nREPLACE_CURRENT = [\"\\x12\"] # Replace the selection\nEXECUTE = [\"\\x1B[15~\"] # Execute script\n\nclass View:\n\t\"\"\" Class which manage the view of the edit field \"\"\"\n\tdef __init__(self, viewHeight, viewTop):\n\t\t\"\"\" Constructor \"\"\"\n\t\tself.line = 0\n\t\tself.column = 0\n\t\tif viewHeight is None:\n\t\t\tself.height = 20\n\t\telse:\n\t\t\tself.height = viewHeight\n\t\tself.width = 80\n\t\tself.top = viewTop\n\t\tself.isRefreshAll = True\n\t\tself.isRefreshLine = False\n\t\tself.isRefreshLineBefore = False\n\t\tself.isRefreshLineAfter = False\n\t\tself.refreshPart = None\n\t\tself.text = None\n\t\tself.tabCursorColumn = 0\n\t\tself.selLineStart = None\n\t\tself.selLineEnd = None\n\t\tself.screenHeight = 1\n\t\tself.screenWidth = 1\n\n\tdef write(self, data):\n\t\t\"\"\" Write data to stdout \"\"\"\n\t\tsys.stdout.write(data)\n\n\tdef flush(self):\n\t\t\"\"\" Flush text to stdout \"\"\"\n\t\ttry:\n\t\t\tsys.stdout.flush()\n\t\texcept:\n\t\t\tpass\n\n\tdef setText(self, text):\n\t\t\"\"\" Set the text object \"\"\"\n\t\tself.text = text\n\n\tdef getScreenPosition(self):\n\t\t\"\"\" Get the 
screen position of cursor \"\"\"\n\t\treturn (self.text.getCursorLine() - self.line + self.top, self.tabCursorColumn - self.column)\n\n\tdef reset(self):\n\t\t\"\"\" Reset VT100 \"\"\"\n\t\tself.write(\"\\x1b\"\"c\")\n\t\tself.flush()\n\n\tdef resetScrollRegion(self):\n\t\t\"\"\" Reset VT100 scroll region \"\"\"\n\t\tif self.screenHeight > 0:\n\t\t\tself.setScrollingRegion(0, self.screenHeight-1)\n\n\tdef setScrollingRegion(self, topLine, bottomLine):\n\t\t\"\"\" Define VT100 scroll region \"\"\"\n\t\tif topLine < bottomLine:\n\t\t\tself.write(\"\\x1B[%d;%dr\"%(topLine+1,bottomLine+1))\n\n\tdef scrollUp(self):\n\t\t\"\"\" Scroll to up \"\"\"\n\t\tself.setScrollingRegion(self.top, self.height+1)\n\t\tself.write(\"\\x1B[1S\")\n\n\tdef scrollDown(self):\n\t\t\"\"\" Scroll to down \"\"\"\n\t\tself.setScrollingRegion(self.top, self.height+1)\n\t\tself.write(\"\\x1B[1T\")\n\n\tdef scrollPartUp(self):\n\t\t\"\"\" Scroll the upper part \"\"\"\n\t\tline, column = self.getScreenPosition()\n\t\tif line < self.height:\n\t\t\tself.setScrollingRegion(line, self.height+1)\n\t\t\tself.write(\"\\x1B[1S\")\n\n\tdef scrollPartDown(self):\n\t\t\"\"\" Scroll the lower part \"\"\"\n\t\tline, column = self.getScreenPosition()\n\t\tif line < self.height:\n\t\t\tself.setScrollingRegion(line+1, self.height+1)\n\t\t\tself.write(\"\\x1B[1T\")\n\t\telse:\n\t\t\tself.isRefreshLineAfter = True\n\n\tdef move(self):\n\t\t\"\"\" Move the view \"\"\"\n\t\tself.tabCursorColumn = self.text.getTabCursor(self.text.getCursorLine())\n\t\t# Move view port\n\t\tif self.tabCursorColumn < self.column:\n\t\t\tself.isRefreshAll = True\n\t\t\tif self.tabCursorColumn > HORIZONTAL_MOVE:\n\t\t\t\tself.column = self.tabCursorColumn-HORIZONTAL_MOVE\n\t\t\telse:\n\t\t\t\tself.column = 0\n\t\telif self.tabCursorColumn >= self.column + self.width:\n\t\t\tself.column = self.tabCursorColumn-self.width+HORIZONTAL_MOVE\n\t\t\tself.isRefreshAll = True\n\t\tif self.text.getCursorLine() < self.line:\n\t\t\tdelta = self.line - self.text.getCursorLine()\n\t\t\tself.line = self.text.getCursorLine()\n\t\t\tif self.line < 0:\n\t\t\t\tself.line = 0\n\t\t\tif delta <= 1:\n\t\t\t\tself.scrollDown()\n\t\t\t\tself.isRefreshLine = True\n\t\t\telse:\n\t\t\t\tself.isRefreshAll = True\n\t\telif self.text.getCursorLine() > self.line + self.height:\n\t\t\tdelta = self.text.getCursorLine() - self.line - self.height\n\t\t\tself.line = self.text.getCursorLine()-self.height\n\t\t\tif delta <= 1:\n\t\t\t\tself.scrollUp()\n\t\t\t\tself.isRefreshLine = True\n\t\t\telse:\n\t\t\t\tself.isRefreshAll = True\n\n\tdef setRefreshLine(self):\n\t\t\"\"\" Indicates that the line must be refreshed \"\"\"\n\t\tself.isRefreshLine = True\n\n\tdef setRefreshAfter(self):\n\t\t\"\"\" Indicates that all lines after the current line must be refreshed \"\"\"\n\t\tself.isRefreshLine = True\n\t\tself.isRefreshLineAfter = True\n\n\tdef setRefreshBefore(self):\n\t\t\"\"\" Indicates that all lines before the current line must be refreshed \"\"\"\n\t\tself.isRefreshLine = True\n\t\tself.isRefreshLineBefore = True\n\n\tdef setRefreshAll(self):\n\t\t\"\"\" Indicates that all lines must be refreshed \"\"\"\n\t\tself.isRefreshAll = True\n\n\tdef showLine(self, currentLine, screenLine, selectionStart, selectionEnd, quick=False):\n\t\t\"\"\" Show one line \"\"\"\n\t\tif quick:\n\t\t\tlineToDisplay = \"\"\n\t\telse:\n\t\t\tlineToDisplay = \"\\x1B[%d;1f\\x1B[K\"%(screenLine+1)\n\t\tcountLine = self.text.getCountLines()\n\t\tif currentLine < countLine and currentLine >= 0:\n\t\t\tline = 
self.text.getTabLine(currentLine)\n\t\t\tpartLine = line[self.column:self.column+self.width]\n\t\t\t# If the line selected\n\t\t\tif selectionStart != None:\n\t\t\t\t# If the line not empty\n\t\t\t\tif len(partLine) >= 1:\n\t\t\t\t\t# If the line have carriage return at the end\n\t\t\t\t\tif partLine[-1] == \"\\n\":\n\t\t\t\t\t\t# Remove the carriage return\n\t\t\t\t\t\tpartLine = partLine[:-1]\n\t\t\t\tif len(partLine) > 0:\n\t\t\t\t\tdummy, selLineStart, selColumnStart = selectionStart\n\t\t\t\t\tdummy, selLineEnd, selColumnEnd = selectionEnd\n\t\t\t\t\t# If the current line is the end of selection\n\t\t\t\t\tif currentLine == selLineEnd:\n\t\t\t\t\t\t# If the end of selection is outside the visible part\n\t\t\t\t\t\tif selColumnEnd - self.column < 0:\n\t\t\t\t\t\t\tselColumnEnd = 0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tselColumnEnd -= self.column\n\n\t\t\t\t\t\t# If the start of selection is on the previous lines\n\t\t\t\t\t\tif selLineStart < selLineEnd:\n\t\t\t\t\t\t\t# Select the start of line\n\t\t\t\t\t\t\tpartLine = \"\\x1B[7m\" + partLine[:selColumnEnd] + \"\\x1B[m\" + partLine[selColumnEnd:]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# Unselect the end of line\n\t\t\t\t\t\t\tpartLine = partLine[:selColumnEnd] + \"\\x1B[m\" + partLine[selColumnEnd:]\n\t\t\t\t\t# If the current line is the start of selection\n\t\t\t\t\tif currentLine == selLineStart:\n\t\t\t\t\t\t# If the start of selection is outside the visible part\n\t\t\t\t\t\tif selColumnStart - self.column < 0:\n\t\t\t\t\t\t\tselColumnStart = 0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tselColumnStart -= self.column\n\n\t\t\t\t\t\t# If the end of selection is on the next lines\n\t\t\t\t\t\tif selLineStart < selLineEnd:\n\t\t\t\t\t\t\t# Select the end of line\n\t\t\t\t\t\t\tpartLine = partLine[:selColumnStart] + \"\\x1B[7m\" + partLine[selColumnStart:] + \"\\x1B[m\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# Select the start of line\n\t\t\t\t\t\t\tpartLine = partLine[:selColumnStart] + \"\\x1B[7m\" + partLine[selColumnStart:] \n\t\t\t\t\t# If the line is completly selected\n\t\t\t\t\tif currentLine > selLineStart and currentLine < selLineEnd:\n\t\t\t\t\t\t# Select all the line\n\t\t\t\t\t\tpartLine = \"\\x1B[7m\" + partLine + \"\\x1B[m\"\n\t\t\t\telse:\n\t\t\t\t\tpartLine = \"\"\n\t\t\t\tself.write(lineToDisplay + partLine)\n\t\t\telse:\n\t\t\t\tself.write(lineToDisplay + partLine.rstrip())\n\n\tdef refreshLine(self, selectionStart, selectionEnd):\n\t\t\"\"\" Refresh line \"\"\"\n\t\tscreenLine, screenColumn = self.getScreenPosition()\n\t\trefreshed = False\n\n\t\t# If the line must be refreshed before the cursor line\n\t\tif self.isRefreshLineBefore:\n\t\t\tself.isRefreshLineBefore = False\n\t\t\tself.showLine(self.text.getCursorLine()-1, screenLine-1, selectionStart, selectionEnd)\n\t\t\trefreshed = True\n\t\t# If the line must be refreshed after the cursor line\n\t\tif self.isRefreshLineAfter:\n\t\t\tself.isRefreshLineAfter = False\n\t\t\tself.showLine(self.text.getCursorLine()+1, screenLine+1, selectionStart, selectionEnd)\n\t\t\toffset = self.height - screenLine\n\t\t\tself.showLine(self.text.getCursorLine()+offset+1, screenLine+offset+1, selectionStart, selectionEnd)\n\t\t\trefreshed = True\n\t\t# If only the cursor line must be refresh\n\t\tif self.isRefreshLine:\n\t\t\tself.isRefreshLine = False\n\t\t\tself.showLine(self.text.getCursorLine(), screenLine, selectionStart, selectionEnd)\n\t\t\trefreshed = True\n\n\t\t# If no refresh detected and a selection started\n\t\tif selectionStart != None and refreshed == False:\n\t\t\t# Refresh the 
selection\n\t\t\tself.showLine(self.text.getCursorLine(), screenLine, selectionStart, selectionEnd)\n\n\tdef refresh(self):\n\t\t\"\"\" Refresh view \"\"\"\n\t\tselectionStart, selectionEnd = self.text.getSelection()\n\t\tif self.refreshPart != None:\n\t\t\tself.refreshContent(selectionStart, selectionEnd, self.refreshPart)\n\t\t\tself.refreshPart = None\n\t\t# Refresh all required\n\t\tif self.isRefreshAll:\n\t\t\tself.refreshContent(selectionStart, selectionEnd, True)\n\t\t\tself.isRefreshAll = False\n\t\t\tself.isRefreshLine = False\n\t\telse:\n\t\t\t# If no selection activated\n\t\t\tif selectionStart == None:\n\t\t\t\t# Refresh the current line\n\t\t\t\tself.refreshLine(selectionStart, selectionEnd)\n\t\t\telse:\n\t\t\t\t# Refresh the selection\n\t\t\t\tself.refreshContent(selectionStart, selectionEnd, False)\n\t\tself.moveCursor()\n\t\tself.flush()\n\n\tdef refreshContent(self, selectionStart, selectionEnd, all_):\n\t\t\"\"\" Refresh content \"\"\"\n\t\t# If selection present\n\t\tif selectionStart != None:\n\t\t\t# Get the selection\n\t\t\tdummy, selLineStart, selColumnStart = selectionStart\n\t\t\tdummy, selLineEnd, selColumnEnd = selectionEnd\n\t\t\tlineStart = selLineStart\n\t\t\tlineEnd = selLineEnd\n\t\t\t# The aim of this part is to limit the refresh area\n\t\t\t# If the precedent display show a selection\n\t\t\tif self.selLineEnd != None and self.selLineStart != None:\n\t\t\t\t# If the start and end of selection is on the sames lines\n\t\t\t\tif self.selLineEnd == selLineEnd and self.selLineStart == selLineStart:\n\t\t\t\t\tlineStart = lineEnd = self.text.getCursorLine()\n\t\t\t\telse:\n\t\t\t\t\t# If the end of selection is after the precedent display\n\t\t\t\t\tif self.selLineEnd > selLineEnd:\n\t\t\t\t\t\tlineEnd = self.selLineEnd\n\t\t\t\t\t# If the end of selection is on the same line than the precedent display\n\t\t\t\t\telif self.selLineEnd == selLineEnd:\n\t\t\t\t\t\t# If the start of selection is before the precedent display\n\t\t\t\t\t\tif self.selLineStart < selLineStart:\n\t\t\t\t\t\t\tlineEnd = selLineStart\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlineEnd = self.selLineStart\n\t\t\t\t\t# If the start of selection is before the precedent display\n\t\t\t\t\tif self.selLineStart < selLineStart:\n\t\t\t\t\t\tlineStart = self.selLineStart\n\t\t\t\t\t# If the start of selection is on the same line than the precedent display\n\t\t\t\t\telif self.selLineStart == selLineStart:\n\t\t\t\t\t\t# If the end of selection is after the precedent display\n\t\t\t\t\t\tif self.selLineEnd > selLineEnd:\n\t\t\t\t\t\t\tlineStart = selLineEnd\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlineStart = self.selLineEnd\n\t\telse:\n\t\t\tlineStart = 0\n\t\t\tlineEnd = self.line + self.height\n\t\tcurrentLine = self.line\n\t\tscreenLine = self.top\n\t\tif type(all_) == type([]):\n\t\t\tlineStart, lineEnd = all_\n\t\t\tall_ = False\n\t\tcountLine = self.text.getCountLines()\n\t\tmaxLine = self.line + self.height\n\t\tif all_:\n\t\t\t# Erase the rest of the screen with empty line (used when the text is shorter than the screen)\n\t\t\tself.moveCursor(screenLine, 0)\n\t\t\tself.write(\"\\x1B[J\")\n\t\t\t# Refresh all lines visible\n\t\t\twhile currentLine < countLine and currentLine <= maxLine:\n\t\t\t\tself.showLine(currentLine, screenLine, selectionStart, selectionEnd, True)\n\t\t\t\tscreenLine += 1\n\t\t\t\tcurrentLine += 1\n\t\t\t\tif (currentLine < countLine and currentLine <= maxLine):\n\t\t\t\t\tself.write(\"\\n\\r\")\n\t\telse:\n\t\t\t# Refresh all lines visible\n\t\t\twhile currentLine < countLine and 
currentLine <= maxLine:\n\t\t\t\t# If the line is in selection or all must be refreshed\n\t\t\t\tif lineStart <= currentLine <= lineEnd or all_:\n\t\t\t\t\tself.showLine(currentLine, screenLine, selectionStart, selectionEnd)\n\t\t\t\tscreenLine += 1\n\t\t\t\tcurrentLine += 1\n\n\t\t# If selection present\n\t\tif selectionStart != None:\n\t\t\t# Save current selection\n\t\t\tdummy, self.selLineStart, dummy = selectionStart\n\t\t\tdummy, self.selLineEnd, dummy = selectionEnd\n\n\tdef hideSelection(self):\n\t\t\"\"\" Hide the selection \"\"\"\n\t\tselectionStart, selectionEnd = self.text.getSelection()\n\t\tif selectionStart != None:\n\t\t\tself.setRefreshSelection()\n\t\t\tself.selLineStart = None\n\t\t\tself.selLineEnd = None\n\n\tdef setRefreshSelection(self):\n\t\t\"\"\" Indicates that the selection must be refreshed \"\"\"\n\t\tselectionStart, selectionEnd = self.text.getSelection()\n\t\tif selectionStart != None:\n\t\t\t# self.isRefreshAll = True\n\t\t\tlineStart = selectionStart[1]\n\t\t\tif self.selLineStart < lineStart:\n\t\t\t\tlineStart = self.selLineStart\n\t\t\tlineEnd = selectionEnd[1]\n\t\t\tif self.selLineEnd > lineEnd:\n\t\t\t\tlineEnd = self.selLineEnd\n\t\t\tself.refreshPart = [lineStart, lineEnd]\n\n\tdef moveCursor(self, screenLine=None, screenColumn=None):\n\t\t\"\"\" Move the cursor in the view \"\"\"\n\t\tself.write(self.getMoveCursor(screenLine, screenColumn))\n\n\tdef getMoveCursor(self, screenLine=None, screenColumn=None):\n\t\t\"\"\" Move the cursor in the view \"\"\"\n\t\tif screenLine == None and screenColumn == None:\n\t\t\tscreenLine, screenColumn = self.getScreenPosition()\n\t\treturn \"\\x1B[%d;%df\"%(screenLine+1,screenColumn+1)\n\n\tdef getScreenSize(self):\n\t\t\"\"\" Get the screen size \"\"\"\n\t\theight, width = useful.getScreenSize()\n\t\tself.screenHeight = height\n\t\tself.screenWidth = width\n\t\tself.height = height-self.top-1\n\t\tself.width = width\n\t\tself.moveCursor()\n\n\tdef cls(self):\n\t\t\"\"\" Clear the screen \"\"\"\n\t\tself.write(\"\\x1B[2J\")\n\t\tself.moveCursor(0,0)\n\nclass Text:\n\t\"\"\" Class which manage the text edition \"\"\"\n\tdef __init__(self, readOnly=False):\n\t\t\"\"\" Constructor \"\"\"\n\t\tself.lines = [\"\"]\n\t\tself.cursorLine = 0\n\t\tself.cursorColumn = 0\n\t\tself.tabCursorColumn = 0\n\t\tself.modified = False\n\t\tself.replaceMode = False\n\t\tself.readOnly = readOnly\n\t\tself.view = None\n\t\tself.tabSize = TABSIZE\n\t\tself.selectionStart = None\n\t\tself.selectionEnd = None\n\t\tself.selection = []\n\t\tself.filename = None\n\n\tdef setView(self, view):\n\t\t\"\"\" Define the view attached to the text \"\"\"\n\t\tself.view = view\n\n\tdef getCountLines(self):\n\t\t\"\"\" Get the total of lines \"\"\"\n\t\treturn len(self.lines)\n\n\tdef getCursorLine(self):\n\t\t\"\"\" Get the current line of the cursor \"\"\"\n\t\treturn self.cursorLine\n\n\tdef getTabCursor(self, currentLine, currentColumn=None):\n\t\t\"\"\" Get position of cursor with line with tabulation \"\"\"\n\t\tif currentColumn == None:\n\t\t\tcursorColumn = self.cursorColumn\n\t\telse:\n\t\t\tcursorColumn = currentColumn\n\t\tline = self.lines[currentLine]\n\t\tif \"\\t\" in line:\n\t\t\ttabCursorColumn = 0\n\t\t\tcolumn = 0\n\t\t\tlenLine = len(line)\n\t\t\twhile column < cursorColumn: \n\t\t\t\tif line[column] == \"\\t\":\n\t\t\t\t\tpos = tabCursorColumn%self.tabSize\n\t\t\t\t\ttabCursorColumn += self.tabSize-pos\n\t\t\t\t\tcolumn += 1\n\t\t\t\telse:\n\t\t\t\t\ttab = line.find(\"\\t\",column)\n\t\t\t\t\tif tab > 0:\n\t\t\t\t\t\tpartSize = tab 
- column\n\t\t\t\t\telse:\n\t\t\t\t\t\tpartSize = lenLine - column\n\t\t\t\t\tif column + partSize > cursorColumn:\n\t\t\t\t\t\tpartSize = cursorColumn - column\n\t\t\t\t\ttabCursorColumn += partSize\n\t\t\t\t\tcolumn += partSize\n\t\t\treturn tabCursorColumn\n\t\telse:\n\t\t\treturn cursorColumn\n\n\tdef getTabLine(self, currentLine = None):\n\t\t\"\"\" Get the tabuled line \"\"\"\n\t\tline = self.lines[currentLine]\n\t\tif \"\\t\" in line:\n\t\t\ttabLine = \"\"\n\t\t\ttabCursorColumn = 0\n\t\t\tlenLine = len(line)\n\t\t\tcolumn = 0\n\t\t\twhile column < lenLine: \n\t\t\t\tchar = line[column]\n\t\t\t\tif char == \"\\t\":\n\t\t\t\t\tpos = tabCursorColumn%self.tabSize\n\t\t\t\t\ttabCursorColumn += self.tabSize-pos\n\t\t\t\t\ttabLine += \" \"*(self.tabSize-pos)\n\t\t\t\t\tcolumn += 1\n\t\t\t\telse:\n\t\t\t\t\ttab = line.find(\"\\t\",column)\n\t\t\t\t\tif tab > 0:\n\t\t\t\t\t\tpart = line[column:tab]\n\t\t\t\t\telse:\n\t\t\t\t\t\tpart = line[column:]\n\t\t\t\t\ttabCursorColumn += len(part)\n\t\t\t\t\ttabLine += part\n\t\t\t\t\tcolumn += len(part)\n\t\telse:\n\t\t\ttabLine = line\n\t\treturn tabLine\n\n\tdef getTabCursorColumn(self):\n\t\t\"\"\" Get the column of cursor in tabuled line \"\"\"\n\t\tline = self.lines[self.cursorLine]\n\t\tcolumn = 0\n\t\tself.tabCursorColumn = 0\n\t\twhile column < self.cursorColumn:\n\t\t\tif line[column] == \"\\t\":\n\t\t\t\tpos = self.tabCursorColumn%self.tabSize\n\t\t\t\tself.tabCursorColumn += self.tabSize-pos\n\t\t\t\tcolumn += 1\n\t\t\telse:\n\t\t\t\ttab = line.find(\"\\t\",column)\n\t\t\t\tif tab > 0:\n\t\t\t\t\tdelta = tab - column\n\t\t\t\t\tif column + delta > self.cursorColumn:\n\t\t\t\t\t\tdelta = self.cursorColumn - column\n\t\t\t\t\t\tself.tabCursorColumn += delta\n\t\t\t\t\t\tcolumn += delta\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.tabCursorColumn += delta\n\t\t\t\t\t\tcolumn += delta\n\t\t\t\telse:\n\t\t\t\t\tdelta = self.cursorColumn - column\n\t\t\t\t\tself.tabCursorColumn += delta\n\t\t\t\t\tcolumn += delta\n\n\tdef setCursorColumn(self):\n\t\t\"\"\" When the line change compute the cursor position with tabulation in the line \"\"\"\n\t\tline = self.lines[self.cursorLine]\n\t\tcolumn = 0\n\t\ttabCursorColumn = 0\n\t\tlenLine = len(line)\n\t\tcolumn = 0\n\t\twhile column < lenLine: \n\t\t\tchar = line[column]\n\t\t\t# If the previous position found exactly in the current line\n\t\t\tif tabCursorColumn == self.tabCursorColumn:\n\t\t\t\tself.cursorColumn = column\n\t\t\t\tbreak\n\t\t\t# If the previous position not found in the current line\n\t\t\tif tabCursorColumn > self.tabCursorColumn:\n\t\t\t\t# Keep last existing position\n\t\t\t\tself.cursorColumn = column\n\t\t\t\tbreak\n\t\t\t# If tabulation found\n\t\t\tif char == \"\\t\":\n\t\t\t\ttabCursorColumn += self.tabSize-(tabCursorColumn%self.tabSize)\n\t\t\t\tcolumn += 1\n\t\t\telse:\n\t\t\t\t# Optimization to accelerate the cursor position\n\t\t\t\ttab = line.find(\"\\t\", column)\n\n\t\t\t\t# Tabulation found\n\t\t\t\tif tab > 0:\n\t\t\t\t\tdelta = tab - column\n\t\t\t\t\t# If the tabulation position is after the previous tabulation cursor\n\t\t\t\t\tif delta + tabCursorColumn > self.tabCursorColumn:\n\t\t\t\t\t\t# Move the cursor to the left\n\t\t\t\t\t\tself.cursorColumn = column + (self.tabCursorColumn - tabCursorColumn)\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Another tabulation found, move it after\n\t\t\t\t\t\ttabCursorColumn += delta\n\t\t\t\t\t\tcolumn += delta\n\t\t\t\t# Tabulation not found\n\t\t\t\telse:\n\t\t\t\t\t# Move the cursor to the end of 
line\n\t\t\t\t\tself.cursorColumn = column + (self.tabCursorColumn - tabCursorColumn)\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tif len(line) >= 1:\n\t\t\t\tself.cursorColumn = len(line)-1\n\t\t\telse:\n\t\t\t\tself.cursorColumn = 0\n\n\tdef load(self, filename_):\n\t\t\"\"\" Load file in the editor \"\"\"\n\t\tself.filename = None\n\t\ttry:\n\t\t\tself.lines = []\n\t\t\tself.filename = filename_\n\t\t\tfile = open(filename_, \"r\")\n\t\t\tline = file.readline()\n\t\t\twhile line != \"\":\n\t\t\t\tself.lines.append(line.replace(\"\\r\\n\",\"\\n\"))\n\t\t\t\tline = file.readline()\n\t\t\tfile.close()\n\t\t\tif len(self.lines) == 0:\n\t\t\t\tself.lines = [\"\"]\n\t\texcept MemoryError:\n\t\t\t# pylint: disable=raise-missing-from\n\t\t\traise MemoryError()\n\t\texcept OSError:\n\t\t\tself.lines = [\"\"]\n\t\t\t# File not existing\n\t\texcept Exception as err:\n\t\t\tuseful.syslog(err)\n\t\t\tself.lines = [\"\"]\n\n\tdef save(self):\n\t\t\"\"\" Save text in the file \"\"\"\n\t\tresult = False\n\t\tif self.readOnly == False:\n\t\t\tif self.filename != None:\n\t\t\t\ttry:\n\t\t\t\t\tfile = open(self.filename, \"w\")\n\t\t\t\t\tfor line in self.lines:\n\t\t\t\t\t\tfile.write(line)\n\t\t\t\t\tfile.close()\n\t\t\t\t\tself.modified = False\n\t\t\t\t\tresult = True\n\t\t\t\texcept Exception as err:\n\t\t\t\t\tuseful.syslog(err)\n\t\treturn result\n\n\tdef changeLine(self, moveLine):\n\t\t\"\"\" Move the cursor on another line \"\"\"\n\t\t# If cursor is before the first line\n\t\tif moveLine + self.cursorLine < 0:\n\t\t\t# Set the cursor to the first line\n\t\t\tself.cursorLine = 0\n\t\t\tself.cursorColumn = 0\n\t\t\tself.changeColumn(0)\n\t\t# If the cursor is after the last line\n\t\telif moveLine + self.cursorLine >= len(self.lines):\n\t\t\tself.cursorLine = len(self.lines) -1\n\t\t\tself.cursorColumn = len(self.lines[self.cursorLine])\n\t\t\tself.changeColumn(0)\n\t\t# else the cursor is in the lines of text\n\t\telse:\n\t\t\tpreviousLine = self.cursorLine\n\t\t\tself.cursorLine += moveLine\n\t\t\tif len(self.lines) - 1 == self.cursorLine:\n\t\t\t\tlenLine = len(self.lines[self.cursorLine])\n\t\t\telse:\n\t\t\t\tlenLine = len(self.lines[self.cursorLine])-1\n\n\t\t\tself.setCursorColumn()\n\t\t\t# If the new cursor position is outside the last line of text\n\t\t\tif self.cursorColumn > lenLine:\n\t\t\t\tself.cursorColumn = lenLine\n\n\t\tif self.selectionStart != None:\n\t\t\tself.selectionEnd = [self.cursorColumn, self.cursorLine,self.getTabCursor(self.cursorLine)]\n\t\tself.view.move()\n\n\tdef changeColumn(self, moveColumn):\n\t\t\"\"\" Move the cursor on another column \"\"\"\n\t\tcursorLine = self.cursorLine\n\t\tcursorColumn = self.cursorColumn\n\t\t# If the cursor go to the previous line\n\t\tif moveColumn + self.cursorColumn < 0:\n\t\t\t# If start of line\n\t\t\tif abs(moveColumn) > 1:\n\t\t\t\tself.cursorColumn = 0\n\t\t\t# If move to the left and must go to previous line\n\t\t\telif self.cursorLine > 0:\n\t\t\t\tself.cursorLine -= 1\n\t\t\t\tself.cursorColumn = len(self.lines[self.cursorLine])-1\n\t\t# If the cursor is at the end of line\n\t\telif moveColumn + self.cursorColumn > len(self.lines[self.cursorLine])-1:\n\t\t\t# If the cursor is on the last line of file\n\t\t\tif abs(moveColumn) > 1 or self.cursorLine+1 == len(self.lines):\n\t\t\t\t# If the file is empty\n\t\t\t\tif self.lines[self.cursorLine] == \"\":\n\t\t\t\t\tself.cursorColumn = 0\n\t\t\t\t\tself.tabCursorColumn = 0\n\t\t\t\t# If the last line of contains return char\n\t\t\t\telif self.lines[self.cursorLine][-1] == 
\"\\n\":\n\t\t\t\t\t# Move cursor before return\n\t\t\t\t\tself.cursorColumn = len(self.lines[self.cursorLine])-1\n\t\t\t\telse:\n\t\t\t\t\t# Move cursor after the last char\n\t\t\t\t\tself.cursorColumn = len(self.lines[self.cursorLine])\n\n\t\t\t# If the cursor is on the end of line and must change of line\n\t\t\telif self.cursorLine+1 < len(self.lines):\n\t\t\t\tself.cursorLine += 1\n\t\t\t\tself.cursorColumn = 0\n\t\t\t\tself.tabCursorColumn = 0\n\t\t# Normal move of cursor\n\t\telse:\n\t\t\t# Next or previous column\n\t\t\tself.cursorColumn += moveColumn\n\t\tif abs(moveColumn) > 0:\n\t\t\tself.getTabCursorColumn()\n\t\tself.closeSelection()\n\t\tself.view.move()\n\t\tif self.cursorColumn == cursorColumn and self.cursorLine == cursorLine:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef backspace(self):\n\t\t\"\"\" Manage the backspace key \"\"\"\n\t\tself.modified = True\n\t\tif self.removeSelection() == False:\n\t\t\t# The cursor not in the begining of line\n\t\t\tif self.cursorColumn >= 1:\n\t\t\t\tline = self.lines[self.cursorLine]\n\t\t\t\tline = line[0:self.cursorColumn-1:]+ line[self.cursorColumn : :]\n\t\t\t\tself.lines[self.cursorLine] = line\n\t\t\t\tself.changeColumn(-1)\n\t\t\t\tself.view.setRefreshLine()\n\t\t\t# The cursor is on the begining of line\n\t\t\telse:\n\t\t\t\t# If the cursor not on the first line\n\t\t\t\tif self.cursorLine >= 1:\n\t\t\t\t\t# Copy the current line to the end of previous line\n\t\t\t\t\tself.cursorColumn = len(self.lines[self.cursorLine-1])\n\t\t\t\t\tself.lines[self.cursorLine-1] = self.lines[self.cursorLine-1][:-1] + self.lines[self.cursorLine]\n\t\t\t\t\tdel self.lines[self.cursorLine]\n\t\t\t\t\tself.view.scrollPartUp()\n\t\t\t\t\tself.cursorLine -= 1\n\t\t\t\t\tself.view.setRefreshAfter()\n\t\t\t\t\tself.changeColumn(-1)\n\n\tdef delete(self):\n\t\t\"\"\" Manage the delete key \"\"\"\n\t\tself.modified = True\n\t\tif self.removeSelection() == False:\n\t\t\tline = self.lines[self.cursorLine]\n\t\t\tif self.cursorColumn < len(line):\n\t\t\t\t# If the line is empty\n\t\t\t\tif line[self.cursorColumn] == \"\\n\":\n\t\t\t\t\t# If the cursor not at end of files\n\t\t\t\t\tif self.cursorLine < len(self.lines)-1:\n\t\t\t\t\t\t# Copy the next line to the current line\n\t\t\t\t\t\tself.lines[self.cursorLine] = line[:self.cursorColumn] + self.lines[self.cursorLine+1]\n\t\t\t\t\t\tdel self.lines[self.cursorLine+1]\n\t\t\t\t\t\tself.view.scrollPartUp()\n\t\t\t\t\t\tself.view.setRefreshAfter()\n\t\t\t\t# Else the char is deleted in the middle of line\n\t\t\t\telse:\n\t\t\t\t\tline = line[0:self.cursorColumn:]+ line[self.cursorColumn+1 : :]\n\t\t\t\t\tself.lines[self.cursorLine] = line\n\t\t\t\t\tself.changeColumn(0)\n\t\t\t\t\tself.view.isRefreshLine = True\n\n\tdef deleteLine(self):\n\t\t\"\"\" Manage the delete of line key \"\"\"\n\t\tself.hideSelection()\n\t\tself.modified = True\n\t\t# If file contains one or none line\n\t\tif len(self.lines) <= 1:\n\t\t\t# Clean the content of file\n\t\t\tself.lines = [\"\"]\n\t\t\tself.cursorColumn = 0\n\t\t\tself.cursorLine = 0\n\t\t\tself.changeColumn(0)\n\t\t# If the current line is not the last of file\n\t\telif self.cursorLine < len(self.lines):\n\t\t\t# Delete the line\n\t\t\tself.cursorColumn = 0\n\t\t\tdel self.lines[self.cursorLine]\n\t\t\tself.view.scrollPartUp()\n\t\t\tif self.cursorLine >= len(self.lines):\n\t\t\t\tself.cursorLine = len(self.lines)-1\n\t\t\tself.changeColumn(0)\n\t\tself.view.setRefreshAfter()\n\n\tdef newLine(self):\n\t\t\"\"\" Manage the newline key 
\"\"\"\n\t\tself.modified = True\n\t\tif self.removeSelection() == False:\n\t\t\tline1 = self.lines[self.cursorLine][:self.cursorColumn]+\"\\n\"\n\t\t\tline2 = self.lines[self.cursorLine][self.cursorColumn:]\n\t\t\tself.lines[self.cursorLine]=line1\n\t\t\tself.lines.insert(self.cursorLine+1, line2)\n\t\t\tself.view.scrollPartDown()\n\t\t\tself.changeColumn(1)\n\t\t\tself.view.setRefreshBefore()\n\n\tdef insertChar(self, char):\n\t\t\"\"\" Insert character \"\"\"\n\t\tself.modified = True\n\t\tself.lines[self.cursorLine] = self.lines[self.cursorLine][:self.cursorColumn] + char + self.lines[self.cursorLine][self.cursorColumn:]\n\t\tself.changeColumn(1)\n\t\tself.view.setRefreshLine()\n\n\tdef replaceChar(self, char):\n\t\t\"\"\" Replace character \"\"\"\n\t\tself.modified = True\n\t\tif self.cursorLine == len(self.lines)-1 and self.cursorColumn >= len(self.lines[self.cursorLine])-1:\n\t\t\tself.lines[self.cursorLine] = self.lines[self.cursorLine][:self.cursorColumn] + char \n\t\t\tself.changeColumn(1)\n\t\t\tself.view.setRefreshLine()\n\t\t# If it is the last char in the line\n\t\telif self.lines[self.cursorLine][self.cursorColumn] == \"\\n\":\n\t\t\t# Append char to the line\n\t\t\tself.insertChar(char)\n\t\t# Else the char must be replaced in the line\n\t\telse:\n\t\t\tself.lines[self.cursorLine] = self.lines[self.cursorLine][:self.cursorColumn] + char + self.lines[self.cursorLine][self.cursorColumn+1:]\n\t\t\tself.changeColumn(1)\n\t\t\tself.view.setRefreshLine()\n\n\tdef openSelection(self):\n\t\t\"\"\" Start a selection \"\"\"\n\t\tif self.selectionStart == None:\n\t\t\tself.selectionStart = [self.cursorColumn, self.cursorLine, self.getTabCursor(self.cursorLine)]\n\n\tdef closeSelection(self):\n\t\t\"\"\" Terminate selection \"\"\"\n\t\tif self.selectionStart != None:\n\t\t\tself.selectionEnd = [self.cursorColumn, self.cursorLine,self.getTabCursor(self.cursorLine)]\n\n\tdef selectAll(self):\n\t\t\"\"\" Do a select all \"\"\"\n\t\tself.selectionStart = [0,0,0]\n\t\tlastLine = len(self.lines)-1\n\t\tlastColumn = len(self.lines[lastLine])-1\n\t\tself.moveCursor(lastLine, lastColumn)\n\t\tself.selectionEnd = [lastColumn, lastLine, self.getTabCursor(lastLine, lastColumn)]\n\t\tself.view.setRefreshAll()\n\n\tdef getSelection(self):\n\t\t\"\"\" Get information about selection \"\"\"\n\t\tif self.selectionStart:\n\t\t\tif self.selectionStart[1] > self.selectionEnd[1]:\n\t\t\t\treturn self.selectionEnd, self.selectionStart\n\t\t\telif self.selectionStart[1] < self.selectionEnd[1]:\n\t\t\t\treturn self.selectionStart, self.selectionEnd\n\t\t\telif self.selectionStart[0] < self.selectionEnd[0]:\n\t\t\t\treturn self.selectionStart, self.selectionEnd\n\t\t\telse:\n\t\t\t\treturn self.selectionEnd, self.selectionStart\n\t\telse:\n\t\t\treturn None, None\n\n\tdef arrowUp(self, keys):\n\t\t\"\"\" Manage arrow up key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeLine(-1)\n\t\n\tdef arrowDown(self, keys):\n\t\t\"\"\" Manage arrow down key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeLine(1)\n\n\tdef arrowLeft(self, keys):\n\t\t\"\"\" Manage arrow left key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeColumn(-len(keys))\n\n\tdef arrowRight(self, keys):\n\t\t\"\"\" Manage arrow right key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeColumn(len(keys))\n\n\tdef selectUp(self, keys):\n\t\t\"\"\" Manage select up key \"\"\"\n\t\tself.openSelection()\n\t\tself.changeLine(-1)\n\t\n\tdef selectDown(self, keys):\n\t\t\"\"\" Manage select down key 
\"\"\"\n\t\tself.openSelection()\n\t\tself.changeLine(1)\n\n\tdef selectLeft(self, keys):\n\t\t\"\"\" Manage select left key \"\"\"\n\t\tself.openSelection()\n\t\tself.changeColumn(-len(keys))\n\n\tdef selectRight(self, keys):\n\t\t\"\"\" Manage select right key \"\"\"\n\t\tself.openSelection()\n\t\tself.changeColumn(len(keys))\n\n\tdef selectHome(self):\n\t\t\"\"\" Manage home key \"\"\"\n\t\tself.openSelection()\n\t\tself.changeColumn(-100000000000)\n\n\tdef selectEnd(self):\n\t\t\"\"\" Manage end key \"\"\"\n\t\tself.openSelection()\n\t\tself.changeColumn(100000000000)\n\n\tdef selectPageUp(self, keys):\n\t\t\"\"\" Manage select page up key \"\"\"\n\t\tself.openSelection()\n\t\tself.changeLine((-self.view.height-1) * len(keys))\n\t\tself.changeColumn(-100000000000)\n\n\tdef selectPageDown(self, keys):\n\t\t\"\"\" Manage select page down key \"\"\"\n\t\tself.openSelection()\n\t\tself.changeLine((self.view.height+1) * len(keys))\n\t\tself.changeColumn(100000000000)\n\n\tdef selectNextWord(self):\n\t\t\"\"\" Manage select next word key \"\"\"\n\t\tself.openSelection()\n\t\tself.moveWord(1)\n\n\tdef selectPreviousWord(self):\n\t\t\"\"\" Manage select previous word key \"\"\"\n\t\tself.openSelection()\n\t\tself.moveWord(-1)\n\n\tdef pageUp(self, keys):\n\t\t\"\"\" Manage page up key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeLine((-self.view.height-1) * len(keys))\n\n\tdef pageDown(self, keys):\n\t\t\"\"\" Manage page down key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeLine((self.view.height+1) * len(keys))\n\n\tdef home(self):\n\t\t\"\"\" Manage home key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeColumn(-100000000000)\n\n\tdef end(self):\n\t\t\"\"\" Manage end key \"\"\"\n\t\tself.hideSelection()\n\t\tself.changeColumn(100000000000)\n\n\tdef addChar(self, keys):\n\t\t\"\"\" Manage other key, add character \"\"\"\n\t\tresult = False\n\n\t\tif useful.isascii(keys[0]):\n\t\t\tself.removeSelection()\n\t\t\tfor char in keys:\n\t\t\t\tif useful.isascii(char):\n\t\t\t\t\tif self.replaceMode:\n\t\t\t\t\t\tself.replaceChar(char)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.insertChar(char)\n\t\t\t\t\tresult = True\n\t\t# if result == False:\n\t\t\t# print(useful.dump(keys[0]))\n\t\treturn result\n\n\tdef findNext(self, text):\n\t\t\"\"\" Find next researched text \"\"\"\n\t\t# Get the selection\n\t\tselectionStart, selectionEnd = self.getSelection()\n\n\t\t# Hide the selection\n\t\tself.hideSelection()\n\n\t\t# Set the start of search at the cursor position\n\t\tcurrentLine = self.cursorLine\n\t\tcurrentColumn = self.cursorColumn\n\n\t\t# If selection activated\n\t\tif selectionStart != None and selectionEnd != None:\n\t\t\t# If selection is on one line\n\t\t\tif selectionStart[1] == selectionEnd[1] and currentLine == selectionStart[1]:\n\t\t\t\t# If selection is exactly the size of text\n\t\t\t\tif selectionStart[0] == currentColumn:\n\t\t\t\t\t# Move the start of search after the text selected\n\t\t\t\t\tcurrentColumn = selectionEnd[0]\n\n\t\t# Find the text in next lines\n\t\twhile currentLine < len(self.lines):\n\t\t\t# Search text\n\t\t\tpos = self.lines[currentLine].find(text, currentColumn)\n\n\t\t\t# If text found\n\t\t\tif pos >= 0:\n\t\t\t\t# Move the cursor to the text found\n\t\t\t\tself.cursorLine = currentLine\n\t\t\t\tself.cursorColumn = pos + len(text)\n\t\t\t\tself.changeColumn(0)\n\t\t\t\tself.selectionStart = [pos, currentLine,self.getTabCursor(currentLine,pos)]\n\t\t\t\tself.selectionEnd = [pos + len(text), currentLine, self.getTabCursor(currentLine, pos + 
len(text))]\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\t# Set the search position at the begin of next line\n\t\t\t\tcurrentColumn = 0\n\t\t\t\tcurrentLine += 1\n\t\tself.view.move()\n\n\tdef findPrevious(self, text):\n\t\t\"\"\" Find previous researched text \"\"\"\n\t\t# Get the selection\n\t\tselectionStart, selectionEnd = self.getSelection()\n\n\t\t# Hide the selection\n\t\tself.hideSelection()\n\n\t\t# Set the start of search at the cursor position\n\t\tcurrentLine = self.cursorLine\n\t\tcurrentColumn = self.cursorColumn\n\n\t\t# If selection activated\n\t\tif selectionStart != None and selectionEnd != None:\n\t\t\t# If selection is on one line\n\t\t\tif selectionStart[1] == selectionEnd[1] and currentLine == selectionStart[1]:\n\t\t\t\t# If selection is exactly the size of text\n\t\t\t\tif selectionEnd[0] - selectionStart[0] == len(text):\n\t\t\t\t\t# Move the start of search before the text selected\n\t\t\t\t\tcurrentColumn = selectionStart[0]\n\n\t\t# While the line before the first line not reached\n\t\twhile currentLine >= 0:\n\t\t\t# Get the current line\n\t\t\tline = self.lines[currentLine]\n\n\t\t\t# If the current column is negative\n\t\t\tif currentColumn < 0:\n\t\t\t\t# Set the end of line\n\t\t\t\tcurrentColumn = len(line)\n\n\t\t\t# Search the text in reverse\n\t\t\tpos = line.rfind(text, 0, currentColumn)\n\n\t\t\t# If text found\n\t\t\tif pos >= 0:\n\t\t\t\tself.cursorLine = currentLine\n\t\t\t\tself.cursorColumn = pos\n\t\t\t\tself.changeColumn(0)\n\t\t\t\tself.selectionStart = [pos, currentLine,self.getTabCursor(currentLine,pos)]\n\t\t\t\tself.selectionEnd = [pos + len(text), currentLine, self.getTabCursor(currentLine, pos + len(text))]\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\t# Set the search position at the end of line\n\t\t\t\tcurrentColumn = -1\n\t\t\t\tcurrentLine -= 1\n\t\tself.view.move()\n\n\tdef hideSelection(self):\n\t\t\"\"\" Hide selection \"\"\"\n\t\tself.view.hideSelection()\n\t\tself.selectionStart = self.selectionEnd = None\n\n\tdef goto(self, lineNumber):\n\t\t\"\"\" Goto specified line \"\"\"\n\t\tself.hideSelection()\n\t\tif lineNumber < 0:\n\t\t\tself.cursorLine = len(self.lines)-1\n\t\telif lineNumber < 1:\n\t\t\tself.cursorLine = 1\n\t\telif lineNumber < len(self.lines):\n\t\t\tself.cursorLine = lineNumber - 1\n\t\telse:\n\t\t\tself.cursorLine = len(self.lines)-1\n\t\tself.cursorColumn = 0\n\t\tself.changeColumn(0)\n\t\tself.view.move()\n\n\tdef copyClipboard(self):\n\t\t\"\"\" Copy selection to clipboard \"\"\"\n\t\tresult = []\n\t\tif self.selectionStart != None:\n\t\t\tselectionStart, selectionEnd = self.getSelection()\n\t\t\tselColumnStart, selLineStart, dummy = selectionStart\n\t\t\tselColumnEnd, selLineEnd, dummy = selectionEnd\n\t\t\tresult = []\n\t\t\tif selLineStart == selLineEnd:\n\t\t\t\tresult.append(self.lines[selLineStart][selColumnStart:selColumnEnd])\n\t\t\telse:\n\t\t\t\tfor line in range(selLineStart, selLineEnd+1):\n\t\t\t\t\tif line == selLineStart:\n\t\t\t\t\t\tpart = self.lines[line][selColumnStart:]\n\t\t\t\t\t\tif part != \"\":\n\t\t\t\t\t\t\tresult.append(self.lines[line][selColumnStart:])\n\t\t\t\t\telif line == selLineEnd:\n\t\t\t\t\t\tpart = self.lines[line][:selColumnEnd]\n\t\t\t\t\t\tif part != \"\":\n\t\t\t\t\t\t\tresult.append(self.lines[line][:selColumnEnd])\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult.append(self.lines[line])\n\t\treturn result\n\n\tdef removeSelection(self):\n\t\t\"\"\" Remove selection \"\"\"\n\t\tif self.selectionStart != None:\n\t\t\tself.modified = True\n\t\t\tselectionStart, selectionEnd = 
self.getSelection()\n\t\t\tselColumnStart, selLineStart, dummy = selectionStart\n\t\t\tselColumnEnd, selLineEnd, dummy = selectionEnd\n\t\t\tstart = self.lines[selLineStart][:selColumnStart]\n\t\t\tend = self.lines[selLineEnd ][selColumnEnd:]\n\t\t\tself.lines[selLineStart] = start + end\n\t\t\tif selLineStart < selLineEnd:\n\t\t\t\tfor line in range(selLineEnd, selLineStart,-1):\n\t\t\t\t\tdel self.lines[line]\n\t\t\tself.moveCursor(selLineStart, selColumnStart)\n\t\t\tself.hideSelection()\n\t\t\tself.view.setRefreshAll()\n\t\t\treturn True\n\t\treturn False\n\n\tdef pasteClipboard(self, selection):\n\t\t\"\"\" Paste clipboard at the cursor position \"\"\"\n\t\tif selection != []:\n\t\t\t# Split the line with insertion\n\t\t\tstart = self.lines[self.cursorLine][:self.cursorColumn]\n\t\t\tend = self.lines[self.cursorLine][self.cursorColumn:]\n\n\t\t\t# Paste the first line\n\t\t\tself.lines[self.cursorLine] = start + selection[0]\n\n\t\t\tself.cursorLine += 1\n\n\t\t\t# Insert all lines from clipboard\n\t\t\tfor line in selection[1:-1]:\n\t\t\t\tself.lines.insert(self.cursorLine, line)\n\t\t\t\tself.cursorLine += 1\n\n\t\t\t# If the last line of clipboard is not empty\n\t\t\tif len(selection[-1]) >= 1:\n\t\t\t\t# If the last line of clipboard contains new line\n\t\t\t\tif selection[-1][-1] == \"\\n\":\n\t\t\t\t\tif len(selection) > 1:\n\t\t\t\t\t\t# Add the new line\n\t\t\t\t\t\tself.lines.insert(self.cursorLine, selection[-1])\n\t\t\t\t\t\tself.cursorLine += 1\n\n\t\t\t\t\t# Add the part after the insertion\n\t\t\t\t\tself.lines.insert(self.cursorLine, end)\n\t\t\t\t\tself.cursorColumn = 0\n\t\t\t\telse:\n\t\t\t\t\tif len(selection) > 1:\n\t\t\t\t\t\tself.lines.insert(self.cursorLine, selection[-1] + end)\n\t\t\t\t\t\tself.cursorColumn = len(selection[-1])\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.cursorLine -= 1\n\t\t\t\t\t\tself.lines[self.cursorLine] += end\n\t\t\t\t\t\tself.cursorColumn = len(start) + len(selection[-1])\n\t\t\t\t\t\n\t\t\tself.moveCursor(self.cursorLine, self.cursorColumn)\n\n\tdef moveCursor(self, line, column):\n\t\t\"\"\" Move the cursor \"\"\"\n\t\tself.cursorLine = line\n\t\tself.cursorColumn = column\n\t\tself.changeColumn(0)\n\t\tself.getTabCursorColumn()\n\n\tdef copy(self):\n\t\t\"\"\" Manage copy key \"\"\"\n\t\tself.selection = self.copyClipboard()\n\n\tdef cut(self):\n\t\t\"\"\" Manage cut key \"\"\"\n\t\tself.modified = True\n\t\tself.selection = self.copyClipboard()\n\t\tself.removeSelection()\n\n\tdef paste(self):\n\t\t\"\"\" Manage paste key \"\"\"\n\t\tself.modified = True\n\t\tself.removeSelection()\n\t\tself.pasteClipboard(self.selection)\n\t\tself.view.setRefreshAll()\n\t\tself.hideSelection()\n\n\tdef changeCase(self):\n\t\t\"\"\" Change the case of selection \"\"\"\n\t\tselection = self.copyClipboard()\n\t\tif selection != []:\n\t\t\tself.modified = True\n\t\t\tselectionStart = self.selectionStart\n\t\t\tselectionEnd = self.selectionEnd\n\n\t\t\tself.removeSelection()\n\t\t\tisUpper = None\n\t\t\tfor line in selection:\n\t\t\t\tfor char in line:\n\t\t\t\t\tif useful.isupper(char):\n\t\t\t\t\t\tisUpper = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif useful.islower(char):\n\t\t\t\t\t\tisUpper = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif isUpper != None:\n\t\t\t\t\tbreak\n\t\t\tfor line in range(len(selection)):\n\t\t\t\tif isUpper:\n\t\t\t\t\tselection[line] = selection[line].lower()\n\t\t\t\telse:\n\t\t\t\t\tselection[line] = selection[line].upper()\n\t\t\tself.pasteClipboard(selection)\n\t\t\tself.view.setRefreshSelection()\n\t\t\tself.selectionStart = 
selectionStart\n\t\t\tself.selectionEnd = selectionEnd\n\n\tdef comment(self):\n\t\t\"\"\" Comment the selection \"\"\"\n\t\tself.modified = True\n\n\t\t# If selection\n\t\tif self.selectionStart != None:\n\t\t\tselectionStart, selectionEnd = self.getSelection()\n\t\t\tselColumnStart, selLineStart, dummy = selectionStart\n\t\t\tselColumnEnd, selLineEnd, dummy = selectionEnd\n\n\t\t\t# Add tabulation\n\t\t\tfor line in range(selLineStart, selLineEnd+1):\n\t\t\t\tif len(self.lines[line]) >= 1:\n\t\t\t\t\tif self.lines[line][0] != '#':\n\t\t\t\t\t\tself.lines[line] = \"#\" + self.lines[line]\n\t\t\t\t\telse:\n\t\t\t\t\t\tif len(self.lines[line]) >= 1:\n\t\t\t\t\t\t\tself.lines[line] = self.lines[line][1:]\n\n\t\t\t# Move the start selection to the start of first selected line\n\t\t\tself.selectionStart = [0,selLineStart, 0]\n\n\t\t\t# Get the length of last selected line\n\t\t\tlenLineEnd = len(self.lines[selLineEnd])\n\n\t\t\t# Move the end of selection at the end of line selected\n\t\t\tself.selectionEnd = [lenLineEnd-1, selLineEnd, self.getTabCursor(selLineEnd,lenLineEnd-1)]\n\t\t\tself.view.setRefreshSelection()\n\t\telse:\n\t\t\tif len(self.lines[self.cursorLine]) >= 1:\n\t\t\t\t# If nothing selected\n\t\t\t\tif self.lines[self.cursorLine][0] == \"#\":\n\t\t\t\t\tself.lines[self.cursorLine] = self.lines[self.cursorLine][1:]\n\t\t\t\t\tif self.cursorColumn > 0:\n\t\t\t\t\t\tself.changeColumn(-1)\n\t\t\t\telse:\n\t\t\t\t\tself.lines[self.cursorLine] = \"#\" + self.lines[self.cursorLine]\n\t\t\t\t\tself.changeColumn(1)\n\t\t\tself.view.setRefreshLine()\n\n\tdef indent(self, keys):\n\t\t\"\"\" Manage tabulation key \"\"\"\n\t\t# If nothing selected\n\t\tif self.selectionStart == None:\n\t\t\tself.addChar(keys)\n\t\telse:\n\t\t\tself.modified = True\n\t\t\t# Indent selection\n\t\t\tselectionStart, selectionEnd = self.getSelection()\n\t\t\tselColumnStart, selLineStart, dummy = selectionStart\n\t\t\tselColumnEnd, selLineEnd, dummy = selectionEnd\n\n\t\t\t# If a part of line selected\n\t\t\tif selLineStart == selLineEnd and not (selColumnStart == 0 and selColumnEnd == len(self.lines[selLineEnd])-1):\n\t\t\t\tself.addChar(INDENT)\n\t\t\telse:\n\t\t\t\t# If the last line selected is at beginning of line\n\t\t\t\tif selColumnEnd == 0:\n\t\t\t\t\t# This line must not be indented\n\t\t\t\t\tselLineEnd -= 1\n\n\t\t\t\t# Add tabulation\n\t\t\t\tfor line in range(selLineStart, selLineEnd+1):\n\t\t\t\t\tself.lines[line] = \"\\t\" + self.lines[line]\n\n\t\t\t\t# Move the start selection to the start of first selected line\n\t\t\t\tself.selectionStart = [0,selLineStart, 0]\n\n\t\t\t\t# If the last line selected is not at beginning of line\n\t\t\t\tif selColumnEnd > 0:\n\t\t\t\t\t# Get the length of last selected line\n\t\t\t\t\tlenLineEnd = len(self.lines[selLineEnd])\n\n\t\t\t\t\t# If the end of selection is not on the last line\n\t\t\t\t\tif selLineEnd < len(self.lines)-1:\n\t\t\t\t\t\tlenLineEnd -= 1\n\n\t\t\t\t\t# Move the end of selection at the end of line selected\n\t\t\t\t\tself.selectionEnd = [lenLineEnd, selLineEnd, self.getTabCursor(selLineEnd,lenLineEnd)]\n\t\t\t\telse:\n\t\t\t\t\t# Move the end of selection at the start of the last line selected\n\t\t\t\t\tself.selectionEnd = [0, selLineEnd+1, 0]\n\t\t\tself.view.setRefreshSelection()\n\n\tdef unindent(self, keys):\n\t\t\"\"\" Manage the unindentation key \"\"\"\n\t\t# If nothing selected\n\t\tif self.selectionStart == None:\n\t\t\tself.backspace()\n\t\telse:\n\t\t\tself.modified = True\n\n\t\t\t# Unindent selection\n\t\t\tselectionStart, 
selectionEnd = self.getSelection()\n\t\t\tselColumnStart, selLineStart, dummy = selectionStart\n\t\t\tselColumnEnd, selLineEnd, dummy = selectionEnd\n\n\t\t\t# If the selection is only alone line\n\t\t\tif selLineStart == selLineEnd:\n\t\t\t\tself.hideSelection()\n\t\t\telse:\n\t\t\t\t# If the last line selected is at beginning of line\n\t\t\t\tif selColumnEnd == 0:\n\t\t\t\t\t# This line must not be indented\n\t\t\t\t\tselLineEnd -= 1\n\n\t\t\t\t# Remove indentation\n\t\t\t\tfor line in range(selLineStart, selLineEnd+1):\n\t\t\t\t\tif len(self.lines[line]) >= 1:\n\t\t\t\t\t\tif self.lines[line][0] == \"\\t\" or self.lines[line][0] == \" \":\n\t\t\t\t\t\t\tself.lines[line] = self.lines[line][1:]\n\n\t\t\t\t# Move the start selection to the start of first selected line\n\t\t\t\tself.selectionStart = [0,selLineStart, 0]\n\n\t\t\t\t# If the last line selected is not at beginning of line\n\t\t\t\tif selColumnEnd > 0:\n\t\t\t\t\t# Get the length of last selected line\n\t\t\t\t\tlenLineEnd = len(self.lines[selLineEnd])\n\n\t\t\t\t\t# If the end of selection is not on the last line\n\t\t\t\t\tif selLineEnd < len(self.lines)-1:\n\t\t\t\t\t\tlenLineEnd -= 1\n\n\t\t\t\t\t# Move the end of selection at the end of line selected\n\t\t\t\t\tself.selectionEnd = [lenLineEnd, selLineEnd, self.getTabCursor(selLineEnd,lenLineEnd)]\n\t\t\t\telse:\n\t\t\t\t\t# Move the end of selection at the start of the last line selected\n\t\t\t\t\tself.selectionEnd = [0, selLineEnd+1, 0]\n\t\t\tself.view.setRefreshSelection()\n\n\tdef replace(self, old, new):\n\t\t\"\"\" Replace the selection \"\"\"\n\t\tif self.readOnly == False:\n\t\t\tselection = self.copyClipboard()\n\t\t\tif len(selection) == 1:\n\t\t\t\tif selection[0] == old:\n\t\t\t\t\tself.delete()\n\t\t\t\t\tself.insertChar(new)\n\t\t\t\t\treturn True\n\t\treturn False\n\n\tdef getCursorChar(self):\n\t\t\"\"\" Get the char on the cursor \"\"\"\n\t\ttry:\n\t\t\treturn self.lines[self.cursorLine][self.cursorColumn]\n\t\texcept:\n\t\t\treturn None\n\n\tdef moveWord(self, direction):\n\t\t\"\"\" Move the cursor to the word \"\"\"\n\t\tstate = 0\n\t\twhile self.changeColumn(direction):\n\t\t\tcurrentChar = self.getCursorChar()\n\t\t\tif currentChar == None:\n\t\t\t\tbreak\n\t\t\telif useful.ispunctuation(currentChar):\n\t\t\t\tif state == 0:\n\t\t\t\t\tstate = 2\n\t\t\t\telif state == 1:\n\t\t\t\t\tbreak\n\t\t\telif useful.isalpha(currentChar):\n\t\t\t\tif state == 0:\n\t\t\t\t\tstate = 1\n\t\t\t\telif state == 2:\n\t\t\t\t\tbreak\n\t\t\telif useful.isspace(currentChar):\n\t\t\t\tif state == 1:\n\t\t\t\t\tbreak\n\t\t\t\tif state == 2:\n\t\t\t\t\tbreak\n\n\tdef nextWord(self):\n\t\t\"\"\" Move the cursor to the next word \"\"\"\n\t\tself.hideSelection()\n\t\tself.moveWord(1)\n\t\tself.view.move()\n\n\tdef previousWord(self):\n\t\t\"\"\" Move the cursor to the previous word \"\"\"\n\t\tself.hideSelection()\n\t\tself.moveWord(-1)\n\t\tself.view.move()\n\n\tdef top(self):\n\t\t\"\"\" Move the cursor to the first line of text \"\"\"\n\t\tself.goto(1)\n\n\tdef bottom(self):\n\t\t\"\"\" Move the cursor to the last line of text \"\"\"\n\t\tself.goto(100000000000)\n\n\tdef treatChar(self, keys):\n\t\t\"\"\" Treat character entered \"\"\"\n\t\tchar = ord(keys[0][0])\n\t\tif self.readOnly is False:\n\t\t\tif char >= 0x20 and char != 0x7F:\n\t\t\t\tself.addChar(keys)\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef treatKey(self, keys):\n\t\t\"\"\" Treat keys \"\"\"\n\t\tif self.treatChar(keys) == False:\n\t\t\t# Move in the edit field\n\t\t\tif keys[0] in UP : 
self.arrowUp(keys)\n\t\t\telif keys[0] in DOWN: self.arrowDown(keys)\n\t\t\telif keys[0] in LEFT: self.arrowLeft(keys)\n\t\t\telif keys[0] in RIGHT: self.arrowRight(keys)\n\t\t\telif keys[0] in HOME: self.home()\n\t\t\telif keys[0] in END: self.end()\n\t\t\telif keys[0] in PAGE_UP: self.pageUp(keys)\n\t\t\telif keys[0] in PAGE_DOWN: self.pageDown(keys)\n\t\t\telif keys[0] in TOP: self.top()\n\t\t\telif keys[0] in BOTTOM: self.bottom()\n\t\t\telif keys[0] in NEXT_WORD: self.nextWord()\n\t\t\telif keys[0] in PREVIOUS_WORD: self.previousWord()\n\t\t\t# Selection the edit field\n\t\t\telif keys[0] in SELECT_UP: self.selectUp(keys)\n\t\t\telif keys[0] in SELECT_DOWN: self.selectDown(keys)\n\t\t\telif keys[0] in SELECT_RIGHT: self.selectRight(keys)\n\t\t\telif keys[0] in SELECT_LEFT: self.selectLeft(keys)\n\t\t\telif keys[0] in SELECT_HOME: self.selectHome()\n\t\t\telif keys[0] in SELECT_END: self.selectEnd()\n\t\t\telif keys[0] in SELECT_PAGE_UP: self.selectPageUp(keys)\n\t\t\telif keys[0] in SELECT_PAGE_DOWN:self.selectPageDown(keys)\n\t\t\telif keys[0] in SELECT_ALL: self.selectAll()\n\t\t\telif keys[0] in SELECT_NEXT_WORD:self.selectNextWord()\n\t\t\telif keys[0] in SELECT_PREV_WORD:self.selectPreviousWord()\n\n\t\t\t# If the edit is not in read only\n\t\t\telif self.readOnly is False:\n\t\t\t\t# Modification in the edit field\n\t\t\t\tif keys[0] in COPY: self.copy()\n\t\t\t\telif keys[0] in CUT: self.cut()\n\t\t\t\telif keys[0] in PASTE: self.paste()\n\n\t\t\t\telif keys[0] in INDENT: self.indent(keys)\n\t\t\t\telif keys[0] in UNINDENT: self.unindent(keys)\n\t\t\t\telif keys[0] in CHANGE_CASE: self.changeCase()\n\t\t\t\telif keys[0] in COMMENT: self.comment()\n\n\t\t\t\telif keys[0] in BACKSPACE: self.backspace()\n\t\t\t\telif keys[0] in DELETE: self.delete()\n\t\t\t\telif keys[0] in NEW_LINE: self.newLine()\n\t\t\t\telif keys[0] in DELETE_LINE: self.deleteLine()\n\t\t\t# else: self.addChar(keys)\n\nclass Edit:\n\t\"\"\" Class which aggregate the View and Text \"\"\"\n\tdef __init__(self, viewTop=1, viewHeight=None, readOnly=False):\n\t\t\"\"\" Constructor \"\"\"\n\t\tself.view = View(viewHeight, viewTop)\n\t\tself.text = Text(readOnly)\n\t\tself.text.setView(self.view)\n\t\tself.view.setText(self.text)\n\nclass Editor:\n\t\"\"\" Class which manage a complete editor \"\"\"\n\tdef __init__(self, filename_, readOnly=False):\n\t\t\"\"\" Constructor \"\"\"\n\t\tself.file = filename_\n\t\tself.filename = useful.split(filename_)[1]\n\t\tself.edit = Edit(readOnly=readOnly)\n\t\tself.edit.text.load(filename_)\n\t\tself.isRefreshHeader = True\n\t\tself.findText = None\n\t\tself.replaceText = None\n\t\tself.keys= []\n\t\tself.loop = None\n\n\t\tif (not useful.exists(filename_) and readOnly == True) or useful.isdir(filename_):\n\t\t\tprint(\"Cannot open '%s'\"%self.filename)\n\t\telse:\n\t\t\tself.run()\n\n\tdef refreshHeader(self):\n\t\t\"\"\" Refresh the header of editor \"\"\"\n\t\tif self.isRefreshHeader:\n\t\t\tself.edit.view.moveCursor(0, 0)\n\t\t\tfilename_ = \"File: %s\"%(self.filename)\n\t\t\tif self.edit.text.readOnly == False:\n\t\t\t\tfilename_ += \" (*)\" if self.edit.text.modified else \"\"\n\t\t\t\tend = \"Mode: %s\"%(\"Replace\" if self.edit.text.replaceMode else \"Insert\")\n\t\t\telse:\n\t\t\t\tend = \"Read only\" if self.edit.text.readOnly else \"\"\n\n\t\t\theader = \"\\x1B[7m %s%s%s \\x1B[m\"%(filename_, \" \"*(self.edit.view.width - len(filename_) - len(end)-2), end)\n\t\t\tself.edit.view.write(header)\n\t\t\tself.edit.view.moveCursor()\n\t\t\tself.isRefreshHeader = 
False\n\n\tdef refresh(self):\n\t\t\"\"\" Refresh the editor \"\"\"\n\t\tself.refreshHeader()\n\t\tself.edit.view.refresh()\n\n\tdef toggleMode(self):\n\t\t\"\"\" Change the replace mode \"\"\"\n\t\tif self.edit.text.replaceMode:\n\t\t\tself.edit.text.replaceMode = False\n\t\telse:\n\t\t\tself.edit.text.replaceMode = True\n\t\tself.isRefreshHeader = True\n\n\tdef save(self):\n\t\t\"\"\" Save the file edited \"\"\"\n\t\tself.edit.text.save()\n\t\tself.isRefreshHeader = True\n\n\tdef exit(self):\n\t\t\"\"\" Exit from editor \"\"\"\n\t\tself.edit.view.cls()\n\t\tif self.edit.text.modified:\n\t\t\tself.edit.view.write(\"\\nSave file '%s' (\\x1b[7mY\\x1b[m:Yes, \\x1b[7mN\\x1b[m:No, \\x1b[7mEsc\\x1b[m:Cancel) : \"%self.filename)\n\t\t\tself.edit.view.flush()\n\t\t\twhile 1:\n\t\t\t\tkey = useful.getch()\n\t\t\t\tif key == \"Y\" or key == \"y\":\n\t\t\t\t\tif self.edit.text.save():\n\t\t\t\t\t\tself.edit.view.write(\"Saved\\n\")\n\t\t\t\t\t\tself.edit.view.flush()\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.edit.view.write(\"Failed to save\\n\")\n\t\t\t\t\t\tself.edit.view.flush()\n\t\t\t\t\tself.loop = False\n\t\t\t\t\tbreak\n\t\t\t\telif key == \"N\" or key == \"n\":\n\t\t\t\t\tself.edit.view.write(\"Not saved\\n\")\n\t\t\t\t\tself.edit.view.flush()\n\t\t\t\t\tself.loop = False\n\t\t\t\t\tbreak\n\t\t\t\telif key == ESCAPE:\n\t\t\t\t\tself.edit.view.setRefreshAll()\n\t\t\t\t\tself.isRefreshHeader = True\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tself.loop = False\n\n\tdef input(self, text, help_=\"\"):\n\t\t\"\"\" Input value, used to get a line number, or text searched \"\"\"\n\t\tedit_ = Edit(viewTop=2, viewHeight=1, readOnly=False)\n\t\tedit_.view.cls()\n\t\tedit_.view.moveCursor(1,0)\n\t\tedit_.view.write(text)\n\t\tedit_.view.moveCursor(4,0)\n\t\tedit_.view.write(help_)\n\t\tresult = None\n\t\twhile 1:\n\t\t\tedit_.view.refresh()\n\t\t\tkey = self.getKey()\n\t\t\tif key[0] in NEW_LINE:\n\t\t\t\tresult = edit_.text.lines[0]\n\t\t\t\tbreak\n\t\t\telif key[0] in ESCAPE:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tedit_.text.treatKey(key)\n\t\treturn result\n\n\tdef find(self):\n\t\t\"\"\" Find a text \"\"\"\n\t\tself.findText = self.input(\"Find :\",\"\\x1B[7mEsc\\x1B[m:Abort \\x1B[7m^Left\\x1B[m,\\x1B[7m^Up\\x1B[m:Previous \\x1B[7m^Down\\x1B[m,\\x1B[7m^Right\\x1B[m:Next\")\n\t\tself.findNext()\n\t\tself.edit.view.setRefreshAll()\n\t\tself.isRefreshHeader = True\n\n\tdef replace(self):\n\t\t\"\"\" Replace a text \"\"\"\n\t\tself.findText = self.input(\"Find to replace :\",\"\\x1B[7mEsc\\x1B[m:Abort\")\n\t\tif self.findText:\n\t\t\tself.replaceText = self.input(\"Replace with :\",\"\\x1B[7mEsc\\x1B[m:Abort \\x1B[7m^Left\\x1B[m,\\x1B[7m^Up\\x1B[m:Previous \\x1B[7m^Down\\x1B[m,\\x1B[7m^Right\\x1B[m:Next \\x1B[7m^R\\x1B[m:Replace\")\n\t\t\tself.findNext()\n\n\t\tself.edit.view.setRefreshAll()\n\t\tself.isRefreshHeader = True\n\n\tdef replaceCurrent(self):\n\t\t\"\"\" Replace current \"\"\"\n\t\tif self.findText and self.replaceText:\n\t\t\tif self.edit.text.replace(self.findText, self.replaceText):\n\t\t\t\tself.findNext()\n\n\tdef findNext(self):\n\t\t\"\"\" Find next text \"\"\"\n\t\tif self.findText:\n\t\t\tself.edit.text.findNext(self.findText)\n\n\tdef findPrevious(self):\n\t\t\"\"\" Find previous text \"\"\"\n\t\tif self.findText:\n\t\t\tself.edit.text.findPrevious(self.findText)\n\n\tdef goto(self):\n\t\t\"\"\" Goto line \"\"\"\n\t\tlineNumber = self.input(\"Goto line :\",\"\\x1B[7mEsc\\x1B[m:Abort\")\n\t\ttry:\n\t\t\tlineNumber = 
int(lineNumber)\n\t\t\tself.edit.text.goto(int(lineNumber))\n\t\texcept:\n\t\t\tpass\n\t\tself.edit.view.setRefreshAll()\n\t\tself.isRefreshHeader = True\n\n\tdef groupKey(self):\n\t\t\"\"\" Group similar key to optimize move of cursor and edition \"\"\"\n\t\tresult = [self.keys.pop(0)]\n\t\twhile len(self.keys) > 0 and len(result) <= 10:\n\t\t\tif self.keys[0] == result[0]:\n\t\t\t\tresult.append(self.keys.pop(0))\n\t\t\telse:\n\t\t\t\tif useful.isascii(result[0]) and useful.isascii(self.keys[0]):\n\t\t\t\t\tresult.append(self.keys.pop(0))\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\treturn result\n\n\tdef getKey(self):\n\t\t\"\"\" Get a key pressed \"\"\"\n\t\tif len(self.keys) == 0:\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tkey = useful.getch()\n\t\t\t\texcept KeyboardInterrupt:\n\t\t\t\t\tkey = \"\\x03\"\n\t\t\t\tself.keys.append(key)\n\t\t\t\tif useful.kbhit() == False or len(self.keys) > 5:\n\t\t\t\t\tbreak\n\t\treturn self.groupKey()\n\n\tdef execute(self):\n\t\t\"\"\" Execute the python script edited \"\"\"\n\t\tself.save()\n\t\tloop = True\n\t\twhile loop:\n\t\t\tself.edit.view.resetScrollRegion()\n\t\t\tself.edit.view.cls()\n\t\t\tself.edit.view.flush()\n\t\t\tstartTime = useful.ticks()\n\t\t\ttry:\n\t\t\t\tuseful.log(None)\n\t\t\t\tuseful.import_(self.filename)\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tpass\n\t\t\tendTime = useful.ticks()\n\t\t\tprint( \"\\x1B[7mTime: %d.%03d s Press enter to stop\\x1B[m\"%((endTime-startTime)/1000, (endTime-startTime)%1000))\n\t\t\twhile 1:\n\t\t\t\tkeys = self.getKey()\n\t\t\t\tif keys[0] in NEW_LINE:\n\t\t\t\t\tloop = False\n\t\t\t\t\tbreak\n\t\t\t\telif keys[0] in EXECUTE:\n\t\t\t\t\tbreak\n\t\t\t\t# else:\n\t\t\t\t\t# print(useful.dump(keys[0]))\n\t\tself.edit.view.cls()\n\t\tself.edit.view.setRefreshAll()\n\t\tself.isRefreshHeader = True\n\n\tdef run(self):\n\t\t\"\"\" Core of the editor \"\"\"\n\t\tself.edit.view.cls()\n\t\tself.edit.view.getScreenSize()\n\t\tself.loop = True\n\t\twhile(self.loop):\n\t\t\ttry:\n\t\t\t\tself.refresh()\n\t\t\t\tkeys = self.getKey()\n\t\t\t\tmodified = self.edit.text.modified\n\t\t\t\tif ord(keys[0][0]) < 0x20:\n\t\t\t\t\tif keys[0] in TOGGLE_MODE: self.toggleMode()\n\t\t\t\t\telif keys[0] in FIND: self.find()\n\t\t\t\t\telif keys[0] in REPLACE: self.replace()\n\t\t\t\t\telif keys[0] in FIND_PREVIOUS: self.findPrevious()\n\t\t\t\t\telif keys[0] in FIND_NEXT: self.findNext()\n\t\t\t\t\telif keys[0] in REPLACE_CURRENT:self.replaceCurrent()\n\t\t\t\t\telif keys[0] in EXIT: self.exit()\n\t\t\t\t\telif keys[0] in GOTO: self.goto()\n\t\t\t\t\telif keys[0] in SAVE: self.save()\n\t\t\t\t\telif keys[0] in EXECUTE: self.execute()\n\t\t\t\tself.edit.text.treatKey(keys)\n\t\t\t\tif modified != self.edit.text.modified:\n\t\t\t\t\tself.isRefreshHeader = True\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tpass\n\t\tself.edit.view.resetScrollRegion()\n\t\tself.edit.view.reset()\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) > 1:\n\t\tfilename = sys.argv[1]\n\telse:\n\t\tfilename = \"editor.txt\"\n\tedit = Editor(filename, readOnly=False)\n","sub_path":"modules/lib/shell/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":54908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615127767","text":"# coding: utf-8\n\"\"\"\n\n\"\"\"\n\nimport datetime\nimport typing\n\nfrom .. 
import db\nfrom .objects import Objects\n\n\nclass File(db.Model): # type: ignore\n __tablename__ = 'files'\n __table_args__ = (\n db.CheckConstraint(\n '(fed_id IS NOT NULL AND component_id IS NOT NULL) OR (user_id IS NOT NULL AND utc_datetime IS NOT NULL)',\n name='files_not_null_check'\n ),\n db.UniqueConstraint('fed_id', 'object_id', 'component_id', name='files_fed_id_component_id_key')\n )\n\n id = db.Column(db.Integer, primary_key=True)\n object_id = db.Column(db.Integer, db.ForeignKey(Objects.object_id_column), primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)\n utc_datetime = db.Column(db.DateTime, nullable=True)\n data = db.Column(db.JSON, nullable=True)\n binary_data = db.deferred(db.Column(db.LargeBinary, nullable=True))\n fed_id = db.Column(db.Integer, nullable=True)\n component_id = db.Column(db.Integer, db.ForeignKey('components.id'), nullable=True)\n uploader = db.relationship('User')\n component = db.relationship('Component')\n\n def __init__(\n self,\n file_id: int,\n object_id: int,\n user_id: typing.Optional[int],\n utc_datetime: typing.Optional[datetime.datetime] = None,\n data: typing.Optional[typing.Dict[str, typing.Any]] = None,\n binary_data: typing.Optional[bytes] = None,\n fed_id: typing.Optional[int] = None,\n component_id: typing.Optional[int] = None\n ) -> None:\n self.id = file_id\n self.object_id = object_id\n self.user_id = user_id\n if utc_datetime is None:\n utc_datetime = datetime.datetime.utcnow()\n self.utc_datetime = utc_datetime\n self.data = data\n self.binary_data = binary_data\n self.fed_id = fed_id\n self.component_id = component_id\n\n def __repr__(self) -> str:\n return '<{0}(id={1.id}, object_id={1.object_id}, user_id={1.user_id}, utc_datetime={1.utc_datetime}, data=\"{1.data}\")>'.format(type(self).__name__, self)\n","sub_path":"sampledb/models/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390153523","text":"def solution(s):\n s = list(s)\n str = \"\"\n result = []\n for i in range(0,len(s),2):\n if i+1 < len(s):\n str = s[i]+s[i+1]\n else:\n str = s[i]+'_'\n result.append(str)\n return result\n \n","sub_path":"Split Strings/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"546532904","text":"def convert(slot):\n stock= {\n \t\t\t'AAPL' \t: ['Apple', 'apple', 'appl'],\n \t\t\t'GOOGL'\t: ['Google', 'google', 'googl'],\n \t\t\t'FB'\t: ['Facebook', 'facebook', 'fb'],\n \t\t\t'AMZN'\t: ['Amazon', 'amazon', 'amzn'],\n \t\t\t'MSFT'\t: ['Microsoft', 'microsoft','msft'],\n \t\t\t'BABA'\t: ['Alibaba', 'alibaba', 'baba'],\n \t\t\t'AMD'\t: ['amd', 'Amd'],\n \t\t\t'INTC'\t: ['Intel', 'intel', 'intc'],\n \t\t\t'TSLA'\t: ['Tesla', 'tesla', 'tsla'],\n \t\t\t'DIA'\t: ['Dow Jones','Dow', 'dow', 'dow jones', 'dia'],\n \t\t\t'SPY'\t: ['Spider', 'spider', 'spy'],\n \t\t\t'TWTR'\t: ['Twitter', 'twitter', 'twtr']\n \t\t}\n if slot in stock:\n return slot\n else:\n slots = [i for i in stock for j in stock[i] if j == slot]\n return slots[0]\n \n #[stock[i] for i in stock.keys()]\n\n\n","sub_path":"utils/NametoSymb.py","file_name":"NametoSymb.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254388894","text":"from flask import Flask, request, jsonify, 
make_response\n\nimport pandas\nfrom intake import open_catalog\nimport re\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n    return 'Hello, World Noushin!'\n\n@app.route(\"/tag_based\", methods=[\"POST\"])\ndef datasets_per_tag():\n\tres = \"\"\n\n\tif not request.json:\n\t\treturn make_response(jsonify({\n\t\t\t\"code\": 400,\n\t\t\t\"message\": \"Input should be a json\"\n\t\t}), 400)\n\ttry:\n\t\tbody = request.json\n\t\ttag = body['tag']\n\t\tdf = pandas.read_excel('tags-datasets.xlsx')\n\t\tmy_list = df[lambda x: ~pandas.isnull(x[tag])][tag]\n\t\tpattern = re.compile(r'\\s+')\n\t\ts = my_list.to_string(index=False).replace('\\n',',')\n\t\tres = pattern.sub(' ', s)\n\texcept Exception as inst:\n\t\tprint(res)\n\t\treturn make_response(jsonify({\n\t\t\t\"code\": 400,\n\t\t\t\"message\": \"Failed to parse input json\"\n\t\t}), 400)\n\treturn make_response(jsonify({\n\t\t\"code\": 200,\n\t\t\"result\": res,\n\t\t\"message\": \"success\"\n\t}), 200)\n\n@app.route('/all_data')\ndef all_data():\n\tcatalog_name='https://raw.githubusercontent.com/kpegion/COLA-DATASETS-CATALOG/gh-pages/intake-catalogs/'\n\tcat = open_catalog(catalog_name+'master.yaml')\n\tresult = []\n\tqueue = []\n\tnew_name = \"\"\n\tdef my_recursive(catalog):\n\t\t# Breadth-first walk over the catalog and its sub-catalogs, collecting entry names\n\t\tfor i in list(catalog):\n\t\t\tqueue.append(i)\n\n\t\t\twhile queue:\n\t\t\t\tlevel_size = len(queue)\n\t\t\t\tfor _ in range(level_size):\n\t\t\t\t\tcurrent = queue.pop()\n\t\t\t\t\tresult.append(current)\n\t\t\t\t\tnew_name = catalog_name+current.lower()+'.yaml'\n\t\t\t\t\tsubcat=open_catalog(new_name)\n\t\t\t\t\tfor j in list(subcat):\n\t\t\t\t\t\tif j not in ('netcdf','example'):\n\t\t\t\t\t\t\tqueue.append(j)\n\t\treturn jsonify(result)\n\treturn my_recursive(cat)\n\n\nif __name__ == \"__main__\":\n    app.config['JSON_AS_ASCII'] = False\n    app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"262777572","text":"import pymssql\nimport time\n\nimport requests\nfrom fake_useragent import UserAgent\nfrom bs4 import BeautifulSoup\nua = UserAgent()\nheaders = {\n    'User-Agent':ua.random,\n}\ndef test_url(url):\n    ''' Check whether the request returns HTML, retrying on failure '''\n    while True:\n        try:\n            html = requests.get(url=url, headers=headers)\n            html = html.content.decode('gbk')\n            return html\n        except:\n            print(u'HTTP request failed!!! Preparing to retry...')\n            time.sleep(2)\n            continue\ndef get_html(day,hy):\n    day = day.replace('-','')\n    url = 'http://new.czce.com.cn/portal/DFSStaticFiles/Future/%s/%s/FutureDataDaily%s.htm' % (day[0:4], day, hy)\n    html = test_url(url)\n    return html\ndef parse_html(html):\n    soup = BeautifulSoup(html,'lxml')\n    table = soup.find('table',id=\"senfe\")\n    tr_list = table.find_all('tr')\n    tr_809 = None\n    tr_901 = None\n    for tr in tr_list:\n        if '809' in tr.td.text:\n            tr_809 = tr\n        if '901' in tr.td.text:\n            tr_901 = tr\n    return tr_809,tr_901\ndef yield_item(row):\n    td_list = row.find_all('td')\n    items = {\n        'spj':int(eval(td_list[5].text.replace(',',''))),\n        'jrjsj':int(eval(td_list[6].text.replace(',',''))),\n        'cjl':int(eval(td_list[9].text.replace(',',''))),\n        'kp':int(eval(td_list[10].text.replace(',',''))),\n    }\n    return items\ndef main_qhsc(day,yesday):\n    '''Futures market'''\n    qhsc_list = []\n    for hy in ['MA','TA']:\n        qhsc = {}\n        html = get_html(day,hy)\n        tr_809, tr_901 = parse_html(html)\n        tr_809 = yield_item(tr_809)\n        tr_901 = yield_item(tr_901)\n        spj = tr_809['spj'] # closing price\n        jc = spj - tr_901['spj'] # price spread\n        day_zjlx = (tr_809['jrjsj']*tr_809['kp'])\n\n        html = get_html(yesday,hy)\n        tr_809, tr_901 = parse_html(html)\n        tr_809 = yield_item(tr_809)\n        tr_901 = yield_item(tr_901)\n        zd = (spj-tr_809['spj'])/tr_809['spj'] # rise/fall\n        jc_zd = jc - (spj - tr_901['spj']) # spread change\n        yesday_zjlx = (tr_809['jrjsj']*tr_809['kp'])\n        zjlx = (day_zjlx-yesday_zjlx)*10*2*0.05 # capital flow\n        qhsc['spj']=spj\n        qhsc['jc']=jc\n        qhsc['zd']='%.7f'%zd\n        qhsc['jc_zd']=jc_zd\n        qhsc['zjlx']=zjlx\n        qhsc_list.append(qhsc)\n    return qhsc_list\ndef get_two_html(day):\n    day = day.replace('-','')\n    url = 'http://new.czce.com.cn/portal/DFSStaticFiles/Future/%s/%s/FutureDataHolding.htm'%(day[0:4], day)\n    html = test_url(url)\n    return html\ndef parse_two_html(html,hy):\n    soup = BeautifulSoup(html,'lxml')\n    table = soup.find('table',id=\"senfe\")\n    tr_list = table.find_all('tr')\n    count = 0\n    tag = None\n    for i in tr_list:\n        if i.find('td',class_='td-left'):\n            if hy in i.find('td',class_='td-left').text:\n                tag = count\n        count+=1\n    for i in tr_list[tag:tag+23]:\n        if '合计' in i.find('td').text:  # '合计' is the 'Total' row label on the page\n            key = i.find_all('td')\n            item = {\n                \"dd\": key[5].text,  # long positions\n                \"dd_zj\": key[6].text,  # long position change\n                \"kd\": key[8].text,  # short positions\n                \"kd_zj\": key[9].text,  # short position change\n                \"jcc\":int(key[5].text)-int(key[8].text)  # net position\n            }\n            return item\ndef main_jcc(day):\n    html = get_two_html(day)\n    jcc_list = []\n    for hy in ['甲醇MA','PTA']:  # '甲醇MA' is the methanol (MA) section label\n        item = parse_two_html(html,hy)\n        jcc_list.append(item)\n    return jcc_list\ndef read_mssql(day,targetId):\n    con = pymssql.connect(\n        server=\"172.0.10.59\",\n        user = 'gt',\n        password=\"server123!@#\",\n        database = 'GTData'\n    )\n    try:\n        cur = con.cursor()\n        sql = 'select top(1) Value from TargetDatas where time<=%s and TargetId=%s order by time desc'\n        cur.execute(sql, (day, targetId))\n        result = cur.fetchall()[0][0]\n        # result = result.encode('latin-1').decode('gbk')\n        return result\n    except:\n        return 0\n\ndef main_jicha(day,yesday):\n    xh_xz = read_mssql(day,'S5422065')\n    xh_zj = xh_xz - read_mssql(yesday,'S5422065')\n    qh_xz = read_mssql(day,'s5435640')\n    qh_zj = qh_xz - read_mssql(yesday,'s5435640')\n    return [int(xh_xz),int(xh_zj),int(qh_xz),int(qh_zj)]\ndef main_xhjg(day,yesday):\n    # Methanol (MA)\n    jc_hd = read_mssql(day,'s5422062')\n    jc_hd_zd = jc_hd - read_mssql(yesday,'s5422062')\n\n    jc_hn = read_mssql(day,'s5422065')\n    jc_hn_zd = jc_hn - read_mssql(yesday,'s5422065')\n\n    jc_hb = read_mssql(day,'s5422037')\n    jc_hb_zd = jc_hb - read_mssql(yesday,'s5422037')\n\n    jc_zg = read_mssql(day,'s5416976')\n    jc_zg_zd = jc_zg - read_mssql(yesday,'s5416976')\n\n    jc_dny = read_mssql(day,'s5416979')\n    jc_dny_zd = jc_dny - read_mssql(yesday,'s5416979')\n    jc_item = {\n        'jc_hd':int(jc_hd),\n        'jc_hd_zd':int(jc_hd_zd),\n        'jc_hn':int(jc_hn),\n        'jc_hn_zd':int(jc_hn_zd),\n        'jc_hb':int(jc_hb),\n        'jc_hb_zd':int(jc_hb_zd),\n        'jc_zg':int(jc_zg),\n        'jc_zg_zd':int(jc_zg_zd),\n        'jc_dny':int(jc_dny),\n        'jc_dny_zd':int(jc_dny_zd),\n    }\n    # PTA\n    pta_hd = read_mssql(day,'s5419570')\n    pta_hd_zd = pta_hd - read_mssql(yesday,'s5419570')\n\n    pta_hn = read_mssql(day,'s5419568')\n    pta_hn_zd = pta_hn - read_mssql(yesday,'s5419568')\n\n    pta_hb = read_mssql(day,'s5419565')\n    pta_hb_zd = pta_hb - read_mssql(yesday,'s5419565')\n\n    pta_zg = read_mssql(day,'s5435641')\n    pta_zg_zd = pta_zg - read_mssql(yesday,'s5435641')\n\n    pta_dny = read_mssql(day,'s5435640')\n    pta_dny_zd = pta_dny - read_mssql(yesday,'s5435640')\n    pta_item = {\n        'pta_hd':int(pta_hd),\n        'pta_hd_zd':int(pta_hd_zd),\n        'pta_hn':int(pta_hn),\n        'pta_hn_zd':int(pta_hn_zd),\n        'pta_hb':int(pta_hb),\n
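        # PTA spot values and their day-over-day changes, mirroring the jc_item structure above\n        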
'pta_hb_zd':int(pta_hb_zd),\n 'pta_zg':int(pta_zg),\n 'pta_zg_zd':int(pta_zg_zd),\n 'pta_dny':int(pta_dny),\n 'pta_dny_zd':int(pta_dny_zd),\n }\n return jc_item,pta_item\n\ndef main_yljg(day,yesday):\n dlm = read_mssql(day,'s5101386')\n dlm_zd = dlm - read_mssql(yesday,'s5101386')\n\n syq = read_mssql(day,'s5423921')\n syq_zd = syq - read_mssql(yesday,'s5423921')\n\n px = read_mssql(day,'s5432004')\n px_zd = px - read_mssql(yesday,'s5432004')\n return int(dlm),int(dlm_zd),int(syq),int(syq_zd),int(px),int(px_zd)\ndef main_kgl(day,yesday):\n pta = read_mssql(day,'s5417017')\n pta_zj = pta - read_mssql(yesday,'s5417017')\n\n jj = read_mssql(day,'s5417018')\n jj_zj = jj - read_mssql(yesday,'s5417018')\n\n zj = read_mssql(day,'s5417019')\n zj_zj = zj - read_mssql(yesday,'s5417019')\n\n return int(pta),int(pta_zj),int(jj),int(jj_zj),int(zj),int(zj_zj)\ndef main_xycp(day,yesday):\n hg = read_mssql(day,'s5417019')\n hg_zj = hg - read_mssql(yesday,'s5417019')\n\n gt = read_mssql(day,'s5417019')\n gt_zj = gt - read_mssql(yesday,'s5417019')\n\n dq = read_mssql(day,'s5417019')\n dq_zj = dq - read_mssql(yesday,'s5417019')\n\n qp = read_mssql(day,'s5417019')\n qp_zj = qp - read_mssql(yesday,'s5417019')\n\n return int(hg), int(hg_zj), int(gt), int(gt_zj), int(dq), int(dq_zj), int(qp), int(qp_zj)\ndef main():\n day = '2018-07-26'\n yesday = '2018-07-25'\n # qhsc_list = main_qhsc(day,yesday)\n # jcc_list = main_jcc(day)\n # jicha_list = main_jicha(day,yesday)\n # xhjg_list = main_xhjg(day,yesday)\n # yljg_list = main_yljg(day, yesday)\n # kgl_list = main_kgl(day,yesday)\n xycp_list = main_xycp(day,yesday)\n print(xycp_list)\nif __name__ == '__main__':\n main()","sub_path":"WorksZhang/pingzhong/jiachun/insert_table.py","file_name":"insert_table.py","file_ext":"py","file_size_in_byte":7586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"63892725","text":"\nimport tensorflow as tf\nimport pickle\nimport os\nfrom pandas import read_csv, DataFrame \nimport numpy as np\nfrom scipy.misc.pilutil import imresize\n\nCATEGORIES = [\"walking\", \"jogging\", \"running\", \"boxing\", \"handwaving\", \"handclapping\"]\nDATASET_DIR = \"../../data\"\n\ndef split_data(data ,dataframe):\n\n Name = dataframe.columns.get_loc(\"Filename\")\n start_1= dataframe.columns.get_loc(\"start_1\")\n end_1= dataframe.columns.get_loc(\"end_1\")\n start_2= dataframe.columns.get_loc(\"start_2\")\n end_2= dataframe.columns.get_loc(\"end_2\")\n start_3= dataframe.columns.get_loc(\"start_3\")\n end_3= dataframe.columns.get_loc(\"end_3\")\n start_4= dataframe.columns.get_loc(\"start_4\")\n end_4= dataframe.columns.get_loc(\"end_4\")\n FramesOFIntrest = dataframe.values\n count=0\n features = []\n labels = []\n for ex in data:\n\n for i in range(FramesOFIntrest.shape[0]):\n \n if(FramesOFIntrest[i,Name] in ex[\"filename\"] ):\n count+=1\n\n if(not (np.isnan(FramesOFIntrest[i,start_1]) or np.isnan(FramesOFIntrest[i,end_1]))):\n n = int(FramesOFIntrest[i,start_1]-1) \n while n>> tokenize('Bob dropped the apple. 
Where is the apple?')\n ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']\n '''\n return [x.strip() for x in re.split('(\\W+)?', sent) if x.strip()]\n\n\nfrom mc_data_utils import vectorize_data, sent_to_tokens, para_to_tokens\n\ndef decode(index):\n return decode_dict.get(index, 'unknown')\n\n\ndef process_data(sentences, question, choices):\n print('sentences', sentences)\n #sent_t = para_to_tokens(sentences)\n sent_t = [sent_to_tokens(tmp) for tmp in sentences]\n print('sent_t', sent_t)\n #sent_t = [filter(lambda x: x != \".\", s) for s in sent_t]\n\n q_t = sent_to_tokens(question)\n #if q_t[-1] == \"?\":\n # q_t = q_t[:-1]\n\n choices_t = choices.split('|')\n if len(choices_t) == 2:\n choices_t.append('')\n choices_t.append('')\n if len(choices_t) == 3:\n choices_t.append('')\n c_t = [sent_to_tokens(tmp) for tmp in choices_t]\n\n data = [(sent_t, q_t, c_t,'0' )]\n\n testS, testQ, testAS, testL = vectorize_data(data, vocab_data['w_idx'], vocab_data['sentence_size'], vocab_data['memory_size'], vocab_data['answer_size'])\n\n return testS, testQ, testAS, testL\n","sub_path":"mc_web-demo/server/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"267112953","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pickle\nimport json\n\n\nclass paytm:\n def __init__(self):\n self.s = requests.session()\n self.s.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}\n # self.s.get('https://paytm.com/')\n\n def login(self, account_id, password, load_pickle=False):\n def dump_session():\n with open('paytm_session.pkl', 'wb') as f:\n pickle.dump(self.s, f)\n\n def load_session():\n with open('paytm_session.pkl', 'rb') as f:\n self.s = pickle.load(f)\n\n def get_authState(st):\n k = re.findall('authState : \\'\\w+-\\w+-\\w+-\\w+-\\w+', st)[0]\n if len(k) > 0:\n k = k.replace('authState : \\'', '')\n return k\n\n def get_otp_auth(st):\n k = re.findall('otpLoginState : \\'\\w+-\\w+-\\w+-\\w+-\\w+', st)[0]\n if len(k) > 0:\n k = k.replace('otpLoginState : \\'', '')\n return k\n if load_pickle:\n return load_session()\n res = self.s.get(\n 'https://accounts.paytm.com/oauth2/authorize?theme=mp-web&redirect_uri=https%3A%2F%2Fpaytm.com%2Fv1%2Fapi%2Fauthresponse&is_verification_excluded=false&client_id=paytm-web-secure&type=web_server&scope=paytm&response_type=code#/login')\n\n def login_sec(auth):\n data = {'hidden': '', 'fakeusernameremembered': '', 'fakepasswordremembered': '', 'username': account_id,\n 'password': password, 'AUTH_STATE': auth}\n res = self.s.post(\n 'https://accounts.paytm.com/oauth2/authorize?client_id=paytm-web-secure&scope=paytm&response_type=code&redirect_uri=https://paytm.com/v1/api/authresponse&theme=mp-web&state=null&is_verification_excluded=false&isSignup=true',\n data=data)\n if res.status_code == 303:\n dump_session()\n return True\n if res.status_code == 200:\n otp_auth = get_otp_auth(res.text)\n otp = input('Please Enter the Otp Sent To you. 
:')\n data = {'otp': otp, 'state': otp_auth}\n data = json.dumps(data)\n otp_res = self.s.post('https://accounts.paytm.com/login/validate/otp', data=data)\n with open('otp.txt', 'w') as f:\n f.write(otp_res.text)\n if otp_res.status_code == 200:\n if self.s.get(otp_res.json().get('redirectUri')).status_code == 200:\n print('You Have Successfully Logged in.')\n dump_session()\n return True\n\n if res.status_code == 200:\n auth_state = get_authState(res.text)\n login_sec(auth_state)\n\n def get_wallate_transactions(self):\n res = self.s.get('https://paytm.com/v1/api/wallet/txnhistory?page_size=100&page_number=0&channel=web&child_site_id=1&site_id=1&version=2')\n if res.status_code == 200:\n return res.json()\np = paytm()\np.login(8461034077, 'Tsaongaf@paytm8', load_pickle=True)\nprint(p.s.get(\n 'https://paytm.com/v1/api/wallet/customuserbalance?channel=web&child_site_id=1&site_id=1&version=2&detail=1&service=1'))\nk = p.get_wallate_transactions()\nprint(k)","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"60356665","text":"#! /usr/bin/env python\nimport pygame\nimport pygame.locals as pgl\nimport oculusvr as ovr\n\nfrom RiftApp import RiftApp\nfrom cgkit.cgtypes import mat4, vec3\nfrom OpenGL.GL import *\n\ndef draw_color_cube(size=1.0):\n p = size / 2.0\n n = -p\n glBegin(GL_QUADS)\n\n # front\n glColor3f(1, 1, 0)\n glVertex3f(n, n, n)\n glVertex3f(p, n, n)\n glVertex3f(p, p, n)\n glVertex3f(n, p, n)\n # back\n glColor3f(0.2, 0.2, 1)\n glVertex3f(n, n, p)\n glVertex3f(p, n, p)\n glVertex3f(p, p, p)\n glVertex3f(n, p, p)\n # right\n glColor3f(1, 0, 0)\n glVertex3f(p, n, n)\n glVertex3f(p, n, p)\n glVertex3f(p, p, p)\n glVertex3f(p, p, n)\n # left\n glColor3f(0, 1, 1)\n glVertex3f(n, n, n)\n glVertex3f(n, n, p)\n glVertex3f(n, p, p)\n glVertex3f(n, p, n)\n # top\n glColor3f(0, 1, 0)\n glVertex3f(n, p, n)\n glVertex3f(p, p, n)\n glVertex3f(p, p, p)\n glVertex3f(n, p, p)\n # bottom\n glColor3f(1, 0, 1)\n glVertex3f(n, n, n)\n glVertex3f(p, n, n)\n glVertex3f(p, n, p)\n glVertex3f(n, n, p)\n glEnd()\n\n\nclass RiftDemo(RiftApp):\n def __init__(self):\n RiftApp.__init__(self)\n self.cube_size = self.hmd.get_float(\n ovr.OVR_KEY_IPD, ovr.OVR_DEFAULT_IPD)\n self.reset_camera()\n \n def reset_camera(self):\n self.camera = mat4(1.0)\n self.camera.translate(vec3(0, 0, 0.2))\n\n def recompose_camera(self):\n (tr, rot, sc) = self.camera.decompose()\n self.camera = mat4(1.0)\n self.camera.translate(tr)\n self.camera = self.camera * rot\n \n def init_gl(self):\n RiftApp.init_gl(self)\n glEnable(GL_DEPTH_TEST)\n glClearColor(0.1, 0.1, 0.1, 1)\n\n def update(self):\n RiftApp.update(self)\n pressed = pygame.key.get_pressed()\n\n if pressed[pgl.K_r]:\n self.reset_camera()\n\n rotation = 0.0\n \n if pressed[pgl.K_q]:\n rotation = +1.0\n if pressed[pgl.K_e]:\n rotation = -1.0\n if (rotation != 0.0):\n self.camera = self.camera * \\\n mat4.rotation(rotation * 0.01, vec3(0, 1, 0))\n self.recompose_camera()\n \n # Modify direction vectors for key presses\n translation = vec3()\n if pressed[pgl.K_r]:\n self.hmd.recenter_pose()\n if pressed[pgl.K_w]:\n translation.z = -1.0\n elif pressed[pgl.K_s]:\n translation.z = +1.0\n if pressed[pgl.K_a]:\n translation.x = -1.0\n elif pressed[pgl.K_d]:\n translation.x = +1.0\n if (vec3.length(translation) > 0.1):\n translation = self.camera.getMat3() * (translation * 0.005)\n self.camera.translate(translation)\n 
self.recompose_camera()\n\n\n  def render_scene(self):\n    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n    # apply the camera position\n    cameraview = self.eyeview * self.camera \n    glMatrixMode(GL_MODELVIEW)\n    glLoadMatrixf(cameraview.inverse().toList())\n\n    glMultMatrixf(self.camera.inverse().toList())\n    draw_color_cube(self.cube_size)\n\n\nRiftDemo().run();\n\n\n","sub_path":"RiftDemo.py","file_name":"RiftDemo.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351231187","text":"#You are taking part in an Escape Room challenge designed specifically for programmers. \n#In your efforts to find a clue, you've found a binary code written on the wall behind a vase, \n#and realized that it must be an encrypted message. \n#After some thought, your first guess is that each consecutive 8 bits of the code stand for the character with the corresponding extended ASCII code.\n\n#Assuming that your hunch is correct, decode the message.\n\n#Example\n\n#For code = \"010010000110010101101100011011000110111100100001\", the output should be\n#messageFromBinaryCode(code) = \"Hello!\".\n\n#The first 8 characters of the code are 01001000, which is 72 in the binary numeral system. \n#72 stands for H in the ASCII-table, so the first letter is H.\n#Other letters can be obtained in the same manner.\n\ndef messageFromBinaryCode(code):\n\n\t#List\n\tlista = list(code)\n\t#Length\n\tlongitud = len(lista)\n\t#Number of characters\n\tnum = int(longitud/8)\n\t#Loop over the bits in groups of 8\n\teight = 8\n\tword = []\n\tfor i in range(num):\n\t\tprint(\"From ind: \",i*eight, 'to: ',i*eight + 8)\n\t\tnewWord = lista[(i*eight):(i*eight+8)]\n\t\tprint( 'Binary: ',''.join( newWord ) )\n\t\tdecimal = sum( [ int(newWord[j])*2**(7-j) for j in range(0,8) ])\n\t\tprint (\"To decimal: \", decimal )\n\t\tword.append(chr(decimal))\n\t\tprint(\"To ascii: \",chr(decimal) )\n\n\tprint(\"The word is :\", ''.join(word))\n\treturn ''.join(word)\n\t\t\n\n","sub_path":"messageFromBinaryCode.py","file_name":"messageFromBinaryCode.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265235099","text":"import time\nfrom functools import wraps\n\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\nlogger = logging.getLogger(__name__)\n\n\ndef log_execution(function):\n    @wraps(function)\n    def wrapped(*args, **kwargs):\n        logger.info(\"started execution of %s\", function.__qualname__)\n        ret = function(*args, **kwargs)\n        logger.info(\"ended execution of %s\", function.__qualname__)\n        return ret\n\n    return wrapped\n\n\ndef measure_time(function):\n    @wraps(function)\n    def wrapped(*args, **kwargs):\n        start_time = time.time()\n        result = function(*args, **kwargs)\n\n        logger.info(\n            \"function %s took %.2f\",\n            function.__qualname__,\n            time.time() - start_time,\n        )\n        return result\n\n    return wrapped\n\n\n\ndef operation():\n    time.sleep(3)\n    logger.info(\"running operation...\")\n    return 33\n\nlog_execution(measure_time(operation))()\nlogger.info(\"-------\")\ntime.sleep(1)\n\nmeasure_time(log_execution(operation))()","sub_path":"ch05/decorator_SoC_3.py","file_name":"decorator_SoC_3.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"37446250","text":"##################################################\n# This script 
copies a file from one server to a list of servers\n##################################################\n# Author: {Suhas Srivats Subburathinam}\n# Copyright: Copyright {2018}, {Project: Copy file from one server to another}\n# Credits: [{credit_list}]\n# License: {MIT}\n# Version: {mayor}.{minor}.{rel}\n# Mmaintainer: {suhas srivats}\n# Email: {contact_email}\n# Status: {dev_status}\n# Example: python2.6 copy_file.py\n##################################################\n\nimport subprocess\nimport os\n\n# Open a file with a list of servers\nwith open('/tmp/servers.txt', 'r') as servers:\n for server in servers:\n # Login to each server\n server_path = 'root@' + str(server).strip('\\r\\n') + ':/root/'\n # Copy that file to '/root/.rhosts' location in the destination server\n p = subprocess.Popen([\"scp\", \"/tmp/.rhosts\", server_path])\n sts = os.waitpid(p.pid, 0)\n","sub_path":"copy_file.py","file_name":"copy_file.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637045354","text":"# coding: utf-8\n\nimport chainer\nimport chainer.links as L\nfrom chainer_compiler.elichika import testtools\n\n# Network definition\n\n\nclass A(chainer.Chain):\n\n def __init__(self, n_layer, n_in, n_out):\n super(A, self).__init__()\n with self.init_scope():\n self.l1 = L.NStepLSTM(n_layer, n_in, n_out, 0.1)\n\n def forward(self, x):\n hy, cs, ys = self.l1(None, None, x)\n return hy, cs, ys\n # return hy,cs\n\n\n# ======================================\ndef main():\n import numpy as np\n np.random.seed(314)\n\n n_batch = 7\n n_layer = 3\n n_in = 8\n n_hidden = 5\n n_maxlen = 10\n\n # n_batch = 2\n # n_layer = 2\n # n_in = 2\n # n_hidden = 4\n\n model = A(n_layer, n_in, n_hidden)\n\n # ilens = np.random.randint(1,n_maxlen,size=n_batch)\n ilens = [t for t in range(n_batch)]\n xs = [np.random.rand(i+4, n_in).astype(np.float32) for i in ilens]\n testtools.generate_testcase(model, [xs])\n\nif __name__ == '__main__':\n main()","sub_path":"testcases/elichika_tests/node/Links/NStepLSTM.py","file_name":"NStepLSTM.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400571263","text":"#*****************************************************************************\n# WIKIDB POSTGRES DB INTERFACE\n#*****************************************************************************\n#\n# Part 1: Creation operations\n# Part 2: Retrieval operations\n# Part 3: Status Operations\n# Part 4: Maintenance Operations\n# Part 5: Deletion operations\n# Part 6: Miscellaneous Operations\n#\n#*****************************************************************************\n\n# DUMPING AND RESTORING THE DATABASE\n#\n# pg_dump -h localhost -p 5432 -U postgres -F c -b -v -f \"wikidb.dump\" wikidb\n#\n# NB: Delete and recreate the wikidb before doing a restore.\n#\n# pg_restore -h localhost -p 5432 -U postgres -d wikidb -v \"wikidb.dump\"\n#\n#*****************************************************************************\n\nimport psycopg2\nimport string\nfrom multiprocessing import Process, Manager, freeze_support\nimport pandas as pd\nimport pprint\nfrom functools import reduce\n\nimport processes\n\n#******************************************************************************\n# TABLE CREATION: Vertices, Eges, Root 
Vertices\n#******************************************************************************\n\n#*****************************************************************************\n# Part 1: Table and Index Creation\n#*****************************************************************************\n\n#------------------------------------------------------------------------------\n# Table Names\n#------------------------------------------------------------------------------\n\nvertex_table = 'wiki_vertices'\nroot_vertices_table='wiki_root_vertices'\nedge_table_prefix = 'wiki_edges_'\n\n#------------------------------------------------------------------------------\n\n# There are about 50,000,000 edges and we divide them amongst 37 tabbles based\n# the first letter of te edge's source vertex name.\n\n# Use both letters and digits given the number of digit based topic names.\n\n# TODO: Convert to constant.\n\ndef table_suffixes():\n return list(string.ascii_lowercase) + list(map(str, [0,1,2,3,4,5,6,7,8,9]))\n\n#------------------------------------------------------------------------------\n\ndef source_name_letter (name):\n letter = name[0].lower()\n if letter in table_suffixes():\n return letter\n else:\n return 'z'\n\n#------------------------------------------------------------------------------\n\nDEFAULT_EDGE_TYPE = 'related'\n\n#------------------------------------------------------------------------------\n# DB Connection\n#------------------------------------------------------------------------------\n\n# TODO: Move connection parameters into environment variables.\n\ndef wikidb_connect():\n conn = psycopg2.connect(\"dbname='wikidb' user='postgres' password='postgres' host='localhost'\")\n return conn\n\n#------------------------------------------------------------------------------\n\ndef ensure_connection(conn=None):\n if conn == None:\n conn = wikidb_connect()\n return(conn)\n\n#------------------------------------------------------------------------------\n# CSV File Row to DB Row String\n#------------------------------------------------------------------------------\n\n# This returns a string suitable for insert query with strings escaped.\n\ndef row_to_str (row):\n row_str = \"\"\n for elmt in row:\n if type(elmt) == str:\n row_str += \"'\" + elmt + \"',\"\n else:\n row_str += str(elmt) + \",\"\n return \"(\" + row_str[:-1] + \")\"\n\n#------------------------------------------------------------------------------\n\n# Splits the list into even pieces.\n\ndef split_list (a, n):\n k, m = divmod(len(a), n)\n return list (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))\n\n#------------------------------------------------------------------------------\n# Vertex Table Creation\n#------------------------------------------------------------------------------\n\ncreate_vertices_str = \"CREATE TABLE \" + vertex_table + \" (id serial NOT NULL, \" + \\\n \"name character varying, \" + \\\n \"weight integer DEFAULT 0, \" + \\\n \"CONSTRAINT vertex_id PRIMARY KEY (id));\"\n\ndef create_vertices_table(conn):\n cur = conn.cursor()\n print (\"Creating Wikipedia Vertices Table...\")\n cur.execute(\"DROP TABLE IF EXISTS \" + vertex_table + \";\")\n cur.execute(create_vertices_str)\n\n#------------------------------------------------------------------------------\n\ndef create_vertices_table_indexes(conn):\n cur = conn.cursor()\n cur.execute(\"CREATE INDEX ON \" + vertex_table + \" ((lower(name)));\")\n 
conn.commit()\n\n#------------------------------------------------------------------------------\n# Wiki DB Root Vertices Table Creation\n#------------------------------------------------------------------------------\n\n# NB: The priimary key will be a vertex id from the vertex table.\n\nroot_vertices_str = \"CREATE TABLE \" + root_vertices_table + \\\n \"(id integer NOT NULL, \" + \\\n \"name character varying, \" + \\\n \"weight integer DEFAULT 0, \" + \\\n \"indegree integer DEFAULT 0, \" + \\\n \"outdegree integer DEFAULT 0, \" + \\\n \"CONSTRAINT root_id PRIMARY KEY (id));\"\n\n#------------------------------------------------------------------------------\n\ndef create_root_vertices_table(conn):\n cur = conn.cursor()\n print (\"Creating Wikipedia Vertices Table...\")\n cur.execute(\"DROP TABLE IF EXISTS \" + root_vertices_table + \";\")\n cur.execute(root_vertices_str)\n conn.commit()\n\n#------------------------------------------------------------------------------\n\ndef create_root_vertices_table_indexes(conn):\n cur = conn.cursor()\n cur.execute(\"CREATE INDEX ON \" + root_vertices_table + \" ((lower(name)));\")\n conn.commit()\n\n#------------------------------------------------------------------------------\n \ndef create_root_vertices_tables(conn=None):\n conn = ensure_connection(conn)\n create_root_vertices_table(conn)\n create_root_vertices_table_indexes(conn)\n\n#------------------------------------------------------------------------------\n\nroot_fields = \"(id, name, weight, outdegree)\"\n \ndef add_root_vertex(row, conn=None, commit=False):\n conn = ensure_connection(conn)\n cur = conn.cursor()\n query = \"INSERT INTO \" + root_vertices_table + \" \" + root_fields + \" VALUES \" + row_to_str(row) + \";\"\n try:\n cur.execute(query)\n if commit == True:\n conn.commit()\n except Exception as err:\n print (\"Error: \" + str(err))\n\n#------------------------------------------------------------------------------\n# Dumpings Tables as CSVs\n#------------------------------------------------------------------------------\n \ndef save_vertex_table (pathname, conn=None):\n conn = ensure_connection(conn)\n cur = conn.cursor()\n cur.execute(\"copy (SELECT * FROM wiki_vertices) to \" + \"'\" + pathname + \"' with csv\")\n\n#------------------------------------------------------------------------------\n\ndef save_edge_tables(directory, conn=None):\n None\n \n#------------------------------------------------------------------------------\n# Wiki DB Edge Tables Creation\n#------------------------------------------------------------------------------\n\ndef edge_table_name(letter):\n if letter in table_suffixes():\n return edge_table_prefix + letter\n else:\n return edge_table_prefix + '0'\n\n#------------------------------------------------------------------------------\n\ndef edge_tables (suffixes=table_suffixes()):\n return [edge_table_name(x) for x in suffixes]\n\n#------------------------------------------------------------------------------\n\ndef create_edge_table_str (letter):\n return \"CREATE TABLE \" + \\\n edge_table_name(letter) + \\\n \"(id serial NOT NULL, \" + \\\n \"source integer NOT NULL,\" + \\\n \"target integer NOT NULL, \" + \\\n \"type character varying, \" + \\\n \"weight integer DEFAULT 0, \" + \\\n \"CONSTRAINT edge_id_\" + letter + \" PRIMARY KEY (id));\"\n\n#------------------------------------------------------------------------------\n\ndef create_edge_table(conn, letter):\n cur = conn.cursor()\n cur.execute(\"DROP TABLE IF EXISTS \" + 
edge_table_name(letter) + \";\")\n    cur.execute(create_edge_table_str(letter))\n    cur.execute(\"CREATE INDEX ON \" + edge_table_name(letter) + \" (source);\")\n    cur.execute(\"CREATE INDEX ON \" + edge_table_name(letter) + \" (target);\")\n\n#------------------------------------------------------------------------------\n\n# Create an edge table for each letter of the alphabet and digit.\n# This is a total of 36 tables for the edges. While this may impede edge\n# retrieval, these tables represent the raw graph and it is anticipated\n# that the system will operate on other graphs built from this raw\n# snapshot of the wikipedia pages graph.\n\ndef create_edge_tables(conn):\n    cur = conn.cursor()\n    print (\"Creating Wikipedia Edge Tables...\")\n    for letter in table_suffixes():\n        create_edge_table(conn,letter)\n    conn.commit()\n    \n#------------------------------------------------------------------------------\n# Create WikiDb Tables\n#------------------------------------------------------------------------------\n\ndef create_wiki_db_graph_tables():\n    conn = wikidb_connect()\n    # Vertices\n    create_vertices_table(conn)\n    create_vertices_table_indexes(conn)\n    conn.commit()\n    # Root Vertices\n    create_root_vertices_tables(conn)\n    # Edges\n    create_edge_tables(conn)\n    return True\n\n\n#******************************************************************************\n# Part 2: Retrieval operations\n#******************************************************************************\n\n# NB: The GET functions take an id, the FIND functions take a name or pattern.\n\n#------------------------------------------------------------------------------\n# Vertex Table Retrieval Operations\n#------------------------------------------------------------------------------\n\ndef get_wiki_vertex(vertex_id, conn=None):\n    conn = ensure_connection(conn)\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM \" + vertex_table + \" WHERE id=\" + str(vertex_id) + \";\")\n    rows = cur.fetchall()\n    return rows[0] if rows != [] else None\n\n#------------------------------------------------------------------------------\n\ndef find_wiki_vertex(vertex_name, conn=None):\n    if \"'\" in vertex_name:\n        return None\n    else:\n        conn = ensure_connection(conn)\n        cur = conn.cursor()\n        cur.execute(\"SELECT * FROM \" + vertex_table + \" \" + \\\n                    \"WHERE LOWER(name)=LOWER('\" + vertex_name + \"');\")\n        rows = cur.fetchall()\n        return rows[0] if rows != [] else None\n\n#------------------------------------------------------------------------------\n\ndef vertex_id_name(vertex_id, conn=None):\n    conn = ensure_connection(conn)\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM \" + vertex_table + \" WHERE id=\" + str(vertex_id) + \";\")\n    rows = cur.fetchall()\n    return rows[0][1]\n\n#------------------------------------------------------------------------------\n\ndef vertex_row_name(vertex_row):\n    return vertex_row[1]\n\n#------------------------------------------------------------------------------\n\n# Returns a list of vertex rows matching the pattern\n\ndef get_wiki_vertices(vertex_pattern, conn=None):\n    if \"'\" in vertex_pattern:\n        return None\n    else:\n        conn = ensure_connection(conn)\n        cur = conn.cursor()\n        cur.execute(\"SELECT * FROM \" + vertex_table + \" \" + \\\n                    \"WHERE LOWER(name) like LOWER('\" + vertex_pattern + \"');\")\n        rows = cur.fetchall()\n        return rows\n\n#------------------------------------------------------------------------------\n \ndef find_wiki_vertices(vertex_pattern, conn=None):\n    rows = get_wiki_vertices(vertex_pattern, conn)\n    return [row[1] for row in rows] 
if rows != [] else []\n\n\n#------------------------------------------------------------------------------\n# identify_root_vertices\n#------------------------------------------------------------------------------\n\ndef root_vertex__p (vertex_id):\n    vertex_row = find_wiki_vertex(vertex_id)\n    if vertex_row is not None:\n        return root_vertex_name_p (vertex_row_name(vertex_row))\n    else:\n        return False\n    \n#------------------------------------------------------------------------------\n\ndef root_vertex_name_p (vertex_name):\n    return '_' not in vertex_name and '#' not in vertex_name\n\n#------------------------------------------------------------------------------\n\n# Identifies root vertices in the vertices table for the specified pattern\n\ndef identify_root_vertices (pattern, conn=None):\n    conn = ensure_connection()\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM \" + vertex_table + \" as wv \" + \\\n                \"WHERE wv.name NOT SIMILAR TO '%\\\\_%' \" + \\\n                \"AND wv.name NOT SIMILAR TO '%\\\\#%' \" + \\\n                \"AND LOWER(wv.name) like LOWER('\" + pattern + \"');\")\n    rows = cur.fetchall()\n    return rows\n\n#------------------------------------------------------------------------------\n# Out Neighbors\n#------------------------------------------------------------------------------\n\n# Returns a list of neighbor topic names that are pointed to by topic_name.\n# These will necessarily be stored in the same table, so we only need to\n# query one table.\n\ndef find_wiki_out_neighbors(topic_name, conn=None):\n    conn = ensure_connection(conn)\n    topic_row = find_wiki_vertex(topic_name, conn)\n    topic_id = topic_row[0] if topic_row is not None else None\n    cur = conn.cursor()\n    if topic_id == None:\n        return []\n    else:\n        letter = source_name_letter(topic_name)\n        edge_table = edge_table_name(letter)\n        cur.execute(\"SELECT * FROM \" + edge_table + \" as we \" + \\\n                    \"JOIN \" + vertex_table + \" as wv on we.target = wv.id \" + \\\n                    \"WHERE source=\" + str(topic_id) + \";\")\n        rows = cur.fetchall()\n        return list(set([row[6] for row in rows]))\n\n#------------------------------------------------------------------------------\n# In Neighbors\n#------------------------------------------------------------------------------\n\n# Returns a list of neighbor topic names that point to topic_name. 
These will\n# necessarily be scattered across several tables, so we need to query each of\n# them.\n \ndef _find_wiki_in_neighbors(topic_name, tables=None, conn=None):\n conn = ensure_connection(conn)\n topic_row = find_wiki_vertex(topic_name, conn)\n topic_id = topic_row[0] if topic_row is not None else None\n if topic_id == None:\n return []\n else:\n if tables == None:\n tables = edge_tables()\n cur = conn.cursor()\n all_rows = []\n for edge_table in tables:\n cur.execute(\"SELECT * FROM \" + edge_table + \" as we \" + \\\n \"JOIN \" + vertex_table + \" as wv on we.source = wv.id \" + \\\n \"WHERE target=\" + str(topic_id) + \";\")\n rows = cur.fetchall()\n all_rows += rows\n return list(set([row[6] for row in all_rows]))\n\n#------------------------------------------------------------------------------\n# In Neigbors (PARALLEL Version)\n#------------------------------------------------------------------------------\n\ndef find_wiki_in_neighbors(topic_name, conn=None, threads=8):\n conn = ensure_connection(conn)\n topic_id = find_wiki_vertex(topic_name, conn)\n topic_id = topic_id[0] if topic_id is not None else topic_id\n if topic_id == None:\n return []\n\n lists = split_list(edge_tables(), threads)\n\n # Use a shared variable to collect results fromm each process\n manager = Manager()\n return_dict = manager.dict()\n\n # Run four jobs, one for each quadrant of the matrix.\n jobs = [] \n freeze_support()\n for index, piece in enumerate(lists):\n p = Process(target=neighbor_worker, args=(topic_name, piece, index, return_dict))\n jobs.append(p)\n p.start()\n\n # Gather the results\n for proc in jobs:\n proc.join()\n\n # Join all the neighbors\n neighbors = return_dict.values()\n if neighbors == []:\n return []\n else:\n return reduce(lambda a, b : a + b, neighbors)\n \n#------------------------------------------------------------------------------\n# Root Vertex Table Retrieval Operations\n#------------------------------------------------------------------------------\n\n\ndef get_root_vertex(vertex_id, conn=None):\n conn = ensure_connection(conn)\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM \" + root_vertices_table + \" WHERE id=\" + str(vertex_id) + \";\")\n rows = cur.fetchall()\n return rows[0] if rows != [] else None\n\n#------------------------------------------------------------------------------\n\ndef get_root_vertices(vertex_id, conn=None):\n conn = ensure_connection(conn)\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM \" + root_vertices_table + \";\")\n rows = cur.fetchall()\n return rows\n\n#------------------------------------------------------------------------------\n\ndef find_root_vertex(vertex_name, conn=None):\n if \"'\" in vertex_name:\n return None\n else:\n conn = ensure_connection(conn)\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM \" + root_vertices_table + \" \" + \\\n \"WHERE LOWER(name)=LOWER('\" + vertex_name + \"');\")\n rows = cur.fetchall()\n return rows[0] if rows != [] else None\n\n#------------------------------------------------------------------------------\n\ndef find_related_root_topics (vertex_name):\n related_topics = find_wiki_out_neighbors(vertex_name)\n return [topic for topic in related_topics if root_vertex_name_p(topic)]\n\n#------------------------------------------------------------------------------\n# Edge Table Retrieval Operations\n#------------------------------------------------------------------------------\n\n\ndef find_wiki_edge_by_id(edge_table, source_id, target_id, conn=None):\n conn = 
ensure_connection(conn)\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM \" + edge_table + \" \" + \\\n \"WHERE source=\" + str(source_id) + \" AND target=\" + str(target_id) + \";\")\n rows = cur.fetchall()\n if rows==[]:\n return None\n else:\n return rows\n\n#------------------------------------------------------------------------------\n\ndef find_wiki_edge(source_name, target_name, conn=None):\n conn = ensure_connection(conn)\n source_id = find_wiki_vertex(source_name, conn)\n target_id = find_wiki_vertex(target_name, conn)\n source_id = source_id[0] if source_id is not None else source_id\n target_id = target_id[0] if target_id is not None else target_id\n letter = source_name_letter(source_name)\n edge_table = edge_table_name(letter)\n if source_id==None or target_id==None:\n return None\n else:\n return find_wiki_edge_by_id(edge_table,source_id, target_id, conn)\n\n#------------------------------------------------------------------------------\n\n# Source can be a string or integer. Return an integer source id.\n\ndef ensure_source_id(source, conn=None):\n if type(source) == str:\n return find_wiki_vertex(source, conn)[0]\n else:\n return source\n\n#------------------------------------------------------------------------------\n\n# NB: can be an id or a name.\n\ndef find_wiki_edges(source_name, edge_type=DEFAULT_EDGE_TYPE, conn=None):\n conn = ensure_connection(conn)\n source_id = ensure_source_id(source_name, conn)\n #source_id = source_id[0] if source_id is not None else source_id\n letter = source_name_letter(source_name)\n edge_table = edge_table_name(letter)\n if source_id==None:\n return None\n else:\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM \" + edge_table + \" \" + \\\n \"WHERE source=\" + str(source_id) + \" AND type='\" + edge_type + \"';\")\n rows = cur.fetchall()\n return rows\n\n \n#******************************************************************************\n# Part 3: Status Operations\n#******************************************************************************\n\ndef count_wiki_vertices(conn=None):\n conn = ensure_connection(conn)\n cur = conn.cursor()\n cur.execute(\"SELECT count(*) FROM wiki_vertices\")\n rows = cur.fetchall()\n return rows[0][0]\n\n#------------------------------------------------------------------------------\n\ndef count_root_vertices(conn=None):\n conn = ensure_connection(conn)\n cur = conn.cursor()\n cur.execute(\"SELECT count(*) FROM wiki_root_vertices\")\n rows = cur.fetchall()\n return rows[0][0]\n\n#------------------------------------------------------------------------------\n\n# vertex can be a vertex_id or a vertex_name.\n\ndef count_vertex_out_neighbors (vertex, conn=None):\n return len(find_wiki_edges(vertex, conn=conn))\n\n#------------------------------------------------------------------------------\n\n# Retturn a dictionaru of table_namme and edge_count.\n\ndef count_wiki_edges_by_table(conn=None):\n conn = ensure_connection(conn)\n cur = conn.cursor()\n edge_counts = {}\n for letter in table_suffixes():\n edge_table = edge_table_name(letter)\n cur.execute(\"SELECT count(*) FROM \" + edge_table)\n rows = cur.fetchall()\n edge_counts.update({edge_table : rows[0][0]})\n return edge_counts\n \n#------------------------------------------------------------------------------\n\ndef count_wiki_edges(conn=None):\n conn = ensure_connection(conn)\n counts = count_wiki_edges_by_table (conn)\n return sum(counts.values())\n\n#******************************************************************************\n# Part 4: 
Maintenance Operations\n#******************************************************************************\n\n#------------------------------------------------------------------------------\n# Vertex Table INSERT operations\n#------------------------------------------------------------------------------\n\ndef add_wiki_vertex(vertex_name, conn=None, commit_p=False):\n    if \"'\" in vertex_name:\n        return []\n    else:\n        conn = ensure_connection(conn)\n        cur = conn.cursor()\n        if find_wiki_vertex(vertex_name, conn) == None:\n            cur.execute(\"INSERT INTO \" + vertex_table + \" (name) VALUES ('\" + vertex_name + \"');\")\n            if commit_p == True:\n                conn.commit()\n\n#------------------------------------------------------------------------------\n\ndef add_wiki_vertices(vertices, conn=None):\n    conn = ensure_connection(conn)\n    for vertex in vertices:\n        add_wiki_vertex(vertex, conn, True)\n    conn.commit()\n\n#------------------------------------------------------------------------------\n\ndef update_vertex_weight(vertex_id, conn=None, commit=False):\n    conn = ensure_connection(conn)\n    vertex_name = vertex_id_name(vertex_id, conn=conn)\n    weight = count_vertex_out_neighbors (vertex_name, conn=conn)\n    cur = conn.cursor()\n    try:\n        cur.execute(\"UPDATE \" + vertex_table + \" SET weight = \" + str(weight) + \\\n                    \" WHERE id=\" + str(vertex_id) + \";\")\n        if commit==True:\n            conn.commit()\n        return find_wiki_vertex(vertex_id, conn=conn)\n    except Exception as err:\n        print (\"Failed to update vertex weight for vertex: \" + str(vertex_id))\n        return None\n\n#------------------------------------------------------------------------------\n\ndef update_all_vertex_weights ():\n    count = 0\n    for symbol in table_suffixes():\n        rows = get_wiki_vertices(symbol + '%')\n        print (\"Processing \" + str(len(rows)) + \" vertices starting with \" + symbol + \"...\")\n        conn = ensure_connection()\n        for row in rows:\n            if count%10000==0:\n                print (\"Processed \" + str(count) + \" vertices.\")\n            vertex_id = row[0]\n            update_vertex_weight (vertex_id, conn=conn, commit=False)\n            count += 1\n        conn.commit()\n\n#------------------------------------------------------------------------------\n# Root Vertex Table INSERT operations\n#------------------------------------------------------------------------------\n\n# A bit of a hack to ensure the root vertices table has all its weights.\n\ndef ensure_root_vertex_weight(row):\n    id = row[0]\n    weight = row[2]\n    #outdegree = row[3]\n    if weight == 0:\n        weight = count_vertex_out_neighbors(row[1])\n    #if outdegree == 0:\n    #    outdegree = weight\n    return [id, row[1], weight, weight]\n\n\n#------------------------------------------------------------------------------\n\n# This identifies the root vertices in the global vertex table that match\n# pattern and adds them to the root vertex table\n\ndef generate_root_vertices_for_prefix (pattern):\n    try:\n        conn = ensure_connection()\n        cur = conn.cursor()\n        rows = identify_root_vertices(pattern, conn)\n        print (\"Found \" + str(len(rows)) + \" vertices matching \" + pattern)\n        count = 0\n        for row in rows:\n            vertex_id = row[0]\n            count += 1\n            if count%1000==0:\n                print (\"Added \" + str(count) + \" new root vertices...\")\n            row = ensure_root_vertex_weight(row)\n            add_root_vertex(row, conn=conn)\n    except Exception as err:\n        print (err)\n    conn.commit()\n    conn.close()\n\n#------------------------------------------------------------------------------\n\n# This completely populates the root vertices table\n\ndef generate_root_vertices():\n    vertex_prefixes = table_suffixes()\n    for prefix in vertex_prefixes:\n        print (\"\\nProcessing 
prefix \" + prefix)\n generate_root_vertices_for_prefix(prefix+\"%\")\n\n#------------------------------------------------------------------------------\n# Wiki DB Edge Table Maintenace\n#------------------------------------------------------------------------------\n\n\ndef add_wiki_edge(source_name, target_name, edge_type=DEFAULT_EDGE_TYPE, conn=None, commit_p=False):\n conn = ensure_connection(conn)\n letter = source_name_letter(source_name)\n edge_table = edge_table_name(letter)\n source_id = find_wiki_vertex(source_name, conn)\n target_id = find_wiki_vertex(target_name, conn)\n source_id = source_id[0] if source_id is not None else source_id\n target_id = target_id[0] if target_id is not None else target_id\n if source_id==None or target_id==None:\n return None\n else:\n if find_wiki_edge_by_id(edge_table, source_id, target_id, conn) == None:\n cur = conn.cursor()\n cur.execute(\"INSERT INTO \" + edge_table + \" (source, target, type) \" +\\\n \"VALUES (\" + str(source_id) + \", \" + str(target_id) + \", '\" + edge_type + \"');\")\n if commit_p == True:\n conn.commit()\n\n#------------------------------------------------------------------------------\n\ndef add_wiki_edges(source_name, target_names, edge_type='related', conn=None):\n conn = ensure_connection(conn)\n for target_name in target_names:\n add_wiki_edge(source_name, target_name, edge_type=edge_type, conn=conn)\n conn.commit()\n\n#------------------------------------------------------------------------------\n# BOGUS VERTICES\n#------------------------------------------------------------------------------\n\ndef find_bogus_vertices (conn=None):\n conn = ensure_connection()\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM \" + vertex_table + \\\n \" WHERE name like '%\\#%'\")\n rows = cur.fetchall()\n return rows\n\n#------------------------------------------------------------------------------\n\ndef delete_bogus_vertex(vertex_name, conn=None):\n conn = ensure_connection()\n cur = conn.cursor()\n\n # Gather the vertex and edges\n vertex_row = find_wiki_vertex(vertex_name, conn)\n vertex_id = vertex_row[0] if vertex_row is not None else None\n if vertex_id==None:\n return False\n\n in_neighbors = find_wiki_in_neighbors(vertex_name, conn=conn)\n out_neighbors = find_wiki_out_neighbors(vertex_name, conn=conn)\n\n if len(out_neighbors) < 5 and len(in_neighbors) < 5:\n # Delete the vertex first\n cur.execute(\"DELETE FROM \" + vertex_table + \" as wv \" + \\\n \" WHERE wv.id=\" + str(vertex_id) + \";\")\n conn.commit()\n\n # Delete outbound edges\n outbound_edge_table = edge_table_name(vertex_name[0])\n cur.execute(\"DELETE FROM \" + outbound_edge_table + \" as oe \" + \\\n \" WHERE source=\" + str(vertex_id) + \";\")\n conn.commit()\n\n # Delete inbound edges\n for source_name in in_neighbors:\n inbound_edge_table = edge_table_name(source_name[0])\n source_row = find_wiki_vertex(source_name, conn)\n source_id = source_row[0] if source_row is not None else None\n if source_id != None:\n cur.execute(\"DELETE FROM \" + inbound_edge_table + \" as ie \" + \\\n \" WHERE ie.source=\" + str(source_id) + \\\n \" AND ie. 
target=\" + str(vertex_id) + \";\")\n conn.commit()\n print (\"Deleted 1 vertex, \" + str(len(out_neighbors)) + \" outbound edges and \" + \\\n str(len(in_neighbors)) + \" inbound edges.\")\n # Wrap up\n conn.close()\n return True\n\n#------------------------------------------------------------------------------\n\ndef delete_bogus_vertices(conn=None):\n conn = ensure_connection()\n bogus_vertices = find_bogus_vertices(conn)\n count = 0\n print (\"Found \" + str(len(bogus_vertices)) + \" bogus vertices.\")\n for vertex_row in bogus_vertices:\n count += 1\n if count%100==0:\n print (\"Deleted \" + str(count) + \" bogus vertices...\")\n delete_bogus_vertex(vertex_row[1], conn=conn)\n return True\n\n#------------------------------------------------------------------------------\n# Out Neighbors\n#------------------------------------------------------------------------------\n\n\n \n#------------------------------------------------------------------------------\n# Root Vertices\n#------------------------------------------------------------------------------\n\n#*****************************************************************************\n# Part 5: Status Operations\n#*****************************************************************************\n\n#------------------------------------------------------------------------------\n# Miscellneous Output Functions.\n#------------------------------------------------------------------------------\n\nimport sys\nfrom urllib.parse import unquote\n\ndef print_raw(text, separator):\n text = text + separator\n textbytes = unquote(text).encode(\"utf-8\")\n sys.stdout.buffer.write(textbytes)\n\n#------------------------------------------------------------------------------\n \n# create_wiki_db_graph_tables()\n\n#------------------------------------------------------------------------------\n# End of File\n#-----------------------------------------------------------------------------\n","sub_path":"src/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":31019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631294118","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom data import main\n\n\nclass TestCase(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome('/home/user/python/Project2_tilimili/resources/chromedriver')\n self.driver.implicitly_wait(15)\n self.driver.maximize_window()\n\n\n def test_search_by_toy(self):\n driver = self.driver\n driver.get(main.baseurl)\n self.search_toy('search_block_form', 'Игрушка')\n driver.find_element_by_xpath(\".//li[7]/div[6]/form/div/input[2]\").click()\n driver.find_element_by_xpath(\".//div[9]/div[3]/button[1]\").click()\n driver.get_screenshot_as_file(\"./screenshots/Cart.png\")\n\n\n\n def search_toy(self, locator, search_str):\n elem = self.driver.find_element_by_name(locator)\n elem.send_keys(search_str)\n elem.send_keys(Keys.ENTER)\n\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__== \"__main__\":\n unittest.main()\n","sub_path":"base_test.py","file_name":"base_test.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"431904585","text":"from maintain_frontend.services.validation.fieldset_validator import FieldsetValidator\nfrom maintain_frontend.services.validation.field_validator import FieldValidator\nfrom 
maintain_frontend.services.validation.validation_error_builder import ValidationErrorBuilder\n\n\nclass ChargeDateValidator(object):\n\n @staticmethod\n def validate(day, month, year):\n \"\"\"Specifies which validation methods should be called for each input field.\n\n\n parameters:\n - day: The day the charge was added. This is an optional field on the form.\n - month: The month the charge was added. This is an optional field on the form.\n - year: The year the charge was added. This is an optional field on the form.\n\n returns:\n dict: An instance of ValidationErrorBuilder with a ValidationError dict and a heading summary message.\n \"\"\"\n\n validation_error_builder = ValidationErrorBuilder()\n\n if day or month or year:\n FieldsetValidator([day, month, year], 'date', 'Date', validation_error_builder) \\\n .is_valid_date()\n\n FieldValidator(year, \"date\", 'Date', validation_error_builder,\n summary_message=\"Date is invalid\", inline_message=\"Year must be in the format YYYY\")\\\n .is_year_format()\n\n FieldsetValidator([day, month, year], 'date', 'Date', validation_error_builder,\n summary_message=\"Date is invalid\", inline_message=\"Date cannot be a future date\") \\\n .is_past_date()\n\n return validation_error_builder.get()\n","sub_path":"maintain_frontend/add_land_charge/validation/charge_date_validator.py","file_name":"charge_date_validator.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619619943","text":"# import os\r\n# path=\"D:\\\\create_directories\"\r\n\r\n# for i in range(1,10):\r\n# os.chdir(path)\r\n# newfolder='Tutorial-'+str(i)\r\n# os.makedirs(newfolder)\r\n\r\nimport shutil\r\nimport os\r\n#shutil.copy\r\n#shutil.move\r\npath=\"D:\\\\create_directories\"\r\nsrc = r\"D:\\capcha_images\\capcha\\capcha\"\r\n# dest = r\"C:\\Users\\Moondra\\Desktop\\FOLDER B\"\r\nfiles = os.listdir(src)\r\nprint(len(files))\r\nfor i in range(0,len(files),10):\r\n os.chdir(path)\r\n newfolder='Tutorial-'+str(i)\r\n os.makedirs(newfolder)\r\n for j in range(i,i+10):\r\n dest = r'D:\\\\create_directories\\\\'+newfolder\r\n full_file_name = os.path.join(src,files[j])\r\n shutil.copy(full_file_name, dest)\r\n # print(files[i])\r\n","sub_path":"Tutorial.py","file_name":"Tutorial.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90850466","text":"# -*- coding: utf-8 -*-\n\nCPP = 'c++'\nPY = 'py'\nGO = 'go'\nVALID_EXT = [CPP, PY, GO]\nLANGUAGE_IDS = {CPP: 3003,\n PY: 3023,\n GO: 3013}\n\ndef get_file_extention(filename):\n # 拡張子の取得\n if '.' 
not in filename:\n        return None\n    return filename.split('.')[-1]","sub_path":"valid_file_ext.py","file_name":"valid_file_ext.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"383532659","text":"import time\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#import matplotlib.mlab as mlab\nfrom sklearn.metrics import confusion_matrix\n\n#Plotting confusion Matrix\ndef direction_confusion_matrix(matchIdx,gt_mat,cd_mat,dirIdx):\n\t# L R\n\t# L\n\t# R\n\tentryConfusion = [[0,0],[0,0]] \n\tfor match in matchIdx:\n\t\tgt_entry = gt_mat[match[0], dirIdx]\n\t\tcd_entry = cd_mat[match[1], dirIdx]\n\t\t\n\t\tif cd_entry == 'left' and gt_entry == 'left':\n\t\t\tentryConfusion[0][0] = entryConfusion[0][0] + 1\n\t\telif gt_entry == 'right' and cd_entry == 'left':\n\t\t\tentryConfusion[0][1] = entryConfusion[0][1] + 1\n\t\telif gt_entry == 'left' and cd_entry == 'right':\n\t\t\tentryConfusion[1][0] = entryConfusion[1][0] + 1\n\t\telif cd_entry == 'right' and gt_entry == 'right':\n\t\t\tentryConfusion[1][1] = entryConfusion[1][1] + 1\n\n\tentryConfusion = np.array(entryConfusion)\n\n\tprint (entryConfusion)\n\t\n\txlabels = ['Ground Truth Left', 'Ground Truth Right']\n\tylabels = ['Captured Left', 'Captured Right']\n\n\tfont = {'family': 'serif',\n\t\t'color': 'black',\n\t\t'weight': 'bold',\n\t\t'size': 16,\n\t }\n\n\tx_val = entryConfusion[0][0]\n\ty_val = entryConfusion[1][1]\n\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tcax = ax.matshow(entryConfusion, interpolation='nearest')\n\tfig.colorbar(cax)\n\n\tax.set_xticklabels(['']+xlabels)\n\tax.set_yticklabels(['']+ylabels)\n\t\n\tax.text(0, 0, str(entryConfusion[0][0]), fontdict=font, va='center', ha='center')\n\tax.text(1, 0, str(entryConfusion[0][1]), fontdict=font, va='center', ha='center')\n\tax.text(0, 1, str(entryConfusion[1][0]), fontdict=font, va='center', ha='center')\n\tax.text(1, 1, str(entryConfusion[1][1]), fontdict=font, va='center', ha='center')\n\n\t#plt.show()\n\tplt.savefig(str(time.strftime('%H:%M:%S')))\n\tplt.close()\n\n\n#Producing Histogram of Heights\ndef height_histogram(matched_heights):\n\t# the histogram of the data\n\t#n, bins, patches = plt.hist(matched_heights, 40, normed=1, facecolor='green', alpha=0.75)\n\tplt.hist(matched_heights)\n\tplt.xlabel('Height Measurements')\n\tplt.ylabel('Count')\n\tplt.title(r'$\\\\mathrm{Histogram\\\\ of\\\\ IQ:}\\\\ \\\\mu=100,\\\\ \\\\sigma=15$')\n\tplt.axis([10, 80, 0, 10])\n\tplt.grid(True)\n\n\tplt.show()\n\t#plt.savefig('histogram')\n\tplt.close()\n\t#This is being added to collect height values over time\n\theightFile = open('allHeights.txt', 'a')\n\n\tfor s in matched_heights:\n\t    heightFile.write(str(s) + ' ')\n\n\theightFile.write('\\n')\n\theightFile.close()\n\n#Opening files and processing data\ngroundTruth = open(\"groundTruthResults.csv\")\ncollectedData = open(\"collectedDataResults.csv\")\n\n#Array of all the lines in Ground Truth\ngt_lines = groundTruth.readlines()\n#Array of all the lines in Collected Data\ncd_lines = collectedData.readlines()\n\n#Ground Truth Matrix\ngt_mat = []\nfor gt in gt_lines:\n\tgt_split = gt.strip().split(\",\")\n\tgt_mat.append(gt_split)\ngt_mat = np.array(gt_mat)\n\n#Collected Data Matrix\ncd_mat = []\nfor cd in cd_lines:\n\tcd_split = cd.strip().split(\",\")\n\tcd_mat.append(cd_split)\ncd_mat = np.array(cd_mat)\n\nmatchIdx = [];\n\nfor i in range(0,len(gt_lines)):\n\t#get info on GT\n\t#print (\"GT\")\n\t#print (gt_mat[i,:])\n\tgt_st = 
gt_mat[i,0]\n\tgt_et = gt_mat[i,1]\n\t#print (\"Ground Truth: \" + gt_st +\" \"+ gt_et)\n\t#find a matching event in cd\n\tfor j in range(0,len(cd_lines)):\n\t\t#get infor on CD\n\t\t#print (\"CD\")\n\t\t#print (cd_mat[j,:])\n\t\tcd_st = cd_mat[j,0]\n\t\tcd_et = cd_mat[j,1]\n\t\t#print (\"Collected Data: \" + cd_st + \",\" + cd_et)\n\t\t#check if within GT timestamps\n\t\tif cd_st > gt_st and cd_et < gt_et:\n\t\t\t#print (\"Matched Events\")\n\t\t\t#print gt\n\t\t\t#print cd\n\t\t\tmatchIdx.append([i,j])\n\t\t\t#print i\n\t\t\t#print j\n\nmatchIdx = np.array(matchIdx)\n\n#Cycling throught the ground truth matrix and check for false negatives/positives\nfor index in range(len(gt_mat)):\n\t#If the index is not found in the ground truth array then it's a false negative\n\tif index not in matchIdx[:,0]:\n\t\tprint ('False Negative Found On: ', index)\n\t#If the index is not found in the collected data array then it's a false positive\n\tif index not in matchIdx[:,1]:\n\t\tprint ('False Positive Found On: ', index)\n\n\n#histogram of all matching heights\nmatched_heights = cd_mat[matchIdx[:,1],2].astype(float)\nheight_histogram(matched_heights)\n\n#Confusion Matrix of Entry and Exit\n#direction_confusion_matrix(matchIdx,gt_mat,cd_mat,3) # For all entry points\n#direction_confusion_matrix(matchIdx,gt_mat,cd_mat,4) # For all exit points\n","sub_path":"newDirectionAlgorithem/produceComparisonGraphs.py","file_name":"produceComparisonGraphs.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518824841","text":"import sys\nimport os\nfrom time import sleep\n\nfrom igraph import Graph\nimport matplotlib.pyplot as plt\n\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nfrom utils.graph import read_graph\n\nvalues, degrees = [], []\ncentralities = []\nfor i in range(10):\n g = read_graph('../data/random/100_250_{}'.format(i))\n #degrees.append([len(neighbors) for neighbors in g.tmp])\n graph = Graph()\n graph.add_vertices(g.n)\n graph.add_edges([(n, neighbor) for n in range(g.n) for neighbor in g.tmp[n] if n < neighbor])\n centralities.append(graph.evcent())\n degrees.append([len(neighbors) for neighbors in g.tmp])\n\nwith open('nodewit3.txt','r') as file:\n for line in file.readlines():\n if line=='' or line=='\\n': values.append([])\n else:\n values[-1].extend([float(token) for token in line.split(' ') if token])\n\nprint(len(centralities), len(degrees))\nfor i, (type_, data) in enumerate(zip(('centrality', 'degree'), (centralities, degrees))):\n print(i)\n plt.figure(i)\n for figure in range(10):\n print(len(data))\n #plt.ion()\n plt.plot(data[figure], values[figure], 'o')\n plt.xlabel('{} per node'.format(type_.title()))\n plt.ylabel('Model-predicted values, $P$, per node')\n plt.title('Graph of model-predicated values,\\n$P$, against {}, per node\\nfor 100-nodes-250-edges graphs'.format(type_))\n plt.tight_layout()\n plt.savefig('../images/{}_correlation.png'.format(type_))\n plt.savefig('../images/{}_correlation.svg'.format(type_))\n #plt.clf()\nplt.show()\n\n \n#values = list(zip(*values))\n#degrees = list(zip(*degrees))\n#with open('values.csv','w') as file:\n# for hundredth in values:\n# file.write(','.join(map(str,hundredth))+'\\n')\n#with open('degrees.csv','w') as file:\n# for hundredth in degrees:\n# 
file.write(','.join(map(str,hundredth))+'\\n')\n","sub_path":"nodewise/nodewit3.py","file_name":"nodewit3.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"304396605","text":"import numpy as np\nimport pygrib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport codecs, json\nplt.figure()\n# grib='multi_1.at_10m.t00z.f000.grib2'\n# grib='multi_1.wc_4m.wind.201812.grb2'\ngrib='gfs.t12z.goessimpgrb2.1p00.f000'\ngrbs=pygrib.open(grib)\nfor grb in grbs:\n\tprint(grb)\ngrb = grbs[1]\n\n# grb = grbs.select(shortName='wind')\n# grb = grbs.select(name='Significant height of wind waves')[0]\ndata=grb.values\nlat,lon = grb.latlons()\nprint(lat.shape, lon.shape)\n\nm=Basemap(projection='mill',lat_ts=10,llcrnrlon=lon.min(), \\\n urcrnrlon=lon.max(),llcrnrlat=lat.min(),urcrnrlat=lat.max(), \\\n resolution='c')\n\nx, y = m(lon,lat)\n\ncs = m.pcolormesh(x,y,data,shading='flat',cmap=plt.cm.jet)\n\nm.drawcoastlines()\nm.fillcontinents()\nm.drawmapboundary()\nm.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0])\nm.drawmeridians(np.arange(-180.,180.,60.),labels=[0,0,0,1])\n\nplt.colorbar(cs,orientation='vertical')\nplt.title('Example 2: NWW3 Significant Wave Height from GRiB')\nplt.show()\n\n# with open('output.json', 'w') as json_file:\n# json.dump(data, json_file)\n\n\na = data.reshape(181,360) # a 2 by 5 array\n# b = a.to_dict() # nested lists with same data, indices\n# b = dict(enumerate(a))\n# b = a.tolist()\nb = a.flatten()\n# c = dict(enumerate(b))\n# file_path = \"output.json\" ## your path variable\n# json.dump(c, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4) ### this saves the array in .json format\n# json_dump = json.dumps(b)\n# print(json_dump)\n\n# class NumpyEncoder(json.JSONEncoder):\n# def default(self, obj):\n# if isinstance(obj, np.ndarray):\n# return obj.tolist()\n# return json.JSONEncoder.default(self, obj)\n\n\n# file_path = \"output.json\" ## your path variable\n# json.dump({'value': b}, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4, cls=NumpyEncoder) ### this saves the array in .json format\n","sub_path":"plotGrib.py","file_name":"plotGrib.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"399952328","text":"\"\"\" This script use values optimized in the NRP to produce a pkl configuration \nfile to use in a open-loop CPG\"\"\"\n\nimport pickle\nfrom tigrillo_ctrl import control\n\n__author__ = \"Gabriel Urbain\" \n__copyright__ = \"Copyright 2017, Human Brain Projet, SP10\"\n\n__license__ = \"MIT\" \n__version__ = \"2.0\" \n__maintainer__ = \"Gabriel Urbain\"\n__email__ = \"gabriel.urbain@ugent.be\" \n__status__ = \"Research\" \n__date__ = \"April 10th, 2018\"\n\n\ncpg_conf = control.cpg_config\n\nruntime = 20\ncpg_conf[\"Controller\"][\"runtime\"] = 20\n\n\namp_front = 1999.3792022624\namp_back = 1837.9431695635\no_fr = 4.0352272465 # p0\no_bl = 0.3475388624\no_br = 3.2733492247\nd0 = 0.8494214357\nd1 = 0.5890023762\noffset_front = 30.4883062863\noffset_back = -0.3721909567\nomega = 6.28\n\no_f = 0\n\nparams = \"[{'mu': \" + str(amp_front) + \",\"\nparams += \"'o': \" + str(offset_front) + \",\"\nparams += \"'duty_factor': \" + str(d0) + \",\"\nparams += \"'phase_offset': \" + str(0) + \",\"\nparams += \"'omega': \" + str(omega) + \",\"\nparams += 
\"'coupling': [0, 5, 5, 5]}, \"\n\nparams += \"{'mu': \" + str(amp_front) + \",\"\nparams += \"'o': \" + str(offset_front) + \",\"\nparams += \"'duty_factor': \" + str(d0) + \",\"\nparams += \"'phase_offset': \" + str(o_fr) + \",\"\nparams += \"'omega': \" + str(omega) + \",\"\nparams += \"'coupling': [5, 0, 5, 5]}, \"\n\nparams += \"{'mu': \" + str(amp_back) + \",\"\nparams += \"'o': \" + str(offset_back) + \",\"\nparams += \"'duty_factor': \" + str(d1) + \",\"\nparams += \"'phase_offset': \" + str(o_bl) + \",\"\nparams += \"'omega': \" + str(omega) + \",\"\nparams += \"'coupling': [5, 5, 0, 5]}, \"\n\nparams += \"{'mu': \" + str(amp_back) + \",\"\nparams += \"'o': \" + str(offset_back) + \",\"\nparams += \"'duty_factor': \" + str(d1) + \",\"\nparams += \"'phase_offset': \" + str(o_br) + \",\"\nparams += \"'omega': \" + str(omega) + \",\"\nparams += \"'coupling': [5, 5, 5, 0]}]\"\n\ncpg_conf[\"Controller\"][\"params\"] = params\n\nwith open(\"walking.pkl\", \"wb\") as f:\n\tpickle.dump(cpg_conf, f)","sub_path":"src/tigrillo_ctrl/nrp/walking_gait.py","file_name":"walking_gait.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104690936","text":"# -*- encoding: utf-8 -*-\n\"\"\"\ntests delegation primaily from keri.core.eventing\n\n\"\"\"\nimport os\n\nimport pytest\n\nfrom keri import kering\nfrom keri import help\nfrom keri.db import dbing\nfrom keri.base import keeping\nfrom keri.core import coring\nfrom keri.core import eventing\n\nlogger = help.ogler.getLogger()\n\n\ndef test_delegation():\n \"\"\"\n Test creation and validation of delegated identifer prefixes and events\n\n \"\"\"\n # bob is the delegator del is bob's delegate\n\n bobSalt = coring.Salter(raw=b'0123456789abcdef').qb64\n delSalt = coring.Salter(raw=b'abcdef0123456789').qb64\n\n with dbing.openDB(name=\"bob\") as bobDB, \\\n keeping.openKS(name=\"bob\") as bobKS, \\\n dbing.openDB(name=\"del\") as delDB, \\\n keeping.openKS(name=\"del\") as delKS:\n\n # Init key pair managers\n bobMgr = keeping.Manager(keeper=bobKS, salt=bobSalt)\n delMgr = keeping.Manager(keeper=delKS, salt=delSalt)\n\n # Init Keverys\n bobKvy = eventing.Kevery(db=bobDB)\n delKvy = eventing.Kevery(db=delDB)\n\n # Setup Bob by creating inception event\n verfers, digers, cst, nst = bobMgr.incept(stem='bob', temp=True) # algo default salty and rooted\n bobSrdr = eventing.incept(keys=[verfer.qb64 for verfer in verfers],\n nxt=coring.Nexter(digs=[diger.qb64 for diger in digers]).qb64,\n code=coring.MtrDex.Blake3_256)\n\n bobPre = bobSrdr.ked[\"i\"]\n assert bobPre == 'EiBlVttjqvySMbA4ShN19rSrz3D0ioNW-Uj92Ri7XnFE'\n\n bobMgr.move(old=verfers[0].qb64, new=bobPre) # move key pair label to prefix\n\n sigers = bobMgr.sign(ser=bobSrdr.raw, verfers=verfers)\n\n msg = bytearray(bobSrdr.raw)\n counter = coring.Counter(code=coring.CtrDex.ControllerIdxSigs, count=len(sigers))\n msg.extend(counter.qb64b)\n for siger in sigers:\n msg.extend(siger.qb64b)\n\n assert msg == bytearray(b'{\"v\":\"KERI10JSON0000e6_\",\"i\":\"EiBlVttjqvySMbA4ShN19rSrz3D0ioNW-U'\n b'j92Ri7XnFE\",\"s\":\"0\",\"t\":\"icp\",\"kt\":\"1\",\"k\":[\"DqI2cOZ06RwGNwCovYU'\n b'WExmdKU983IasmUKMmZflvWdQ\"],\"n\":\"E7FuL3Z_KBgt_QAwuZi1lUFNC69wvyH'\n b'SxnMFUsKjZHss\",\"wt\":\"0\",\"w\":[],\"c\":[]}-AABAAQPFdtnncXLz6dE6A-tXG'\n b'YYK0BHu3I3Pj-G8DxlbzC3yx5MV8yucZILqAA5toZNODnHVHZtPIMkDknqldL4utBQ')\n\n # apply msg to bob's Kevery\n bobKvy.process(ims=bytearray(msg)) # process local copy of msg\n bobK = 
bobKvy.kevers[bobPre]\n assert bobK.prefixer.qb64 == bobPre\n assert bobK.serder.diger.qb64 == bobSrdr.dig\n assert bobK.serder.diger.qb64 == 'EvP2kWxEjTMI3auc6x64EpU-nMQZHiBeKeuavcGdRB24'\n\n # apply msg to del's Kevery\n delKvy.process(ims=bytearray(msg)) # process remote copy of msg\n assert bobPre in delKvy.kevers\n\n # Setup Del's inception event assuming that Bob's next event will be an ixn delegating event\n verfers, digers, cst, nst = delMgr.incept(stem='del', temp=True) # algo default salty and rooted\n\n seal = eventing.SealLocation(i=bobK.prefixer.qb64,\n s=\"{:x}\".format(bobK.sn+1),\n t=coring.Ilks.ixn,\n p=bobK.serder.diger.qb64)\n\n assert seal._asdict() == dict(i='EiBlVttjqvySMbA4ShN19rSrz3D0ioNW-Uj92Ri7XnFE',\n s='1',\n t='ixn',\n p='EvP2kWxEjTMI3auc6x64EpU-nMQZHiBeKeuavcGdRB24')\n\n delSrdr = eventing.delcept(keys=[verfer.qb64 for verfer in verfers],\n seal=seal,\n nxt=coring.Nexter(digs=[diger.qb64 for diger in digers]).qb64)\n\n delPre = delSrdr.ked[\"i\"]\n assert delPre == 'ErLe2qWp4VCmDp7v_R01tC-ha13ZEZY0VGcgYtPRhqPs'\n\n delMgr.move(old=verfers[0].qb64, new=delPre) # move key pair label to prefix\n assert delSrdr.dig == 'ESDuaqpoI8-HLD8-eLijUMZpXqYFkNArJFDvt3ABYr9I'\n\n # Now create delegating event\n seal = eventing.SealEvent(i=delPre,\n s=delSrdr.ked[\"s\"],\n d=delSrdr.dig)\n bobSrdr = eventing.interact(pre=bobK.prefixer.qb64,\n dig=bobK.serder.diger.qb64,\n sn=bobK.sn+1,\n data=[seal._asdict()])\n\n sigers = bobMgr.sign(ser=bobSrdr.raw, verfers=bobK.verfers)\n\n msg = bytearray(bobSrdr.raw)\n counter = coring.Counter(code=coring.CtrDex.ControllerIdxSigs,\n count=len(sigers))\n msg.extend(counter.qb64b)\n for siger in sigers:\n msg.extend(siger.qb64b)\n\n assert msg == bytearray(b'{\"v\":\"KERI10JSON000107_\",\"i\":\"EiBlVttjqvySMbA4ShN19rSrz3D0ioNW-U'\n b'j92Ri7XnFE\",\"s\":\"1\",\"t\":\"ixn\",\"p\":\"EvP2kWxEjTMI3auc6x64EpU-nMQZH'\n b'iBeKeuavcGdRB24\",\"a\":[{\"i\":\"ErLe2qWp4VCmDp7v_R01tC-ha13ZEZY0VGcg'\n b'YtPRhqPs\",\"s\":\"0\",\"d\":\"ESDuaqpoI8-HLD8-eLijUMZpXqYFkNArJFDvt3ABY'\n b'r9I\"}]}-AABAAZ4V2cSIXYEPg5BtkJSHVBj-A0dGI6rH2XGaVt1kewqGeJjpy4uz'\n b'ObPWnoBpaEojFa5AnrUJEgMytORoWMqEhCw')\n\n # apply msg to bob's Kevery\n bobKvy.process(ims=bytearray(msg)) # process local copy of msg\n assert bobK.serder.diger.qb64 == bobSrdr.dig # key state updated so event was validated\n assert bobK.serder.diger.qb64 == 'EtzXPztLsGC5DGyooSdHdBGIOHjhblBWtZ_AOhGS-hDE'\n\n # apply msg to del's Kevery\n delKvy.process(ims=bytearray(msg)) # process remote copy of msg\n assert delKvy.kevers[bobPre].serder.diger.qb64 == bobSrdr.dig\n\n # now create msg with Del's delegated inception event\n sigers = delMgr.sign(ser=delSrdr.raw, verfers=verfers)\n\n msg = bytearray(delSrdr.raw)\n counter = coring.Counter(code=coring.CtrDex.ControllerIdxSigs,\n count=len(sigers))\n msg.extend(counter.qb64b)\n for siger in sigers:\n msg.extend(siger.qb64b)\n\n assert msg == bytearray(b'{\"v\":\"KERI10JSON000165_\",\"i\":\"ErLe2qWp4VCmDp7v_R01tC-ha13ZEZY0VG'\n b'cgYtPRhqPs\",\"s\":\"0\",\"t\":\"dip\",\"kt\":\"1\",\"k\":[\"DuK1x8ydpucu3480Jpd'\n b'1XBfjnCwb3dZ3x5b1CJmuUphA\"],\"n\":\"EWWkjZkZDXF74O2bOQ4H5hu4nXDlKg2'\n b'm4CBEBkUxibiU\",\"wt\":\"0\",\"w\":[],\"c\":[],\"da\":{\"i\":\"EiBlVttjqvySMbA'\n b'4ShN19rSrz3D0ioNW-Uj92Ri7XnFE\",\"s\":\"1\",\"t\":\"ixn\",\"p\":\"EvP2kWxEjT'\n b'MI3auc6x64EpU-nMQZHiBeKeuavcGdRB24\"}}-AABAADv-a3LeXEStuY1LHknepu'\n b'J7mBcTByugqQ1TNRMrIa0rctfjKsh-hkkkpwDj6M_OLLaFtLqBpmdNTUgBPANLzCQ')\n\n # apply Del's delegated inception event message to bob's Kevery\n 
bobKvy.process(ims=bytearray(msg)) # process local copy of msg\n assert delPre in bobKvy.kevers # successfully validated\n delK = bobKvy.kevers[delPre]\n assert delK.delegated\n assert delK.serder.diger.qb64 == delSrdr.dig # key state updated so event was validated\n assert delK.serder.diger.qb64 == 'ESDuaqpoI8-HLD8-eLijUMZpXqYFkNArJFDvt3ABYr9I'\n\n # apply msg to del's Kevery\n delKvy.process(ims=bytearray(msg)) # process remote copy of msg\n assert delKvy.kevers[delPre].serder.diger.qb64 == delSrdr.dig\n\n # Setup Del rotation event assuming that Bob's next event will be an ixn delegating event\n verfers, digers, cst, nst = delMgr.rotate(pre=delPre, temp=True)\n\n seal = eventing.SealLocation(i=bobK.prefixer.qb64,\n s=\"{:x}\".format(bobK.sn+1),\n t=coring.Ilks.ixn,\n p=bobK.serder.diger.qb64)\n\n assert seal._asdict() == {'i': 'EiBlVttjqvySMbA4ShN19rSrz3D0ioNW-Uj92Ri7XnFE',\n 's': '2',\n 't': 'ixn',\n 'p': 'EtzXPztLsGC5DGyooSdHdBGIOHjhblBWtZ_AOhGS-hDE'}\n\n\n delSrdr = eventing.deltate(pre=delK.prefixer.qb64,\n keys=[verfer.qb64 for verfer in verfers],\n dig=delK.serder.diger.qb64,\n seal=seal,\n sn=delK.sn+1,\n nxt=coring.Nexter(digs=[diger.qb64 for diger in digers]).qb64)\n\n assert delSrdr.dig == 'E-dZsWLp2IIPVDbGdGS-yvuw4HeV_w_w76FHsofmuiq0'\n\n # Now create delegating rotation event\n seal = eventing.SealEvent(i=delK.prefixer.qb64,\n s=delSrdr.ked[\"s\"],\n d=delSrdr.dig)\n bobSrdr = eventing.interact(pre=bobK.prefixer.qb64,\n dig=bobK.serder.diger.qb64,\n sn=bobK.sn+1,\n data=[seal._asdict()])\n\n sigers = bobMgr.sign(ser=bobSrdr.raw, verfers=bobK.verfers)\n\n msg = bytearray(bobSrdr.raw)\n counter = coring.Counter(code=coring.CtrDex.ControllerIdxSigs,\n count=len(sigers))\n msg.extend(counter.qb64b)\n for siger in sigers:\n msg.extend(siger.qb64b)\n\n assert msg == bytearray(b'{\"v\":\"KERI10JSON000107_\",\"i\":\"EiBlVttjqvySMbA4ShN19rSrz3D0ioNW-U'\n b'j92Ri7XnFE\",\"s\":\"2\",\"t\":\"ixn\",\"p\":\"EtzXPztLsGC5DGyooSdHdBGIOHjhb'\n b'lBWtZ_AOhGS-hDE\",\"a\":[{\"i\":\"ErLe2qWp4VCmDp7v_R01tC-ha13ZEZY0VGcg'\n b'YtPRhqPs\",\"s\":\"1\",\"d\":\"E-dZsWLp2IIPVDbGdGS-yvuw4HeV_w_w76FHsofmu'\n b'iq0\"}]}-AABAAmloDxOwz6ztvRR_4N8Hn-6ZJk6_0nQhfNE7bzX6NpJRfYDwmUw3'\n b'rXod0g46iFOLqEWw12oaFVzVH85NYAh67Ag')\n\n # apply msg to bob's Kevery\n bobKvy.process(ims=bytearray(msg)) # process local copy of msg\n assert bobK.serder.diger.qb64 == bobSrdr.dig # key state updated so event was validated\n\n # apply msg to del's Kevery\n delKvy.process(ims=bytearray(msg)) # process remote copy of msg\n assert delKvy.kevers[bobPre].serder.diger.qb64 == bobSrdr.dig\n\n # now create msg from Del's delegated rotation event\n sigers = delMgr.sign(ser=delSrdr.raw, verfers=verfers)\n\n msg = bytearray(delSrdr.raw)\n counter = coring.Counter(code=coring.CtrDex.ControllerIdxSigs,\n count=len(sigers))\n msg.extend(counter.qb64b)\n for siger in sigers:\n msg.extend(siger.qb64b)\n\n assert msg == bytearray(b'{\"v\":\"KERI10JSON0001a1_\",\"i\":\"ErLe2qWp4VCmDp7v_R01tC-ha13ZEZY0VG'\n b'cgYtPRhqPs\",\"s\":\"1\",\"t\":\"drt\",\"p\":\"ESDuaqpoI8-HLD8-eLijUMZpXqYFk'\n b'NArJFDvt3ABYr9I\",\"kt\":\"1\",\"k\":[\"DTf6QZWoet154o9wvzeMuNhLQRr8JaAU'\n b'eiC6wjB_4_08\"],\"n\":\"E8kyiXDfkE7idwWnAZQjHbUZMz-kd_yIMH0miptIFFPo'\n b'\",\"wt\":\"0\",\"wr\":[],\"wa\":[],\"a\":[],\"da\":{\"i\":\"EiBlVttjqvySMbA4ShN'\n b'19rSrz3D0ioNW-Uj92Ri7XnFE\",\"s\":\"2\",\"t\":\"ixn\",\"p\":\"EtzXPztLsGC5DG'\n b'yooSdHdBGIOHjhblBWtZ_AOhGS-hDE\"}}-AABAAXcUl6KlY4VOx8ZumFMc0uR4iH'\n b'BGmPQo4IAx0nIiiEDB_u2ewkvgIDIp1ELDGxfc2VVUkl38Z7PqwydBdpIK0DA')\n\n\n # 
apply Del's delegated inception event message to bob's Kevery\n bobKvy.process(ims=bytearray(msg)) # process local copy of msg\n assert delK.delegated\n assert delK.serder.diger.qb64 == delSrdr.dig # key state updated so event was validated\n assert delK.serder.diger.qb64 == 'E-dZsWLp2IIPVDbGdGS-yvuw4HeV_w_w76FHsofmuiq0'\n\n # apply msg to del's Kevery\n delKvy.process(ims=bytearray(msg)) # process remote copy of msg\n assert delKvy.kevers[delPre].serder.diger.qb64 == delSrdr.dig\n\n assert not os.path.exists(delKS.path)\n assert not os.path.exists(delDB.path)\n assert not os.path.exists(bobKS.path)\n assert not os.path.exists(bobDB.path)\n\n \"\"\"End Test\"\"\"\n\n\nif __name__ == \"__main__\":\n test_delegation()\n","sub_path":"tests/core/test_delegating.py","file_name":"test_delegating.py","file_ext":"py","file_size_in_byte":12490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"225105259","text":"from keras.datasets import mnist\nfrom keras.utils import to_categorical\nfrom keras import layers\nfrom keras import models\n\n(features, labels), (test_features, test_labels) = mnist.load_data()\n\nnetwork = models.Sequential()\nnetwork.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))\nnetwork.add(layers.Dense(10, activation='softmax'))\n\nnetwork.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nfeatures = features.reshape((60000, 28 * 28))\nfeatures = features.astype('float32') / 255\ntest_features = test_features.reshape((10000, 28 * 28))\ntest_features = test_features.astype('float32') / 255\n\nlabels = to_categorical(labels)\ntest_labels = to_categorical(test_labels)\n\nnetwork.fit(features, labels, epochs=5, batch_size=128)\ntest_loss, test_acc = network.evaluate(test_features, test_labels)\nprint('test_acc:', test_acc)\n","sub_path":"python/handwriting.py","file_name":"handwriting.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"397877992","text":"# -*- coding: utf-8 -*-\n\n# 抖音无水印视频解析\n\nimport re\nimport PySimpleGUI as sg\nimport requests\n\nsg.change_look_and_feel(\"Default1\")\nheaders = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36\"\n}\n\nsj_headers = {\n \"user-agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1\"\n}\n\n\nclass ParseVideo(object):\n \"\"\"\n 根据分享链接,解析抖音无水印视频\n \"\"\"\n\n def __init__(self, url):\n self.url = url\n self.xhr_url = \"https://www.iesdouyin.com/web/api/v2/aweme/iteminfo/?item_ids={}\"\n self.wsy_url = \"https://aweme-hl.snssdk.com/aweme/v1/play/?video_id={}\"\n self.headers = headers\n self.sj_headers = sj_headers\n\n def get_url_id(self):\n \"\"\"\n 获取跳转后的id\n \"\"\"\n response = requests.get(url=self.url, headers=self.headers, allow_redirects=False)\n\n location = response.headers.get(\"location\")\n\n url_id = re.search(r'video/(.*?)/', location).group(1)\n print(url_id)\n\n return url_id\n\n def get_video_id(self, v_id):\n \"\"\"\n 获取video_id\n \"\"\"\n url = self.xhr_url.format(v_id)\n\n response = requests.get(url=url, headers=self.headers)\n data = response.json()\n\n v_url = data['item_list'][0]['video']['play_addr']['url_list'][0]\n\n video_id = re.search(r'video_id=(.*?)&', v_url).group(1)\n print(video_id)\n\n return video_id\n \n def 
get_real_url(self, v_id):\n \"\"\"获取无水印url\"\"\"\n url = self.wsy_url.format(v_id)\n\n response = requests.get(url=url, headers=self.sj_headers, allow_redirects=False)\n\n real_url = response.headers.get('location')\n print(real_url)\n return real_url\n\n def run(self):\n url_id = self.get_url_id()\n video_id = self.get_video_id(url_id)\n # self.get_real_url(video_id)\n return self.get_real_url(video_id)\n\nclass Kuaishou(object):\n def __init__(self,url):\n self.url = url\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3329.0 Mobile Safari/537.36'}\n self.req = requests.get(url=url, headers=self.headers).url\n self.headers1 = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n # 'Cookie': 'did=web_282e70945a114e2389e104b8bdc7388a; didv=1596413681000; clientid=3; client_key=65890b29; Hm_lvt_86a27b7db2c5c0ae37fee4a8a35033ee=1596413682; Hm_lpvt_86a27b7db2c5c0ae37fee4a8a35033ee=1596413697',\n 'Cookie': 'did=web_282e70945a114e2389e104b8bdc7388a; didv=1596413681000; clientid=3; client_key=65890b29; Hm_lvt_86a27b7db2c5c0ae37fee4a8a35033ee=1596413682; Hm_lpvt_86a27b7db2c5c0ae37fee4a8a35033ee=1596413697',\n 'Host': 'v.kuaishou.com',\n 'Sec-Fetch-Dest': 'document',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'none',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Mobile Safari/537.36', }\n self.est1 = requests.get(url=self.req, headers=self.headers1).text\n self.url = re.findall('srcNoMark\":\"(.*?)\"', self.est1)[0]\n\n def rt(self):\n return self.url\n\nclass Gui(object):\n def __init__(self):\n self.tab1_layout = [[sg.Text('抖音分享链接')],\n [sg.Multiline(key='url')],\n [sg.OK('解析'), sg.Cancel('关闭')],\n [sg.Text('无水印地址')],\n [sg.Multiline('解析结果',key='down_url')]\n\n ]\n self.tab2_layout = [[sg.Text('快手分享链接')],\n [sg.Multiline(key='ks_url')],\n [sg.OK('解析'), sg.Cancel('关闭')],\n [sg.Text('无水印地址')],\n [sg.Multiline('解析结果',key='ks_down_url')]\n\n ]\n self.layout = [\n [sg.TabGroup([[sg.Tab('抖音', self.tab1_layout), sg.Tab('快手', self.tab2_layout)]])],\n [sg.Button('下载该视频', key='but_dow', visible=False)]\n ]\n self.window = sg.Window('抖音快手无水印视频 V2.2 By 李彦军', self.layout,size=(380,280))\n\n def down_video(self, url, path):\n try:\n res = requests.get(url, headers=headers).content\n with open(path, 'wb') as f:\n f.write(res)\n save_out = '保存成功'\n except Exception as e:\n save_out = '保存失败'\n return save_out\n def run(self):\n while True:\n event, values = self.window.read()\n\n if event in (None, '关闭','关闭1'): # if user closes window or clicks cancel\n break\n elif event == '解析':\n url = values[r'url']\n url = re.findall('https://.*/', url)\n\n if url not in (None, []):\n test = ParseVideo(url=url[0])\n self.val = test.run()\n self.window['down_url'].update(self.val)\n self.window['but_dow'].update(visible=True)\n\n else:\n sg.Popup('分享链接错误', no_titlebar='True', background_color='#fff')\n elif event == '解析0':\n url = values[r'ks_url']\n url = re.findall('https://.*', url)\n if url not in (None, []):\n test = Kuaishou(url=url[0])\n self.val = test.rt()\n self.window['ks_down_url'].update(self.val)\n self.window['but_dow'].update(visible=True)\n else:\n sg.Popup('分享链接错误', no_titlebar='True', background_color='#fff')\n elif event 
== 'but_dow':\n text = sg.popup_get_file('message', save_as=True, no_window=True,file_types=(('视频文件', '*.mp4'),))\n if text != '':\n out_save = self.down_video(self.val,text)\n sg.Popup(out_save, no_titlebar='True', background_color='#fff')\n\n\nif __name__ == '__main__':\n douyin_window = Gui()\n douyin_window.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"253689270","text":"import requests\nfrom lxml import etree\nimport db_tools\n\nurl = 'http://music.taihe.com/top/dayhot'\n\n\nres = requests.get(url).content.decode()\nele = etree.HTML(res)\nurls = ele.xpath('//span[@class=\"song-title \"]/a[1]/@href')\nj = 1\ndb = db_tools.DBTools.creater()\nfor i in urls:\n print(j)\n url = 'http://music.taihe.com' + i\n res1 = requests.get(url).content.decode()\n ele1 = etree.HTML(res1)\n song_name = ele1.xpath('//span[@class=\"name\"][1]/text()')[0]\n singer = ele1.xpath('//span[@class=\"author_list\"][1]/@title')[0]\n album = ele1.xpath('//p[@class=\"album desc\"][1]/a/text()')[0]\n time = ele1.xpath('//p[@class=\"publish desc\"]/text()')[0]\n company = ele1.xpath('//p[@class=\"company desc\"]/text()')[0]\n lyric = ele1.xpath('//div[@id=\"lyricCont\"]/@data-lrclink')[0]\n j += 1\n print(song_name, singer, album, time, company, lyric)\n db.setSQL('insert into qianqianyinyue(song_name, singer, album, time, company, lyric) values(%s,%s,%s,%s,%s,%s)')\n db.excute((song_name, singer, album, time, company, lyric))\ndb.close()\n","sub_path":"Replace/晨讲/千千音乐/千千音乐榜单.py","file_name":"千千音乐榜单.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62351530","text":"from django.urls import path\nfrom django.conf.urls import url, include\nfrom . 
import views\n\nurlpatterns = [\n path('', views.Home, name='home'),\n path('listado_filtro//', views.ListadoPersonasFiltroTipoPersona, name='listado_filtro_cargo'),\n path('ingreso_personas/', views.IngresoPersonas, name='ingreso_personas'),\n path('listado_personas/', views.ListadoPersonas, name='listado_personas'),\n path('eliminar_persona//', views.Eliminar_Personas, name='eliminar_persona'),\n path('modificar_persona//', views.ModificacionPersonas, name='modificar_persona'),\n path('lista_filtro_cargo//', views.ListadoPersonasFiltroTipoPersona, name='lista_filtro_cargo'),\n path('lista_eventos/', views.ListadoEventos, name='lista_eventos'),\n path('lista_eventos_filtro_tipo/te//', views.ListadoEventosFiltroTipoEvento, name='lista_eventos_filtro_tipo'),\n path('lista_eventos_filtro_comunidad/c//', views.ListadoEventosFiltroComunidad, name='lista_eventos_filtro_comunidad'),\n path('administracion_comunidades/', views.AdministracionComunidades, name='administracion_comunidades'),\n path('listado_agentes_pastorales/', views.ListadoAgentesPastorales, name='listado_agentes_pastorales'),\n path('listado_comunidades', views.ListaComunidades, name='listado_comunidades')\n\n]","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368200114","text":"\"\"\"Compute Rossby wave source from the long-term mean flow.\n\nThis example uses the standard interface.\n\n\"\"\"\nimport numpy as np\nimport matplotlib as mpl\nmpl.rcParams['mathtext.default'] = 'regular'\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic\nfrom netCDF4 import Dataset\n\nfrom windspharm.standard import VectorWind\nfrom windspharm.tools import prep_data, recover_data, order_latdim\nfrom windspharm.examples import example_data_path\n\n\n# Read zonal and meridional wind components from file using the cdms2 module\n# from CDAT. The components are defined on pressure levels and are in separate\n# files.\nncu = Dataset(example_data_path('uwnd_mean.nc'), 'r')\nuwnd = ncu.variables['uwnd'][:]\nlons = ncu.variables['longitude'][:]\nlats = ncu.variables['latitude'][:]\nncu.close()\nncv = Dataset(example_data_path('vwnd_mean.nc'), 'r')\nvwnd = ncv.variables['vwnd'][:]\nncv.close()\n\n# The standard interface requires that latitude and longitude be the leading\n# dimensions of the input wind components, and that wind components must be\n# either 2D or 3D arrays. The data read in is 3D and has latitude and\n# longitude as the last dimensions. The bundled tools can make the process of\n# re-shaping the data a lot easier to manage.\nuwnd, uwnd_info = prep_data(uwnd, 'tyx')\nvwnd, vwnd_info = prep_data(vwnd, 'tyx')\n\n# It is also required that the latitude dimension is north-to-south. Again the\n# bundled tools make this easy.\nlats, uwnd, vwnd = order_latdim(lats, uwnd, vwnd)\n\n# Create a VectorWind instance to handle the computations.\nw = VectorWind(uwnd, vwnd)\n\n# Compute components of rossby wave source: absolute vorticity, divergence,\n# irrotational (divergent) wind components, gradients of absolute vorticity.\neta = w.absolutevorticity()\ndiv = w.divergence()\nuchi, vchi = w.irrotationalcomponent()\netax, etay = w.gradient(eta)\n\n# Combine the components to form the Rossby wave source term. 
Re-shape the\n# Rossby wave source array to the 4D shape of the wind components as they were\n# read off files.\nS = -eta * div - (uchi * etax + vchi * etay)\nS = recover_data(S, uwnd_info)\n\n# Pick out the field for December and add a cyclic point (the cyclic point is\n# for plotting purposes).\nS_dec, lons_c = addcyclic(S[11], lons)\n\n# Plot Rossby wave source.\nm = Basemap(projection='cyl', resolution='c', llcrnrlon=0, llcrnrlat=-90,\n urcrnrlon=360.01, urcrnrlat=90)\nx, y = m(*np.meshgrid(lons_c, lats))\nclevs = [-30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30]\nm.contourf(x, y, S_dec*1e11, clevs, cmap=plt.cm.RdBu_r,\n extend='both')\nm.drawcoastlines()\nm.drawparallels((-90, -60, -30, 0, 30, 60, 90), labels=[1,0,0,0])\nm.drawmeridians((0, 60, 120, 180, 240, 300, 360), labels=[0,0,0,1])\nplt.colorbar(orientation='horizontal')\nplt.title('Rossby Wave Source ($10^{-11}$s$^{-1}$)', fontsize=16)\nplt.show()\n","sub_path":"examples/standard/rws_example.py","file_name":"rws_example.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409621720","text":"import boto3\n\n\ndef create_loadbalancer(lb_name, vpc, protocol):\n \"\"\"\n A fucntion to create load balancer\n \"\"\"\n client = boto3.client('elbv2', region_name='ap-south-1')\n conn = boto3.client('ec2', region_name='ap-south-1')\n # get subnets to create load balancer (Min=3)\n response = conn.describe_subnets(\n Filters=[\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc\n ]\n }\n ]\n )['Subnets']\n subnets = []\n\n for res in response:\n subnets.append(res['SubnetId'])\n\n # create the load balancer\n\n response = client.create_load_balancer(\n Name=lb_name,\n Subnets=subnets,\n Type='application'\n )['LoadBalancers']\n\n # get load balancer arn to create the target group\n lb_arn = response[0]['LoadBalancerArn']\n # create the targer group\n tg_arn = create_target_group('tg1', protocol, vpc)\n\n # create the listener that points to the target group\n response = client.create_listener(\n LoadBalancerArn=lb_arn,\n Protocol=protocol,\n Port=80,\n DefaultActions=[\n {\n 'Type': 'forward',\n 'TargetGroupArn': tg_arn\n }\n ],\n )\n\n\ndef create_target_group(tg_name, protcol, vpc):\n \"\"\"\n A function to create the target group\n \"\"\"\n tg = boto3.client('elbv2', region_name='ap-south-1')\n # create the target groups\n response = tg.create_target_group(\n Name=tg_name,\n Protocol=protcol,\n VpcId=vpc,\n Port=80,\n TargetType='instance'\n )['TargetGroups']\n\n # get the target\n tg_arn = response[0]['TargetGroupArn']\n\n # input the running ec2 instance you want to attach to the target group\n response = tg.register_targets(\n TargetGroupArn=tg_arn,\n Targets=[\n {\n 'Id': 'i-074c56ce8b59e515f',\n },\n {\n 'Id': 'i-08eb4e570fa4cdf6b',\n },\n ],\n )\n return tg_arn\n\n\ncreate_loadbalancer(\"Loadbalancer1\", 'vpc-7d587715', \"HTTP\")\n","sub_path":"Aws create service/load balancer_application.py","file_name":"load balancer_application.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276925220","text":"from __future__ import print_function, division, absolute_import\n\nfrom timeit import default_timer as timer\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nfrom matplotlib.pylab import imshow, jet, savefig, ion\nimport numpy as np\nfrom numba import cuda\n\n@cuda.jit(device=True)\ndef get_color(row, col, max_iter):\n color = 0\n 
z = 0.0j\n for color in range(max_iter):\n z = z*z + complex(row, col)\n if (z.real*z.real + z.imag*z.imag) >= 4:\n return color\n return max_iter\n\n@cuda.jit\ndef generate_mandelbrot(image):\n px = ((3.0) / WIDTH)\n py = ((2.0) / HEIGHT) \n\n startX = cuda.blockDim.x * cuda.blockIdx.x + cuda.threadIdx.x\n startY = cuda.blockDim.y * cuda.blockIdx.y + cuda.threadIdx.y\n gridX = cuda.gridDim.x * cuda.blockDim.x\n gridY = cuda.gridDim.y * cuda.blockDim.y\n\n for row in range(startX, WIDTH, gridX):\n i = -1.75 + row * px\n for col in range(startY, HEIGHT, gridY):\n j = -1.0 + col * py\n color = get_color(i, j, MAX_ITER)\n image[col, row] = color\n #return image\n\nSIZE = 1024\nWIDTH = SIZE\nHEIGHT = SIZE\nMAX_ITER = 20\n\ndef main(): \n\n blockdim = (32, 8)\n griddim = (32,16)\n\n image = np.zeros((WIDTH, HEIGHT), dtype=np.uint8)\n #s = timer()\n d_image = cuda.to_device(image)\n generate_mandelbrot[griddim, blockdim](d_image) \n d_image.to_host()\n #generate_mandelbrot(image)\n # e = timer() - s\n imshow(image)\n savefig('mandelbrot.png')\n\nif __name__== \"__main__\":\n main()\n","sub_path":"numba-gpu-version/mandelbrotV3.py","file_name":"mandelbrotV3.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"654241290","text":"\"\"\"\nDataset Utils\n-------------\n\nShared functionality for downloading, naming, and extracting the contents\nof datasets, as well as filtering for particular subsets.\n\"\"\"\nimport logging\n\nfrom .. import constants, utils\nfrom ..io import utils as io_utils\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef download_file(\n url, *, filename=None, dirpath=constants.DEFAULT_DATA_DIR, force=False\n):\n utils.deprecated(\n \"This function has been moved to `textacy.io.utils.download_file()` \"\n \"and is aliased here only for backwards compatibility. \"\n \"This alias will be removed in v0.10.0.\",\n action=\"once\",\n )\n return io_utils.download_file(url, filename=filename, dirpath=dirpath, force=force)\n\n\ndef get_filename_from_url(url):\n utils.deprecated(\n \"This function has been moved to `textacy.io.utils.get_filename_from_url()` \"\n \"and is aliased here only for backwards compatibility. \"\n \"This alias will be removed in v0.10.0.\",\n action=\"once\",\n )\n return io_utils.get_filename_from_url(url)\n\n\ndef unpack_archive(filepath, *, extract_dir=None):\n utils.deprecated(\n \"This function has been moved to `textacy.io.utils.unpack_archive()` \"\n \"and is aliased here only for backwards compatibility. \"\n \"This alias will be removed in v0.10.0.\",\n action=\"once\",\n )\n return io_utils.unpack_archive(filepath, extract_dir=extract_dir)\n\n\ndef validate_set_member_filter(filter_vals, vals_type, valid_vals=None):\n utils.deprecated(\n \"This function has been moved to `textacy.utils.validate_set_members()` \"\n \"and is aliased here only for backwards compatibility. \"\n \"This alias will be removed in v0.10.0.\",\n action=\"once\",\n )\n return utils.validate_set_members(filter_vals, vals_type, valid_vals=valid_vals)\n\n\ndef validate_and_clip_range_filter(filter_range, full_range, val_type=None):\n utils.deprecated(\n \"This function has been moved to `textacy.utils.validate_and_clip_range()` \"\n \"and is aliased here only for backwards compatibility. 
\"\n \"This alias will be removed in v0.10.0.\",\n action=\"once\",\n )\n return utils.validate_and_clip_range(filter_range, full_range, val_type=val_type)\n","sub_path":"src/textacy/datasets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"202760430","text":"class BinaryTree:\n\n def __init__(self, size):\n self.customList = size * [None]\n self.lastUsedIndex = 0\n self.maxSize = size\n \n def insertNode(self, value):\n if self.lastUsedIndex + 1 == self.maxSize:\n return \"The Binary Tree is full\"\n self.customList[self.lastUsedIndex+1] = value\n self.lastUsedIndex += 1\n return \"The value has been added successfully\"\n\n def searchNode(self, nodeValue):\n for i in range(len(self.customList)):\n if self.customList[i] == nodeValue:\n return \"Success\"\n return \"Not found\"\n \n def preOrderTraversal(self, index):\n if index > self.lastUsedIndex:\n return \n print(self.customList[index])\n self.preOrderTraversal(index * 2) \n self.preOrderTraversal(index * 2 + 1)\n \n def inOrderTraversal(self, index):\n if index > self.lastUsedIndex:\n return\n self.inOrderTraversal(index * 2)\n print(self.customList[index])\n self.inOrderTraversal(index * 2 + 1)\n\n\n def postOrderTraversal(self, index):\n if index > self.lastUsedIndex:\n return\n self.postOrderTraversal(index * 2)\n self.postOrderTraversal(index * 2 + 1)\n print(self.customList[index])\n \n def levelOverTraversal(self, index):\n for i in range(index, self.lastUsedIndex + 1):\n print(self.customList[i])\n \n def deleteNode(self, value):\n if self.lastUsedIndex == 0:\n return \"No node to delete\"\n for i in range(1, self.lastUsedIndex + 1):\n if self.customList[i] == value:\n self.customList[i] = self.customList[self.lastUsedIndex]\n self.customList[self.lastUsedIndex] = None\n self.lastUsedIndex -= 1\n return \"Node is deleted successfully\"\n\n def deleteBinaryTree(self):\n self.customList = None\n return \"Binary tree is successfully deleted\"\n\n\nnewBT = BinaryTree(8)\nnewBT.insertNode(\"Drinks\")\nnewBT.insertNode(\"Hot\")\nnewBT.insertNode(\"Cold\")\nnewBT.insertNode(\"Tea\")\nnewBT.insertNode(\"Coffee\")\n\nnewBT.deleteNode('Tea')\n\nnewBT.deleteBinaryTree()\n\nnewBT.levelOverTraversal(1)\n# newBT.preOrderTraversal(1)\n# newBT.inOrderTraversal(1)","sub_path":"DataStructuresAndAlgo/Tree/PythonList/binaryTree.py","file_name":"binaryTree.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"5786740","text":"import json\n\nfrom boto3 import client as boto3_client\nfrom boto3.dynamodb.conditions import Attr\nimport requests\n\nfrom common import get_room\nfrom cfg import API_URL\n\n\ndef lambda_handler(event, context):\n user_id = event.get(\"queryStringParameters\", {}).get('userId')\n params = {}\n if user_id:\n params['userId'] = user_id\n\n resp = requests.get(f\"{API_URL}/api/v1/rooms\", params=params)\n if resp.ok:\n\n rooms = resp.json()\n\n for room in rooms:\n realtime_room_data = get_room(room['id'])\n if realtime_room_data:\n room['userCount'] = len(realtime_room_data['users'])\n else:\n room['userCount'] = 0\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(rooms),\n 'headers': {\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Headers': 'token'\n },\n }\n\n\nif __name__ == \"__main__\":\n print(lambda_handler(None, 
None))\n","sub_path":"src/chat/rest_api/fixed_room.py","file_name":"fixed_room.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"535083121","text":"import datetime\nimport time\nimport json\nimport Tkinter as tk \nimport ttk\n\nimport data_bus\nimport wire\n\nclass MotorCTRLStatusModule(ttk.Frame):\n\tdef __init__(self, parent, data_bus):\n\t\tself.parent = parent\n\t\tself.data_bus = data_bus\n\n\t\tself.MotorCTRLStatusFrame = ttk.Labelframe(self.parent, text='Motor CTRL Status', width=200)\n\n\t\tself.limitLabel = ttk.Label(self.MotorCTRLStatusFrame, text='N/A', width=20)\n\t\tself.limitLabel.grid(column=1, row=1)\n\n\t\tself.errorLabel = ttk.Label(self.MotorCTRLStatusFrame, text='N/A', width=20)\n\t\tself.errorLabel.grid(column=1, row=2)\n\n\t\tself.motLabel = ttk.Label(self.MotorCTRLStatusFrame, text='N/A', width=20)\n\t\tself.motLabel.grid(column=1, row=3)\n\n\t\tself.txRxErrLabel = ttk.Label(self.MotorCTRLStatusFrame, text='N/A', width=20)\n\t\tself.txRxErrLabel.grid(column=1, row=4)\n\n\t\tself.limit2Label = ttk.Label(self.MotorCTRLStatusFrame, text='N/A')\n\t\tself.limit2Label.grid(column=2, row=1)\n\n\t\tself.error2Label = ttk.Label(self.MotorCTRLStatusFrame, text='N/A')\n\t\tself.error2Label.grid(column=2, row=2)\n\n\t\tself.mot2Label = ttk.Label(self.MotorCTRLStatusFrame, text='N/A')\n\t\tself.mot2Label.grid(column=2, row=3)\n\n\t\tself.txRxErr2Label = ttk.Label(self.MotorCTRLStatusFrame, text='N/A')\n\t\tself.txRxErr2Label.grid(column=2, row=4)\n\n\t\tself.harnesses = {}\n\n\t\tself.data_bus.record_callback.append(self.handleNewRecord)\n\n\tdef handleNewRecord(self, record):\n\t\tfields = record.field.split('\\0')\n\t\tname = fields[0]\n\t\tif name == 'motflag':\n\t\t\tmeta = json.loads(fields[1])\n\t\t\tself.harnesses[name] = wire.Harness(meta['harness'])\n\t\t\trecord.value_callback.append(self.handleValue)\n\t\t\trecord.Subscribe()\n\t\telif name == 'motflag2':\n\t\t\tmeta = json.loads(fields[1])\n\t\t\tself.harnesses[name] = wire.Harness(meta['harness'])\n\t\t\trecord.value_callback.append(self.handleValue2)\n\t\t\trecord.Subscribe()\n\n\tdef handleValue(self, record):\n\t\tself.harnesses['motflag'].buf = buffer(record.value)\n\t\tlim_d = self.harnesses['motflag']['lim'].value\n\t\tlimText = 'lim: (' + str(lim_d) + ') '\n\t\tif ((lim_d & 0b00000001) != 0):\n\t\t\tlimText += 'Temp'\n\t\tif ((lim_d & 0b00000010) != 0):\n\t\t\tlimText += 'Bus Volt Low'\n\t\tif ((lim_d & 0b00000100) != 0):\n\t\t\tlimText += 'Bus Volt High'\n\t\tif ((lim_d & 0b00001000) != 0):\n\t\t\tlimText += 'Bus Current'\n\t\tif ((lim_d & 0b00010000) != 0):\n\t\t\tlimText += 'Velocity'\n\t\tif ((lim_d & 0b00100000) != 0):\n\t\t\tlimText += 'Motor Curr'\n\t\tif ((lim_d & 0b01000000) != 0):\n\t\t\tlimText += 'Out Volt PWM'\n\t\tself.limitLabel['text'] = limText\n\n\t\terror = self.harnesses['motflag']['err'].value \n\t\tif error == 0:\n\t\t\tself.errorLabel['text'] = 'err: OK (' + str(error) + ')'\n\t\t\tself.errorLabel['foreground'] = 'black'\n\t\telse:\n\t\t\terrorText = 'err: (' + str(error) + ') '\n\t\t\tif ((error & 0b00000001) != 0):\n\t\t\t\terrorText += 'Dsat, '\n\t\t\tif ((error & 0b00000010) != 0):\n\t\t\t\terrorText += '15Vrail, '\n\t\t\tif ((error & 0b00000100) != 0):\n\t\t\t\terrorText += 'CfgReadErr, '\n\t\t\tif ((error & 0b00001000) != 0):\n\t\t\t\terrorText += 'WatchdogRst, '\n\t\t\tif ((error & 0b00010000) != 0):\n\t\t\t\terrorText += 'Hall, '\n\t\t\tif ((error & 0b00100000) != 
0):\n\t\t\t\terrorText += 'DC Overvolt, '\n\t\t\tif ((error & 0b01000000) != 0):\n\t\t\t\terrorText += 'SW OverCurr, '\n\t\t\tif ((error & 0b10000000) != 0):\n\t\t\t\terrorText += 'HW OverCurr'\n\t\t\tself.errorLabel['text'] = errorText\n\t\t\tself.errorLabel['foreground'] = 'red'\n\n\t\tself.motLabel['text'] = 'mot: ' + str(self.harnesses['motflag']['mot'].value)\n\n\t\tself.txRxErrLabel['text'] = 'txerr: ' + str(self.harnesses['motflag']['txerr'].value) + ' rxerr: ' + str(self.harnesses['motflag']['rxerr'].value)\n\n\tdef handleValue2(self, record):\n\t\tself.harnesses['motflag2'].buf = buffer(record.value)\n\t\tlim_d = self.harnesses['motflag2']['lim'].value\n\t\tlimText = 'lim: (' + str(lim_d) + ') '\n\t\tif ((lim_d & 0b00000001) != 0):\n\t\t\tlimText += 'Temp'\n\t\tif ((lim_d & 0b00000010) != 0):\n\t\t\tlimText += 'Bus Volt Low'\n\t\tif ((lim_d & 0b00000100) != 0):\n\t\t\tlimText += 'Bus Volt High'\n\t\tif ((lim_d & 0b00001000) != 0):\n\t\t\tlimText += 'Bus Current'\n\t\tif ((lim_d & 0b00010000) != 0):\n\t\t\tlimText += 'Velocity'\n\t\tif ((lim_d & 0b00100000) != 0):\n\t\t\tlimText += 'Motor Curr'\n\t\tif ((lim_d & 0b01000000) != 0):\n\t\t\tlimText += 'Out Volt PWM'\n\t\tself.limit2Label['text'] = limText\n\n\t\terror = self.harnesses['motflag2']['err'].value \n\t\tif error == 0:\n\t\t\tself.error2Label['text'] = 'err: OK (' + str(error) + ')'\n\t\t\tself.error2Label['foreground'] = 'black'\n\t\telse:\n\t\t\terrorText = 'err: (' + str(error) + ') '\n\t\t\tif ((error & 0b00000001) != 0):\n\t\t\t\terrorText += 'Dsat, '\n\t\t\tif ((error & 0b00000010) != 0):\n\t\t\t\terrorText += '15Vrail, '\n\t\t\tif ((error & 0b00000100) != 0):\n\t\t\t\terrorText += 'CfgReadErr, '\n\t\t\tif ((error & 0b00001000) != 0):\n\t\t\t\terrorText += 'WatchdogRst, '\n\t\t\tif ((error & 0b00010000) != 0):\n\t\t\t\terrorText += 'Hall, '\n\t\t\tif ((error & 0b00100000) != 0):\n\t\t\t\terrorText += 'DC Overvolt, '\n\t\t\tif ((error & 0b01000000) != 0):\n\t\t\t\terrorText += 'SW OverCurr, '\n\t\t\tif ((error & 0b10000000) != 0):\n\t\t\t\terrorText += 'HW OverCurr'\n\t\t\tself.error2Label['text'] = errorText\n\t\t\tself.error2Label['foreground'] = 'red'\n\n\t\tself.mot2Label['text'] = 'mot: ' + str(self.harnesses['motflag2']['mot'].value)\n\n\t\tself.txRxErr2Label['text'] = 'txerr: ' + str(self.harnesses['motflag2']['txerr'].value) + ' rxerr: ' + str(self.harnesses['motflag2']['rxerr'].value)\n","sub_path":"Telemetry/RF Telems/onboard/modules/MotorCTRLStatusModule.py","file_name":"MotorCTRLStatusModule.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180673609","text":"import App\n\ndef StartingOrbit(pShip, pPlanet):\n\t# Send an event saying that the ship is now orbitting the\n\t# planet it's around.\n\tpEvent = App.TGEvent_Create()\n\tpEvent.SetEventType(App.ET_AI_ORBITTING)\n\tpEvent.SetDestination(pPlanet)\n\tpEvent.SetSource(pShip)\n\tApp.g_kEventManager.AddEvent(pEvent)\n\ndef CreateAI(pShip, pPlanet):\n\t#########################################\n\t# Creating PlainAI StartingOrbitScript at (241, 52)\n\tpStartingOrbitScript = App.PlainAI_Create(pShip, \"StartingOrbitScript\")\n\tpStartingOrbitScript.SetScriptModule(\"RunScript\")\n\tpStartingOrbitScript.SetInterruptable(1)\n\tpScript = pStartingOrbitScript.GetScriptInstance()\n\tpScript.SetScriptModule(__name__)\n\tpScript.SetFunction(\"StartingOrbit\")\n\tpScript.SetArguments(pShip, pPlanet)\n\t# Done creating PlainAI 
StartingOrbitScript\n\t#########################################\n\t#########################################\n\t# Creating PlainAI CirclePlanet at (353, 55)\n\tpCirclePlanet = App.PlainAI_Create(pShip, \"CirclePlanet\")\n\tpCirclePlanet.SetScriptModule(\"CircleObject\")\n\tpCirclePlanet.SetInterruptable(1)\n\tpScript = pCirclePlanet.GetScriptInstance()\n\tpScript.SetFollowObjectName(pPlanet.GetName())\n\tpScript.SetNearFacingVector(App.TGPoint3_GetModelLeft())\n\tpScript.SetRoughDistances(pPlanet.GetRadius() + 500, pPlanet.GetRadius() + 540)\n\t# Done creating PlainAI CirclePlanet\n\t#########################################\n\t#########################################\n\t# Creating SequenceAI Sequence at (201, 111)\n\tpSequence = App.SequenceAI_Create(pShip, \"Sequence\")\n\tpSequence.SetInterruptable(1)\n\tpSequence.SetLoopCount(1)\n\tpSequence.SetResetIfInterrupted(1)\n\tpSequence.SetDoubleCheckAllDone(0)\n\tpSequence.SetSkipDormant(0)\n\t# SeqBlock is at (301, 127)\n\tpSequence.AddAI(pStartingOrbitScript)\n\tpSequence.AddAI(pCirclePlanet)\n\t# Done creating SequenceAI Sequence\n\t#########################################\n\t#########################################\n\t# Creating ConditionalAI CloseEnough at (210, 167)\n\t## Conditions:\n\t#### Condition InRange\n\tpInRange = App.ConditionScript_Create(\"Conditions.ConditionInRange\", \"ConditionInRange\", pPlanet.GetRadius() * 3, pShip.GetName(), pPlanet.GetName())\n\t## Evaluation function:\n\tdef EvalFunc(bInRange):\n\t\tACTIVE = App.ArtificialIntelligence.US_ACTIVE\n\t\tDORMANT = App.ArtificialIntelligence.US_DORMANT\n\t\tDONE = App.ArtificialIntelligence.US_DONE\n\t\tif bInRange:\n\t\t\treturn ACTIVE\n\t\treturn DORMANT\n\t## The ConditionalAI:\n\tpCloseEnough = App.ConditionalAI_Create(pShip, \"CloseEnough\")\n\tpCloseEnough.SetInterruptable(1)\n\tpCloseEnough.SetContainedAI(pSequence)\n\tpCloseEnough.AddCondition(pInRange)\n\tpCloseEnough.SetEvaluationFunction(EvalFunc)\n\t# Done creating ConditionalAI CloseEnough\n\t#########################################\n\t#########################################\n\t# Creating PlainAI FlyToPlanet at (328, 183)\n\tpFlyToPlanet = App.PlainAI_Create(pShip, \"FlyToPlanet\")\n\tpFlyToPlanet.SetScriptModule(\"Intercept\")\n\tpFlyToPlanet.SetInterruptable(1)\n\tpScript = pFlyToPlanet.GetScriptInstance()\n\tpScript.SetTargetObjectName(pPlanet.GetName())\n\tpScript.SetInterceptDistance(0.0)\n\tpScript.SetAddObjectRadius(1)\n\t# Done creating PlainAI FlyToPlanet\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI OrbitPriorityList at (156, 227)\n\tpOrbitPriorityList = App.PriorityListAI_Create(pShip, \"OrbitPriorityList\")\n\tpOrbitPriorityList.SetInterruptable(1)\n\t# SeqBlock is at (272, 228)\n\tpOrbitPriorityList.AddAI(pCloseEnough, 1)\n\tpOrbitPriorityList.AddAI(pFlyToPlanet, 2)\n\t# Done creating PriorityListAI OrbitPriorityList\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI OrbitAvoidObstacles at (128, 289)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AvoidObstacles()\n\t## The PreprocessingAI:\n\tpOrbitAvoidObstacles = App.PreprocessingAI_Create(pShip, \"OrbitAvoidObstacles\")\n\tpOrbitAvoidObstacles.SetInterruptable(1)\n\tpOrbitAvoidObstacles.SetPreprocessingMethod(pScript, \"Update\")\n\tpOrbitAvoidObstacles.SetContainedAI(pOrbitPriorityList)\n\t# Done creating PreprocessingAI 
OrbitAvoidObstacles\n\t#########################################\n\treturn pOrbitAvoidObstacles\n","sub_path":"scripts/AI/Player/OrbitPlanet.py","file_name":"OrbitPlanet.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"586858321","text":"import os\nimport json\n\nimport synapse.common as s_common\n\nimport synapse.lib.cell as s_cell\nimport synapse.lib.jupyter as s_jupyter\nimport synapse.lib.msgpack as s_msgpack\nimport synapse.lib.stormsvc as s_stormsvc\n\nimport synapse.tests.utils as s_t_utils\n\nclass TstsvcApi(s_cell.CellApi, s_stormsvc.StormSvc):\n _storm_svc_name = 'testsvc'\n _storm_svc_pkgs = (\n {\n 'name': 'testsvc',\n 'version': (0, 0, 1),\n 'synapse_minversion': [2, 144, 0],\n 'synapse_version': '>=2.8.0,<3.0.0',\n 'commands': (\n {\n 'name': 'testsvc.magic',\n 'storm': '[inet:ipv4=0]',\n },\n )\n },\n )\n\n async def testmeth(self):\n return 'shazam'\n\nclass Tstsvc(s_cell.Cell):\n cellapi = TstsvcApi\n\nclass JupyterTest(s_t_utils.SynTest):\n testmods = ['synapse.tests.utils.TestModule']\n\n async def test_tempcorecmdr(self):\n outp = self.getTestOutp()\n cmdrcore = await s_jupyter.getTempCoreCmdr(self.testmods, outp)\n self.false(cmdrcore.isfini)\n nodes = await cmdrcore.eval('[test:str=beep]', cmdr=True)\n self.len(1, nodes)\n self.eq(nodes[0][0], ('test:str', 'beep'))\n self.true(outp.expect('cli> storm [test:str=beep]'))\n await cmdrcore.fini()\n self.true(cmdrcore.isfini)\n\n async def test_tempcoreprox(self):\n prox = await s_jupyter.getTempCoreProx(self.testmods)\n self.false(prox.isfini)\n msgs = await prox.storm('[test:str=beep]').list()\n nodes = [m[1] for m in msgs if m[0] == 'node']\n self.len(1, nodes)\n self.eq(nodes[0][0], ('test:str', 'beep'))\n await prox.fini()\n self.true(prox.isfini)\n\n async def test_tempcorecmdrstormsvc(self):\n cmdrcore, svcprox = await s_jupyter.getTempCoreCmdrStormsvc('testsvc', Tstsvc.anit)\n\n self.false(cmdrcore.isfini)\n self.false(svcprox.isfini)\n\n mesgs = await cmdrcore.storm('service.list')\n self.true(any(['true (testsvc)' in str(mesg) for mesg in mesgs]))\n\n nodes = await cmdrcore.eval('testsvc.magic')\n self.len(1, nodes)\n\n self.eq('shazam', await svcprox.testmeth())\n\n await cmdrcore.fini()\n self.true(cmdrcore.isfini)\n\n await svcprox.fini()\n self.true(svcprox.isfini)\n\n async def test_cmdrcore(self):\n\n async with self.getTestCoreAndProxy() as (realcore, core):\n\n outp = self.getTestOutp()\n async with await s_jupyter.CmdrCore.anit(core, outp=outp) as cmdrcore:\n podes = await cmdrcore.eval('[test:str=beep]',\n num=1, cmdr=False)\n self.len(1, podes)\n self.false(outp.expect('[test:str=beep]', throw=False))\n\n mesgs = await cmdrcore.storm('[test:str=boop]',\n num=1, cmdr=True)\n self.true(outp.expect('[test:str=boop]', throw=False))\n podes = [m[1] for m in mesgs if m[0] == 'node']\n self.gt(len(mesgs), len(podes))\n self.len(1, podes)\n self.eq(podes[0][0], ('test:str', 'boop'))\n\n # Opts works for cmdr=False\n podes = await cmdrcore.eval('[test:str=$foo]',\n {'vars': {'foo': 'duck'}},\n num=1, cmdr=False)\n self.len(1, podes)\n self.eq(podes[0][0], ('test:str', 'duck'))\n\n # Opts does not work with cmdr=True - we have no way to plumb it through.\n with self.getAsyncLoggerStream('synapse.lib.view',\n 'Error during storm execution') as stream:\n with self.raises(AssertionError):\n await cmdrcore.eval('[test:str=$foo]',\n opts={'vars': {'foo': 'fowl'}},\n cmdr=True)\n self.true(await stream.wait(6))\n\n # 
Assertion based tests\n podes = await cmdrcore.eval('test:int', num=0)\n self.len(0, podes)\n podes = await cmdrcore.eval('test:str', num=3)\n self.len(3, podes)\n await self.asyncraises(AssertionError, cmdrcore.eval('test:str', num=1))\n\n # Feed function for data loading\n data = [\n (('test:int', 137), {}),\n ]\n\n await cmdrcore.addFeedData('syn.nodes', data)\n podes = await cmdrcore.eval('test:int=137',\n num=1, cmdr=False)\n self.len(1, podes)\n\n # Raw cmdline test\n async with self.getTestCoreAndProxy() as (realcore, core):\n\n outp = self.getTestOutp()\n async with await s_jupyter.CmdrCore.anit(core, outp=outp) as cmdrcore:\n await cmdrcore.runCmdLine('help')\n self.true(outp.expect('cli> help'))\n self.true(outp.expect('List commands and display help output.'))\n\n async def test_log_supression(self):\n\n async with self.getTestCoreAndProxy() as (realcore, core):\n\n outp = self.getTestOutp()\n async with await s_jupyter.CmdrCore.anit(core, outp=outp) as cmdrcore:\n with self.getAsyncLoggerStream('synapse.lib.view') as stream:\n mesgs = await cmdrcore.storm('[test:int=beep]',\n num=0, cmdr=False,\n suppress_logging=True)\n self.stormIsInErr('invalid literal for int', mesgs)\n stream.seek(0)\n self.notin('Error during storm execution', stream.read())\n\n with self.getAsyncLoggerStream('synapse.lib.view',\n 'Error during storm execution') as stream:\n mesgs = await cmdrcore.storm('[test:int=beep]',\n num=0, cmdr=False,\n suppress_logging=False)\n self.true(await stream.wait(6))\n self.stormIsInErr('invalid literal for int', mesgs)\n\n def test_doc_data(self):\n with self.getTestDir() as dirn:\n s_common.gendir(dirn, 'docdata', 'stuff')\n\n docdata = s_common.genpath(dirn, 'docdata')\n\n root = s_common.genpath(dirn, 'synapse', 'userguides')\n\n d = {'key': 'value'}\n\n s_common.jssave(d, docdata, 'data.json')\n s_common.yamlsave(d, docdata, 'data.yaml')\n s_msgpack.dumpfile(d, os.path.join(docdata, 'data.mpk'))\n with s_common.genfile(docdata, 'stuff', 'data.txt') as fd:\n fd.write('beep'.encode())\n with s_common.genfile(docdata, 'data.jsonl') as fd:\n fd.write(json.dumps(d).encode() + b'\\n')\n fd.write(json.dumps(d).encode() + b'\\n')\n fd.write(json.dumps(d).encode() + b'\\n')\n\n data = s_jupyter.getDocData('data.json', root)\n self.eq(data, d)\n data = s_jupyter.getDocData('data.yaml', root)\n self.eq(data, d)\n data = s_jupyter.getDocData('data.mpk', root)\n self.eq(data, d)\n data = s_jupyter.getDocData('stuff/data.txt', root)\n self.eq(data, b'beep')\n data = s_jupyter.getDocData('data.jsonl', root)\n self.eq(data, [d, d, d])\n\n self.raises(ValueError, s_jupyter.getDocData, 'newp.bin', root)\n self.raises(ValueError, s_jupyter.getDocData,\n '../../../../../../etc/passwd', root)\n\n async def test_stormcore(self):\n outp = self.getTestOutp()\n stormcore, svcprox = await s_jupyter.getTempCoreStormStormsvc('testsvc', Tstsvc.anit, outp=outp)\n\n self.false(stormcore.isfini)\n self.false(svcprox.isfini)\n\n msgs = await stormcore.storm('service.list')\n self.stormIsInPrint('true (testsvc)', msgs)\n\n await stormcore.storm('testsvc.magic', num=1)\n\n with self.raises(AssertionError):\n await stormcore.storm('testsvc.magic', num=999)\n\n outp.clear()\n msgs = await stormcore.storm('$lib.print(hello)', cli=True)\n self.stormIsInPrint('hello', msgs)\n outp.expect('storm> $lib.print(hello)')\n outp.expect('storm> $lib.print(hello)\\nhello')\n\n self.eq('shazam', await svcprox.testmeth())\n\n await stormcore.fini()\n self.true(stormcore.isfini)\n\n await svcprox.fini()\n 
self.true(svcprox.isfini)\n","sub_path":"synapse/tests/test_lib_jupyter.py","file_name":"test_lib_jupyter.py","file_ext":"py","file_size_in_byte":8785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"365609430","text":"# -*- coding: utf-8 -*-\nimport re\nimport scrapy\nfrom re import S\nfrom urllib.parse import urljoin\nfrom w3lib.html import remove_tags\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.loader.processors import MapCompose\nfrom medicalmap.items import CommonLoader2, HospitalInfoItem, HospitalDepItem, DoctorInfoItem, DoctorRegInfoItem\nfrom medicalmap.utils.common import now_day, custom_remove_tags, get_county2, match_special, match_special2, \\\n clean_info, clean_info2, now_year\n\n\nclass Nt12320Spider(scrapy.Spider):\n name = 'nt12320'\n allowed_domains = ['nt12320.cn']\n start_urls = ['https://www.nt12320.cn/ntres/reservation/hos_search.do']\n\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': 'www.nt12320.cn',\n 'Referer': 'https://www.nt12320.cn/ntres/',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/65.0.3325.181 Safari/537.36'\n }\n custom_settings = {\n # 延迟设置\n # 'DOWNLOAD_DELAY': random.randint(1, 2),\n # 自动限速设置\n 'AUTOTHROTTLE_ENABLED': True,\n 'AUTOTHROTTLE_START_DELAY': 1,\n 'AUTOTHROTTLE_MAX_DELAY': 5,\n 'AUTOTHROTTLE_TARGET_CONCURRENCY': 16.0,\n 'AUTOTHROTTLE_DEBUG': True,\n # 并发请求数的控制,默认为16\n 'CONCURRENT_REQUESTS': 100\n }\n host = 'https://www.nt12320.cn'\n search_hos_url = 'https://www.nt12320.cn/ntres/reservation/hos_search.do'\n doctor_pagination_url = 'https://www.nt12320.cn/ntres/reservation/hos_showReservation.do?' 
\\\n 'depid=&principalship=&docname=&depName=&hoscode={}&stdDepid=' \\\n '&parentStdDepid=&changeFlay=0¤tpage={}¤tWeekCount=1' \\\n '&disid=&bigCode=&allDoctors=0&startHour=&endHour=&schcode=' \\\n '&__multiselect_haveNum=&selectPage={}'\n data_source_from = '南通市预约挂号服务平台'\n\n def start_requests(self):\n for each_url in self.start_urls:\n yield Request(each_url, headers=self.headers, callback=self.parse)\n\n def parse(self, response):\n try:\n all_hospital_links = response.xpath('//table[@class=\"tab\"]/tbody/tr')\n for each_hospital_link in all_hospital_links:\n hospital_link = each_hospital_link.xpath('td[1]/b/a/@href').extract_first('')\n hospital_level = each_hospital_link.xpath('td[2]/p/span/text()').extract_first('')\n hospital_name = each_hospital_link.xpath('td[2]/h2/text()').extract_first('')\n all_doctor_links = each_hospital_link.xpath('td[3]/p/span/a'\n '[contains(text(),\"查看医生\")]/@href').extract_first('')\n # 获取医院信息\n if hospital_link:\n hospital_link = urljoin(self.host, hospital_link)\n self.headers.update({\n 'Referer': response.url,\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Origin': 'http://www.nt12320.cn'\n })\n yield Request(hospital_link,\n headers=self.headers,\n callback=self.parse_hospital_info,\n meta={'hospital_level': hospital_level},\n dont_filter=True)\n\n # 获取医生信息\n if all_doctor_links:\n all_doctor_links = urljoin(self.host, all_doctor_links)\n self.headers['Referer'] = response.url\n yield Request(all_doctor_links,\n headers=self.headers,\n callback=self.parse_doctor_info,\n meta={'hospital_name': hospital_name},\n dont_filter=True)\n\n # 医院翻页\n next_page_number = response.xpath('//div[@id=\"fenye\"]/'\n 'a[contains(text(),\"下一页\")]/@href').extract_first('')\n now_page_number = response.xpath('//div[@id=\"fenye\"]/a[@class=\"fenye_num_s\"]/text()').extract_first('')\n if next_page_number and now_page_number:\n next_page_number = str(re.search(r'(\\d+)', next_page_number).group(1))\n data = {\n 'currentpage': next_page_number,\n 'hoslevel': '',\n 'hosname': '',\n 'hostype': '',\n 'selectPage': now_page_number\n }\n self.headers['Referer'] = response.url\n yield FormRequest(self.search_hos_url,\n formdata=data,\n headers=self.headers,\n callback=self.parse,\n dont_filter=True)\n except Exception as e:\n self.logger.error('在抓取医院信息的过程中出错了,原因是:{}'.format(repr(e)))\n\n def parse_hospital_info(self, response):\n self.logger.info('>>>>>>正在抓取:医院信息>>>>>>')\n\n try:\n # 获取区或县\n hospital_address = response.xpath('//div[@class=\"yy_js clearfix\"]/div/dl/dd[1]/text()').extract_first('')\n if hospital_address:\n hospital_county = get_county2('中国|江苏省|江苏|南通市|南通', hospital_address)\n else:\n hospital_county = None\n\n # 获取医院信息\n loader = CommonLoader2(item=HospitalInfoItem(), response=response)\n loader.add_xpath('hospital_name', '//div[@class=\"yy_til\"]/h2/text()', MapCompose(custom_remove_tags))\n loader.add_value('hospital_level',\n response.meta.get('hospital_level'),\n MapCompose(custom_remove_tags, clean_info))\n loader.add_xpath('hospital_addr',\n '//div[@class=\"yy_js clearfix\"]/div/dl/dd[1]/text()',\n MapCompose(custom_remove_tags))\n loader.add_value('hospital_pro', '江苏省')\n loader.add_value('hospital_city', '南通市')\n loader.add_value('hospital_county', hospital_county)\n loader.add_xpath('hospital_phone',\n '//div[@class=\"yy_js clearfix\"]/div/dl/dd[2]/text()',\n MapCompose(custom_remove_tags))\n loader.add_xpath('hospital_intro',\n '//em[contains(text(),\"简介\")]/ancestor::div[1]',\n MapCompose(remove_tags, custom_remove_tags, match_special, 
clean_info2))\n loader.add_value('registered_channel', self.data_source_from)\n loader.add_value('dataSource_from', self.data_source_from)\n loader.add_value('hospital_url', response.url)\n loader.add_value('update_time', now_day())\n hospital_info_item = loader.load_item()\n yield hospital_info_item\n\n # 获取科室信息\n # self.logger.info('>>>>>>正在抓取{}:科室详细信息>>>>>>')\n all_dept_links = response.xpath('//dl[@class=\"kfyy clearfix\"]/dd/span/a/@href').extract()\n for each_dept_link in all_dept_links:\n dept_link = urljoin(self.host, re.sub(r';jsessionid=(.*?)\\?', '?', each_dept_link))\n self.headers['Referer'] = response.url\n yield Request(dept_link, headers=self.headers, callback=self.parse_hospital_dep_detail)\n except Exception as e:\n self.logger.error('在抓取医院详细信息和科室的过程中出错了,原因是:{}'.format(repr(e)))\n\n def parse_hospital_dep_detail(self, response):\n self.logger.info('>>>>>>正在抓取科室详细信息>>>>>>')\n loader = CommonLoader2(item=HospitalDepItem(), response=response)\n loader.add_xpath('dept_name',\n '//div[@class=\"zrys\"]/p/strong/text()',\n MapCompose(custom_remove_tags))\n loader.add_xpath('hospital_name', '//div[@class=\"yy_til\"]/h2/text()', MapCompose(custom_remove_tags))\n loader.add_xpath('dept_info', '//div[@class=\"zrys\"]/dl/dd', MapCompose(remove_tags, custom_remove_tags))\n loader.add_value('dataSource_from', self.data_source_from)\n loader.add_value('crawled_url', response.url)\n loader.add_value('update_time', now_day())\n dept_item = loader.load_item()\n yield dept_item\n\n def parse_doctor_info(self, response):\n self.logger.info('>>>>>>正在抓取医生信息>>>>>>')\n try:\n all_doctors = response.xpath('//table[@class=\"tab\"]/tbody/tr[position() mod 2!=0]')\n # hospital_name = response.xpath('//p[@class=\"search_num\"]/strong/text()').extract_first('')\n for each_doctor in all_doctors:\n doctor_name = each_doctor.xpath('td[2]/a/text()').extract_first('')\n doctor_level = each_doctor.xpath('td[2]/i/text()').extract_first('')\n dept_name = each_doctor.xpath('td[2]/em/a/text()').extract_first('')\n doctor_link = each_doctor.xpath('td[2]/a/@href').extract_first('')\n hospital_name = each_doctor.xpath('td[2]/p/a/text()').extract_first('')\n if doctor_link:\n doctor_link = urljoin(self.host, re.sub(r';jsessionid=(.*?)\\?', '?', doctor_link))\n # doctor_link2 = '{0}{1}'.format(doctor_link, '¤tWeekCount=2')\n self.headers['Referer'] = response.url\n yield Request(doctor_link,\n headers=self.headers,\n callback=self.parse_doctor_info_detail,\n meta={\n 'doctor_name': doctor_name,\n 'doctor_level': doctor_level,\n 'dept_name': dept_name,\n 'hospital_name': hospital_name\n },\n dont_filter=True)\n # 医生翻页\n hos_code = re.search(r'hoscode=(.*?)&', response.url) or re.search(r'hoscode=(.*?)$', response.url)\n next_page_number = response.xpath('//div[@id=\"fenye\"]/a[contains(text(),\"下一页\")]/@href').extract_first('')\n now_page_number = response.xpath('//div[@id=\"fenye\"]/a[@class=\"fenye_num_s\"]/text()').extract_first('')\n if not now_page_number:\n now_page_number = '1'\n if hos_code and next_page_number:\n hos_code = str(hos_code.group(1))\n next_page_number = str(re.search(r'(\\d+)', next_page_number).group(1))\n next_page_link = self.doctor_pagination_url.format(hos_code, next_page_number, now_page_number)\n self.headers['Referer'] = response.url\n yield Request(next_page_link, headers=self.headers, callback=self.parse_doctor_info, dont_filter=True)\n except Exception as e:\n self.logger.error('在抓取医生信息的过程中出错了,原因是:{}'.format(repr(e)))\n\n def parse_doctor_info_detail(self, response):\n 
self.logger.info('>>>>>>正在抓取医生详细信息>>>>>>')\n try:\n doctor_name = response.meta.get('doctor_name')\n dept_name = response.meta.get('dept_name')\n # dept_name = dept_name.split('-')[-1] if '-' in dept_name else dept_name\n doctor_level = response.meta.get('doctor_level')\n hospital_name = response.meta.get('hospital_name')\n # hospital_name2 = response.xpath('//div[@class=\"yy_til\"]/h2/text()').extract_first('')\n # hospital_name = hospital_name2 if hospital_name2 else hospital_name1\n diagnosis_amt = response.xpath('//td/span[@class=\"doc_yuyue_time\"]/a/@title').extract()\n if diagnosis_amt:\n res = re.search(r'.*挂号费:(.*?)$', diagnosis_amt[0], S)\n if res:\n diagnosis_amt = res.group(1)\n else:\n diagnosis_amt = None\n else:\n diagnosis_amt = None\n loader = CommonLoader2(item=DoctorInfoItem(), response=response)\n loader.add_value('doctor_name', doctor_name, MapCompose(custom_remove_tags))\n loader.add_value('dept_name', dept_name, MapCompose(custom_remove_tags))\n loader.add_value('hospital_name', hospital_name, MapCompose(custom_remove_tags))\n loader.add_value('doctor_level', doctor_level, MapCompose(custom_remove_tags, match_special2))\n loader.add_xpath('doctor_intro',\n '//div[@class=\"zrys\"]/dl/dd',\n MapCompose(remove_tags, custom_remove_tags, clean_info2))\n loader.add_value('diagnosis_amt', diagnosis_amt)\n loader.add_value('dataSource_from', self.data_source_from)\n loader.add_value('crawled_url', response.url)\n loader.add_value('update_time', now_day())\n doctor_item = loader.load_item()\n yield doctor_item\n\n # 获取医生排班信息\n has_reg_info = response.xpath('//td/span[@class=\"doc_yuyue_time\"]').extract()\n if has_reg_info:\n for each_reg_info in has_reg_info:\n reg_info_date = re.search(r'.*出诊时间:(.*?)\\n', each_reg_info, S)\n reg_info_date = reg_info_date.group(1) if reg_info_date else None\n reg_info = '{0}-{1}'.format(now_year(), reg_info_date).replace('月', '-').replace('日', '')\n reg_loader = CommonLoader2(item=DoctorRegInfoItem(), response=response)\n reg_loader.add_value('doctor_name', doctor_name, MapCompose(custom_remove_tags))\n reg_loader.add_value('dept_name', dept_name, MapCompose(custom_remove_tags))\n reg_loader.add_xpath('hospital_name',\n '//div[@class=\"yy_til\"]/h2/text()',\n MapCompose(custom_remove_tags))\n reg_loader.add_value('reg_info', reg_info, MapCompose(custom_remove_tags))\n reg_loader.add_value('dataSource_from', self.data_source_from)\n reg_loader.add_value('crawled_url', response.url)\n reg_loader.add_value('update_time', now_day())\n reg_item = reg_loader.load_item()\n yield reg_item\n except Exception as e:\n self.logger.error('在抓取医生详细信息的过程中出错了,原因是:{}'.format(repr(e)))\n","sub_path":"medicalmap/medicalmap/spiders/nt12320.py","file_name":"nt12320.py","file_ext":"py","file_size_in_byte":15236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248007633","text":"# Copyright 2013 UNED\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom django.conf import settings\nfrom 
django.core.mail.message import EmailMultiAlternatives\nfrom django.utils.translation import ugettext as _\nimport re\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_contact_message(communication_type, course, sender_username, sender_email,\n message, fail_silently=False, connection=None):\n\n # Add course name to the message body\n message = \"%s: %s\\n\\n%s\" % (_(\"Course\"), course, message)\n\n subject = \"%s | %s <%s>\" % (communication_type.title,\n sender_username,\n sender_email)\n headers = {'Reply-To': sender_email}\n\n destination = communication_type.destination\n if not destination:\n if not settings.MANAGERS:\n logger.error('Could not send a contact message because there is no destination email configured neither in the communication type or the MANAGERS setting.')\n return\n else:\n to = [m[1] for m in settings.MANAGERS]\n else:\n to = [destination]\n\n try:\n if settings.SEND_CONTACT_EMAIL_FROM_SENDER:\n from_ = sender_email\n else:\n from_ = settings.DEFAULT_FROM_EMAIL\n except AttributeError:\n from_ = settings.DEFAULT_FROM_EMAIL\n\n mail = EmailMultiAlternatives(\n u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),\n message,\n from_,\n to,\n connection=connection,\n headers=headers,\n )\n mail.send(fail_silently=fail_silently)\n\ndef send_support_message(subject, body, url, user, device, position, date, timezone, fail_silently=False, connection=None):\n headers = { 'Reply-To': user.email }\n mail_subject = \"%s - %s\" % (settings.SUPPORT_SUBJECT_PREFIX, subject)\n mail_body = \"Message:\\n%s\\n\" % (body)\n m = re.search('^http(s)?:\\/\\/[a-z0-9-]+(\\.[a-z0-9-]+)*?(:[0-9]+)?(\\/)?.([a-z]+)/', url)\n base_url = m.group(0)\n mail_extrainfo = ( \"-----------\\n\"\n \"Username: %s (%suser/profile/%s)\\n\"\n \"User real name: %s\\n\"\n \"User id: %s (%sadmin/auth/user/%s/)\\n\"\n \"User email: %s\\n\"\n \"Related URL: %s\\n\"\n \"Contact date: %s\\n\"\n \"User geoposition: %s(%s,%s) (http://maps.google.com/maps?q=%s,%s)\\n\"\n \"Timezone: %s\\n\"\n \"Device type: %s (%s)\\n\"\n \"Browser: %s (%s)\\n\") % ( user.username, base_url, user.username,\n user.get_full_name(),\n user.id, base_url, user.id,\n user.email,\n url,\n date,\n position['location'], position['latitude'], position['longitude'],\n position['latitude'], position['longitude'],\n timezone,\n device['type'], device['os'],\n device['browser'], device['orientation'])\n destination = [settings.SUPPORT_EMAIL]\n origin = user.email\n\n mail = EmailMultiAlternatives(\n mail_subject,\n u'%s%s' % (mail_body, mail_extrainfo),\n origin,\n destination,\n connection=connection,\n headers=headers,\n )\n mail.send(fail_silently=fail_silently)\n","sub_path":"moocng/contact/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256014006","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\na_list = [\"a\",\"b\",\"c\"]\n#b_dic = [{\"id\":\"a\"},{\"id\":\"b\"},{\"id\":\"c\"}]\n\nresult = [{\"id\":value for value in i} for i in a_list]\n\nprint(json.dumps(result))\n","sub_path":"json/list_to_json.py","file_name":"list_to_json.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148415323","text":"#!/usr/bin/env python\n# license removed for brevity\n\n# import sys\n# import os\n# curdir = os.path.dirname(os.path.realpath(__file__))\n# print curdir+'/..'\n# sys.path.insert(0,curdir+'/..')\n\n# import copy\nimport rospy\nimport tf\nimport time\nimport math\nimport numpy as np\nimport expo_utility as expo_util\nimport airsim\nfrom airsim.types import Pose, Vector3r, Quaternionr\n\nfrom airsim import utils as sim_util\nfrom airsim.utils import to_quaternion\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import PointCloud2\n\nfrom planner_base.srv import path as PathSrv\nfrom planner_base.srv import nearfrontier as NearfrontierSrv\nfrom geometry_msgs.msg import Point\n# import operator\nimport os\n\n# Global Parameters\n# ExpoController.fov\n# ExpoController.path_skip\nOnlyGlobal = False\nMapFilename = 'slaughter'\n\nclass ExpoController:\n def __init__(self):\n self.cmd_client = airsim.VehicleClient()\n self.cmd_client.confirmConnection()\n self.tf_broad_ = tf.TransformBroadcaster()\n self.odom_pub_ = rospy.Publisher('pose', Odometry, queue_size=1)\n self.cloud_pub_ = rospy.Publisher('cloud_in', PointCloud2, queue_size=1)\n self.camid = 0\n self.img_type = [airsim.ImageRequest(self.camid, airsim.ImageType.DepthPlanner, True)]\n self.FAR_POINT = 22\n self.cam_pos = [0., 0., 0.]\n self.fov = 95.0\n self.path_skip = 7\n self.last_list_len = 10\n self.last_ten_goals = [[0.,0.,0.]]*self.last_list_len # detect and avoid occilation\n self.lfg_ind = 0\n self.replan_step = 1\n\n def get_depth_campos(self):\n '''\n cam_pose: 0: [x_val, y_val, z_val] 1: [x_val, y_val, z_val, w_val]\n '''\n img_res = self.cmd_client.simGetImages(self.img_type)\n \n if img_res is None or img_res[0].width==0: # Sometime the request returns no image\n return None, None\n\n depth_front = sim_util.list_to_2d_float_array(img_res[0].image_data_float,\n img_res[0].width, img_res[0].height)\n depth_front[depth_front>self.FAR_POINT] = self.FAR_POINT\n cam_pose = (img_res[0].camera_position, img_res[0].camera_orientation)\n\n return depth_front, cam_pose\n\n def collect_points_6dir(self, tgt):\n # must be in CV mode, otherwise the point clouds won't align\n scan_config = [0, -1, 2, 1, 4, 5] # front, left, back, right, up, down\n points_6dir = np.zeros((0, self.imgwidth, 3), dtype=np.float32)\n for k,face in enumerate(scan_config): \n if face == 4: # look upwards at tgt\n pose = Pose(Vector3r(tgt[0], tgt[1], tgt[2]), to_quaternion(math.pi / 2, 0, 0)) # up\n elif face == 5: # look downwards\n pose = Pose(Vector3r(tgt[0], tgt[1], tgt[2]), to_quaternion(-math.pi / 2, 0, 0)) # down - pitch, roll, yaw\n else: # rotate from [-90, 0, 90, 180]\n yaw = math.pi / 2 * face\n pose = Pose(Vector3r(tgt[0], tgt[1], tgt[2]), to_quaternion(0, 0, yaw))\n\n self.set_vehicle_pose(pose)\n depth_front, _ = self.get_depth_campos()\n if depth_front is None:\n rospy.logwarn('Missing image {}: {}'.format(k, tgt))\n continue\n # import ipdb;ipdb.set_trace()\n point_array = expo_util.depth_to_point_cloud(depth_front, self.focal, self.pu, self.pv, mode = k)\n points_6dir = 
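np.concatenate((points_6dir, point_array), axis=0)\n            # Added note (not in the original): all six renders share the camera\n            # position tgt, so the concatenated per-face clouds form one panoramic scan.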
\n        # reset the pose for fun\n        pose = Pose(Vector3r(tgt[0], tgt[1], tgt[2]), to_quaternion(0, 0, 0))\n        self.set_vehicle_pose(pose)\n        # print 'points:', points_6dir.shape \n        return points_6dir\n\n    def publish_lidar_scans_6dir(self, tgt):\n        rostime = rospy.Time.now()\n        points = self.collect_points_6dir(tgt)\n        pc_msg = expo_util.xyz_array_to_point_cloud_msg(points, rostime)\n        odom_msg = expo_util.trans_to_ros_odometry(tgt, rostime)\n        self.cam_pose = tgt\n\n        self.tf_broad_.sendTransform(translation=tgt, rotation=[0.,0.,0.,1.],\n                                     time=rostime, child='map', parent='world')\n        self.odom_pub_.publish(odom_msg)\n        self.cloud_pub_.publish(pc_msg)\n\n\n    def set_vehicle_pose(self, pose, ignore_collison=True, vehicle_name=''):\n        self.cmd_client.simSetVehiclePose(pose, ignore_collison, vehicle_name) # amigo: this is supposed to be used in CV mode\n        time.sleep(0.1)\n\n    def init_exploration(self):\n        # cloud_msg, cloud_odom_msg = self.get_point_cloud_msg(cam_id=0)\n        # cam_trans, cam_rot = expo_util.odom_to_trans_and_rot(cloud_odom_msg)\n        # cam_pose = self.cmd_client.simGetCameraInfo(camera_name=0).pose\n        cam_info = self.cmd_client.simGetCameraInfo(camera_name=self.camid)\n        img_res = self.cmd_client.simGetImages(self.img_type)\n        img_res = img_res[0]\n        cam_pose = Pose(img_res.camera_position, img_res.camera_orientation)\n        cam_trans, cam_rot = expo_util.sim_pose_to_trans_and_rot(cam_pose)\n        self.imgwidth = img_res.width\n        self.imgheight = img_res.height\n        self.focal, self.pu, self.pv = expo_util.get_intrinsic(img_res, cam_info, self.fov)\n        self.cam_pos = cam_trans\n\n        rospy.loginfo('Initialized img ({},{}) focal {}, ({},{})'.format(self.imgwidth, self.imgheight, self.focal, self.pu, self.pv))\n        self.publish_lidar_scans_6dir(cam_trans)\n        time.sleep(5)\n\n    def points_dist(self, pt1, pt2):\n        dist = (pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2 + (pt1[2] - pt2[2])**2\n        dist = math.sqrt(dist)\n        return dist\n\n    def is_same_point(self, pt1, pt2):\n        if abs(pt1[0]-pt2[0]) > 1e-3 or abs(pt1[1]-pt2[1]) > 1e-3 or abs(pt1[2]-pt2[2]) > 1e-3:\n            return False\n        return True\n\n    def explore(self, try_round=-1):\n        \"\"\"\n        We have to handle two cases: oscillation and local-frontier disappearance\n        :param try_round: -1 for all the frontiers\n        :path_skip: skip way points on the path\n        :return:\n        \"\"\"\n        # A star path finding for local exploration\n        local_path = self.call_local_planning_service(try_round)\n        if local_path is None: \n            return False\n        # import ipdb;ipdb.set_trace()\n        # No feasible local_path is found\n        if len(local_path) == 0: \n            return False\n\n        # insert the goal point into the list for oscillation detection\n        target_pt = local_path[0]\n        occilation = False\n        if not self.is_same_point(target_pt, self.last_ten_goals[self.lfg_ind]): # target point changes\n            for k,pt in enumerate(self.last_ten_goals): # check if the target already exists in the goal list\n                if self.is_same_point(target_pt, pt): # this is an oscillation\n                    occilation = True\n                    tmp = self.last_ten_goals[self.lfg_ind]\n                    self.last_ten_goals[self.lfg_ind] = pt\n                    self.last_ten_goals[k] = tmp\n                    rospy.logwarn('Oscillation detected, increasing replan step! %d - %d', self.lfg_ind, k)\n                    break\n            if occilation:\n                self.replan_step = self.replan_step * 3\n                if self.replan_step>7: # serious oscillation\n                    rospy.logwarn('Escaping oscillation by going to the goal directly!')\n                    local_path = [local_path[0]]\n            else:\n                rospy.loginfo(\"new target, reset replan step..\")\n                self.lfg_ind = (self.lfg_ind+1)%self.last_list_len\n                self.last_ten_goals[self.lfg_ind] = target_pt\n                self.replan_step = 1\n        else:\n            rospy.loginfo(\"flying to the same target\")\n\n        path_len = len(local_path)\n        for i in range(self.replan_step):\n            if path_len < self.path_skip:\n                next_ind = path_len - (i+1)*((path_len+1)//2)  # integer division so next_ind stays a valid index\n            else:\n                next_ind = len(local_path) - (i+1)*self.path_skip \n            if next_ind<0:\n                break\n            rospy.loginfo('Path len {}, move to waypoint {}'.format(len(local_path),local_path[next_ind]))\n            next_way_point = local_path[next_ind]\n            # self.move_to_tgt(next_way_point)\n            self.publish_lidar_scans_6dir(next_way_point)\n        return True\n\n    def explore_global_frontier(self,):\n        # import ipdb;ipdb.set_trace()\n        next_point = self.get_nearest_frontier()\n        if next_point is None:\n            return False\n        rospy.loginfo('Next global frontier ({}, {}, {})'.format(next_point[0], next_point[1], next_point[2]))\n        self.publish_lidar_scans_6dir(next_point)\n        return True\n\n    def get_nearest_frontier(self):\n        rospy.wait_for_service('near_frontier_srv')\n\n        robot_pos = Point(self.cam_pose[0], self.cam_pose[1], self.cam_pose[2])\n        try:\n            global_frontier_srv = rospy.ServiceProxy('near_frontier_srv', NearfrontierSrv)\n            global_frontier = global_frontier_srv(robot_pos)\n        except rospy.ServiceException:\n            print(\"No frontier returned..\")\n            return None\n\n        return [global_frontier.nearfrontier.x, global_frontier.nearfrontier.y, global_frontier.nearfrontier.z]\n\n    def call_local_planning_service(self, try_round):\n        target_path = []\n        rospy.wait_for_service('bbx_path_srv')\n        try:\n            feasible_path = rospy.ServiceProxy('bbx_path_srv', PathSrv)\n            resp = feasible_path(try_round)\n            for item in resp.path.points:\n                target_path.append([item.x, item.y, item.z])\n            return target_path\n        except rospy.ServiceException:\n            print(\"service call failed\")\n            return None\n\n\ndef save_map():\n    # save map\n    timestr = time.strftime('%m%d_%H%M%S',time.localtime())\n    filepathname = '~/tmp/'+MapFilename+'_'+timestr+'.ot'\n    cmd = 'rosrun octomap_server octomap_saver ' + filepathname\n    os.system(cmd)\n    rospy.loginfo('Save map {}'.format(filepathname))\n\n\nif __name__ == '__main__':\n    rospy.init_node('expo_control', anonymous=True)\n    controller = ExpoController()\n    controller.init_exploration()\n    # rate = rospy.Rate(1)\n    count = 0\n    while not rospy.is_shutdown():\n        count += 1\n        if OnlyGlobal:\n            if controller.explore_global_frontier():\n                time.sleep(2.0)\n            else: # mapping finished\n                break \n        else: # A star planning on local map, move to nearest global frontier when no local frontiers\n            if controller.explore(): \n                time.sleep(2.0)\n            else: # no local frontier\n                if controller.explore_global_frontier():\n                    time.sleep(2.0)\n                else:\n                    break \n        if count%100==0:\n            save_map()\n    save_map()\n\n","sub_path":"src/expo_control.py","file_name":"expo_control.py","file_ext":"py","file_size_in_byte":10894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"456256976","text":"from emubase import run\nfrom genericemu import GenericEmu\n\nclass Chip8(GenericEmu):\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n        self.setToolBarSeq(\n            {\n                \"label\": \"Open\",\n                \"func\": self.runFile\n            }\n        )\n\nif __name__ == 
\"__main__\":\n run(Chip8, screenSize = (200, 200))\n","sub_path":"chip8.py","file_name":"chip8.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222218102","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport fc_nn as nn\r\nfrom step_dynamic import metropolis_adjusted_langevin_step\r\n\r\ndef build(id_inputs, params, steps=None, init_thetas=None):\r\n if steps:\r\n global_step, id_global_step = steps\r\n else:\r\n global_step, id_global_step = 0, 0\r\n\r\n images = tf.placeholder(tf.float32, [None, params['layer_dims'][0]])\r\n labels = tf.placeholder(tf.float32, [None, params['layer_dims'][-1]])\r\n for val in [images, labels]:\r\n tf.add_to_collection('inputs', val)\r\n\r\n with tf.device('/cpu:0'):\r\n # Define learning rate and related operations\r\n global_step = tf.Variable(global_step, trainable=False)\r\n inc_global_step_op = tf.assign_add(global_step, 1)\r\n lr = tf.train.exponential_decay(\r\n params['init_lr'], global_step, params['decay_steps'], params['decay_rate'], staircase=True)\r\n\r\n id_global_step = tf.Variable(id_global_step, trainable=False)\r\n inc_id_global_step_op = tf.assign_add(id_global_step, 1)\r\n id_lr = tf.train.exponential_decay(\r\n params['init_id_lr'], id_global_step, params['id_decay_steps'], params['id_decay_rate'], staircase=True)\r\n\r\n with tf.device('/gpu:0'):\r\n # Define variables\r\n if init_thetas is not None:\r\n thetas = tf.Variable(init_thetas)\r\n else:\r\n thetas = nn.create_thetas(params['layer_dims'], params['nb_particles'])\r\n\r\n id_images = tf.Variable(id_inputs[0])\r\n id_labels = tf.Variable(id_inputs[1])\r\n\r\n # Opt related values\r\n id_log_probs_fn = lambda thetas_: nn.create_log_prob(thetas_, id_images, id_labels, params['layer_dims'], params['train_data_dim'], smooth_labels=params['opt_labels'])\r\n step_op, avg_acceptance_rate = metropolis_adjusted_langevin_step(thetas, id_log_probs_fn, lr, nb_walk_per_step=params['nb_walk_per_step'])\r\n\r\n # Loss related values\r\n logits = nn.create_logits(thetas, images, params['layer_dims'])\r\n log_liks = nn.create_log_lik_from_logits(thetas, logits, labels, params['train_data_dim'])\r\n log_lik_grads = tf.gradients(log_liks, [thetas])[0]\r\n\r\n # Induced loss related values\r\n id_logits = nn.create_logits(thetas, id_images, params['layer_dims'])\r\n id_log_liks = nn.create_log_lik_from_logits(thetas, id_logits, id_labels, params['train_data_dim'], smooth_labels=params['opt_labels'])\r\n id_log_lik_grads = tf.gradients(id_log_liks, [thetas])[0]\r\n\r\n # Score loss and optimizer\r\n id_loss = tf.reduce_mean(tf.square((log_lik_grads-id_log_lik_grads)/float(params['train_data_dim'])))\r\n if params['opt_labels']:\r\n gd_op = tf.train.GradientDescentOptimizer(learning_rate=id_lr).minimize(id_loss, var_list=[id_images, id_labels])\r\n with tf.control_dependencies([gd_op]):\r\n clip_op = tf.clip_by_value(id_labels, clip_value_min=0.0, clip_value_max=10.0)\r\n id_step_op = tf.group(gd_op, clip_op)\r\n else:\r\n id_step_op = tf.train.GradientDescentOptimizer(learning_rate=id_lr).minimize(id_loss, var_list=[id_images])\r\n\r\n # Test and evaluate related values\r\n log_softmaxs = tf.nn.log_softmax(logits)\r\n avg_log_softmax = tf.reduce_mean(log_softmaxs, axis=0)\r\n sparse_labels = tf.argmax(labels, axis=1, output_type=tf.int32)\r\n sparse_pred = tf.argmax(avg_log_softmax, axis=1, output_type=tf.int32)\r\n comp = tf.cast(tf.equal(sparse_labels, sparse_pred), tf.float32)\r\n err 
\r\n\r\n    tf.add_to_collection('thetas', thetas)\r\n    for val in [id_images, id_labels]:\r\n        tf.add_to_collection('id_inputs', val)\r\n\r\n    for val in [step_op, inc_global_step_op, lr, avg_acceptance_rate]:\r\n        tf.add_to_collection('step_ops', val)\r\n\r\n    for val in [id_step_op, inc_id_global_step_op, id_lr, id_loss]:\r\n        tf.add_to_collection('id_step_ops', val)\r\n\r\n    for val in [err]:\r\n        tf.add_to_collection('test_ops', val)\r\n\r\n    for val in [log_softmaxs]:\r\n        tf.add_to_collection('pred_ops', val)\r\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"154735292","text":"### ---------------------------------------------------------------------\n### Exercise-4: Write a Python script which calculates volume \n### and surface area of a cylinder of radius R and length L.\n### ---------------------------------------------------------------------\nimport math # use math.pi from this module\n\n# Function for calculation of volume of a cylinder \n\npi = math.pi\ndef calculate(R, L):\n    surface = 2 * pi * R * (R + L)\n    volume = pi * R * R * L \n    return ('surface: ' + str(surface),\n            'Volume: ' + str(volume))\n    \n    \nR = int(input('Please Enter the radius of a Cylinder: '))\nL = int(input('Please Enter the length of a Cylinder: '))\n\nprint(calculate(R, L)) \n","sub_path":"cylinder.py","file_name":"cylinder.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"207721754","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 7 13:54:23 2018\n\n@author: zachb\n\"\"\"\n\ndef onehotletter(s):\n    result = [0] * 33\n    if s == ' ':\n        result[32] = 1\n    elif s in 'abcdefghijklmnopqrstuvwxyz':\n        letter_index = ord(s) - 97\n        result[letter_index] = 1\n    elif s == 'á':\n        result[26] = 1\n    elif s == 'é':\n        result[27] = 1\n    elif s == 'í':\n        result[28] = 1\n    elif s == 'ó':\n        result[29] = 1\n    elif s == 'ú':\n        result[30] = 1\n    elif s == 'ñ':\n        result[31] = 1\n    return result\n\ndef onehotword(word):\n    result = []\n    word = word + ((10 - len(word)) * \" \")\n    for i in word:\n        result += onehotletter(i)\n    return result\n\nimport numpy as np\nimport pandas as pd\n\n#Creation of english wordlist\nenglish = open(\"english.txt\")\ninit_englishlist = []\nfor line in english:\n    line = line[:-1]\n    if len(line) > 4:\n        init_englishlist += [line]\n    \n# Creation of spanish wordlist\nspanish = open(\"spanish.txt\")\ninit_spanishlist = []\nfor line in spanish:\n    line = line[:-1]\n    if len(line) > 4 and len(line) < 11:\n        init_spanishlist += [line]\n    \n# Purge bad characters (fixed: the original compared a character to a boolean here)\nspanishlist = []\nbad_word = False\nfor i in range(len(init_spanishlist)):\n    for j in init_spanishlist[i]:\n        if ord(j) < 97 or ord(j) > 122:\n            if j not in 'áéíóúñ':\n                bad_word = True\n    if bad_word == False:\n        spanishlist += [init_spanishlist[i]]\n    bad_word = False\n    \nenglishlist = []\nbad_word = False\nfor i in range(len(init_englishlist)):\n    for j in init_englishlist[i]:\n        if ord(j) < 97 or ord(j) > 122:\n            if j not in 'áéíóúñ':\n                bad_word = True\n    if bad_word == False:\n        englishlist += [init_englishlist[i]]\n    bad_word = False\n\n# Trimming to same size \nspanishlist = spanishlist[:len(englishlist)]\nenglishlist = englishlist[:len(spanishlist)]\n\n# Creating input list\nx=[]\nfor i in spanishlist:\n    hot = onehotword(i)\n    x += [hot]\nfor i in englishlist:\n    hot = onehotword(i)\n    x += [hot]\n    \n# Creating 
output list\ny=[]\nfor i in spanishlist:\n y += [0]\nfor i in englishlist:\n y += [1]\n \n# Converting lists to numpy matrices\nx = pd.DataFrame(x)\ny = pd.DataFrame(y)\nx = x.iloc[:, 0:330].values\ny = y.iloc[:,:].values\n\n# Splitting into train and test data\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)\n\n# Initializing network\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nclassifier = Sequential()\n\n# Adding the input layer and the first hidden layer\nclassifier.add(Dense(output_dim = 330, init = 'uniform', activation = 'sigmoid', input_dim = 330))\n# Adding a hidden layer\nclassifier.add(Dense(output_dim = 330, init = 'uniform', activation = 'sigmoid'))\n# Adding a hidden layer\nclassifier.add(Dense(output_dim = 330, init = 'uniform', activation = 'sigmoid'))\n# Adding the output layer\nclassifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))\n\n# Determining loss\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Fitting model to data\nclassifier.fit(x_train, y_train, batch_size = 3, nb_epoch = 30)\n\ny_pred = classifier.predict(x_test)\ny_pred = (y_pred > .5)\n\nscore = classifier.evaluate(x_test, y_test, verbose=0)\nprint(score)\n\nclassifier.save(\"englishVspanish\")\n","sub_path":"englishOrSpanish.py","file_name":"englishOrSpanish.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647256131","text":"from flask import Flask, render_template\nfrom time import sleep\nfrom flask.views import MethodView\nfrom pubnub.pnconfiguration import PNConfiguration\nfrom pubnub.pubnub import PubNub, SubscribeListener\n \npnconfig = PNConfiguration()\n \npnconfig.subscribe_key = 'sub-c-ef2e6136-5055-11e9-bc27-728c10c631fc'\npnconfig.publish_key = 'pub-c-3d806c54-fc53-4b35-b22a-5a911a52f42f'\npnconfig.uuid = 'srinu'\n \npubnub = PubNub(pnconfig)\n \nmy_listener = SubscribeListener()\npubnub.add_listener(my_listener)\n \npubnub.subscribe().channels('awesomeChannel').execute()\nmy_listener.wait_for_connect()\nprint('connected')\n \n\n\n\napp = Flask(__name__, template_folder='templates')\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\n@app.route('/command/', methods=['GET'])\ndef getCmd(cmd):\n print(cmd)\n pubnub.publish().channel('awesomeChannel').message(cmd).sync()\n return render_template('home.html')\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400694028","text":"#! 
/usr/bin/env python\r\n# -*- coding: utf-8\r\n\r\nfrom django.conf.urls import url, patterns, include\r\n\r\nurlpatterns = patterns(\r\n    'restapi.views',\r\n    url(r'^user/$',\"user_list\"),\r\n    url(r'^user/(?P<id>[0-9]+)/$',\"user_detial\"),  # named group restored; 'id' is an assumed group name\r\n    )","sub_path":"restapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"503794286","text":"from keras.models import Sequential\r\nfrom keras.models import load_model\r\n\r\n# Build the Keras Sequential model\r\nmodel = Sequential()\r\nmodel = load_model(\"imdb_rnn.h5\")\r\nmodel.summary() # Show the model summary\r\n# Compile the model\r\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"rmsprop\", \r\n              metrics=[\"accuracy\"])\r\n# Show the weight shapes of the SimpleRNN layer\r\nprint(2, model.layers[2].name, \":\")\r\nweights = model.layers[2].get_weights()\r\nfor i in range(len(weights)):\r\n    print(\"==>\", i, weights[i].shape)","sub_path":"F9744/Keras/Ch16/Ch16_2_1c.py","file_name":"Ch16_2_1c.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"117987273","text":"class EventAttributes:\n    def __init__(self, default_attributes):\n        self.attributes = {}\n        self.default = default_attributes\n\n\n    def get_attributes(self, event):\n        if event in self.attributes:\n            return self.attributes[event]\n\n        attributes = dict(self.default)\n        if event[0] == 'A':\n            attributes['min court quality'] = Court.HIGH_QUALITY\n        elif event[0] == 'B':\n            attributes['min court quality'] = Court.MED_QUALITY\n        else:\n            attributes['min court quality'] = Court.LOW_QUALITY\n\n        if event[-1] == 'C':\n            attributes['duration'] = less_duration(attributes['duration'])\n            attributes['min court quality'] = Court.inferior_quality(attributes['min court quality'])\n\n        return attributes\n\n\n    def set_attributes(self, event, attributes):\n        self.attributes[event] = attributes\n\n\nclass Match:\n    def __init__(self, number, event='', is_final=False):\n        self.event = event\n        self.number = number\n        self.successors = [] # rank = duration + rest_time + (rank of succ w/ greatest rank)\n        self.predecessors = [] # all predecessors must finish before scheduling\n\n        event_attributes = EVENT_ATTRIBUTES.get_attributes(event)\n        self.duration = event_attributes['duration']\n        self.min_court_quality = event_attributes['min court quality']\n        self.rest_time = event_attributes['rest time']\n\n        if is_final:\n            self.duration = more_duration(self.duration)\n            self.min_court_quality = Court.superior_quality(self.min_court_quality)\n\n        self.rank = self.duration\n        self.is_rank_stale = False\n        self.scheduled_court = None\n        self.scheduled_time = None\n        self.completed_time = None\n\n\n    def __repr__(self):\n        successors = (' > ' + (str(self.successors) if len(self.successors) > 1 else str(self.successors[0]))) if self.successors else ''\n        return self.event + str(self.number) + successors\n\n\n    def get_rank(self):\n        if not self.is_rank_stale:\n            return self.rank\n\n        self.rank = self.duration + self.rest_time + self.max_successor_rank()\n        return self.rank\n\n\n    def max_successor_rank(self):\n        max_rank = 0\n        for successor in self.successors:\n            rank = successor.get_rank()\n            if rank > max_rank:\n                max_rank = rank\n\n        return max_rank\n\n\n    def total_successor_rank(self):\n        total_rank = 0\n        for successor in self.successors:\n            total_rank += successor.get_rank()\n\n        return total_rank\n\n\n    def add_successor(self, successor):\n        if successor not in self.successors:
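\n            # Added note (not in the original): a new edge can change this node's\n            # critical-path rank, so flag it stale; get_rank() then recomputes lazily.\n            self.is_rank_stale = 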
True\n self.successors.append(successor)\n\n if self not in successor.predecessors:\n successor.predecessors.append(self)\n\n\n def add_predecessor(self, predecessor):\n predecessor.add_successor(self)\n\n\n def remove_successor(self, successor):\n if successor in self.successors:\n self.successors.remove(successor)\n\n if self in successor.predecessors:\n successor.predecessors.remove(self)\n\n\n def remove_predecessor(self, predecessor):\n predecessor.remove_successor(self)\n\n\n def remove(self):\n if len(self.predecessors) > 1:\n print('WARNING: Removing Match with more than one predecessor.')\n\n for successor in self.successors:\n for predecessor in self.predecessors:\n predecessor.add_successor(successor)\n\n successors, predecessors = list(self.successors), list(self.predecessors)\n for successor in successors:\n self.remove_successor(successor)\n\n for predecessor in predecessors:\n self.remove_predecessor(predecessor)\n\n\n def schedule(self, court, time):\n self.scheduled_court = court\n self.scheduled_time = time\n self.completed_time = time + self.duration\n return {'time': time, 'court': court, 'match': self}\n\n\n def is_finished(self, time):\n return ((self.completed_time is not None) and\n (self.completed_time + self.rest_time <= time))\n\n\n def are_predecessors_finished(self, time):\n for predecessor in self.predecessors:\n if not predecessor.is_finished(time):\n return False\n return True\n\n\nclass Court:\n LOW_QUALITY = 0\n MED_QUALITY = 1\n HIGH_QUALITY = 2\n\n @staticmethod\n def superior_quality(quality):\n return quality + 1 if quality < Court.HIGH_QUALITY else quality\n\n\n @staticmethod\n def inferior_quality(quality):\n return quality - 1 if quality > Court.LOW_QUALITY else quality\n\n\n def __init__(self, number, quality, time_til_free=0):\n self.number = number\n self.quality = quality\n self.match = None\n self.time_til_free = time_til_free\n\n\n def __repr__(self):\n return 'Court ' + str(self.number)\n\n\n def is_free(self):\n if self.time_til_free <= 0:\n self.match = None\n else:\n return False\n\n return self.match is None\n\n\n def schedule_match(self, match):\n self.match = match\n self.time_til_free = match.duration\n\n\n def decrement_time(self, decrement):\n if not self.is_free():\n self.time_til_free -= min(self.time_til_free, decrement)\n\n\ndef more_duration(duration):\n return duration + 20\n\n\ndef less_duration(duration):\n return duration - 20\n\n\nMAIN_DURATION = 30\nEVENT_ATTRIBUTES = EventAttributes({'duration': MAIN_DURATION, 'rest time': 15, 'min court quality': Court.MED_QUALITY})\n","sub_path":"tournament_structures.py","file_name":"tournament_structures.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140285144","text":"import numpy as np\nimport csv\nfrom scipy import signal\nfrom matplotlib import pyplot as plt\nimport math\n\n\n# Importo datos de medicion\nmedicion = open(\"/Users/rodrigovazquez/Library/Mobile Documents/3L68KQB4HG~com~readdle~CommonDocuments/Documents/ADC/TP2019/datos/medicionPasaAltos.csv\",\"r\")\ndatos = np.loadtxt(medicion,delimiter=',')\nf = datos[:,0]\nA = datos[:,1]\n\nsimulacion = open(\"/Users/rodrigovazquez/Library/Mobile Documents/3L68KQB4HG~com~readdle~CommonDocuments/Documents/ADC/TP2019/Simulacion/PasaAlto.txt\",\"r\")\ndatos = np.loadtxt(simulacion,delimiter='\\t')\nprint(datos)\n# Generador de transferencia en funcion de los coeficientes del numerador y denominador\nnumI = [1,0,0]\ndenI = 
[1,3510,1.004e7]\nnumR = [1,0,0]\ndenR = [1,(100000/33),9768868]\n\nhz = np.logspace(6.2, 8)\nrad_n = hz * 2 * np.pi / (1 / 5e-05)\n\ns1 = signal.lti(numI,denI)\ns2 = signal.lti(numR,denR)\nwI, amplitudI, faseI = signal.bode(s1, w=rad_n)\nwR, amplitudR, faseR = signal.bode(s2, w=rad_n)\nwI = wI / 2 / np.pi\nwR = wR / 2 / np.pi\n\n# Graficos\nplt.semilogx(wI,amplitudI,'-b',linewidth = 1.8,label = 'Curva ideal') # Grafica bode ideal\nplt.semilogx(wR,amplitudR,'--r',linewidth = 1.8, label = 'Curva real')\nplt.semilogx(f,A,'.g',linewidth = 1.8,label = 'Curva medida') # Grafica bode de modulo\nplt.title('Bode de modulo')\nplt.xlabel('Frecuencia [Hz]')\nplt.ylabel('Amplitud [dB]')\nplt.legend(loc='lower right', fontsize = 'large')\nplt.grid(which = 'major', color = 'gray', linestyle = '--')\nplt.minorticks_on()\nplt.grid(which = 'minor', color = '#bababa', linestyle = ':')\n#plt.show()\n","sub_path":"Informe/Python/ComparativaFinal.py","file_name":"ComparativaFinal.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437955094","text":"import copy\nimport datetime\nfrom dateutil.easter import *\nfrom sqlalchemy import text\nfrom sqlalchemy.orm import eagerload,joinedload\n\nfrom ..models import FloatingFeast, ServicePattern\nfrom .. import utils\nfrom ..valid_dates import valid_in_list\n\nclass FloatingFeasts:\n \"\"\"Class for placing floating feasts in a year\"\"\"\n\n def __init__(self, session, year):\n \"\"\"Sets up the placer\"\"\"\n self.session = session\n self.year = year\n self.load_feasts()\n\n def load_feasts(self):\n \"\"\"Loads the feasts for this year\"\"\"\n self.by_code = {}\n self.feasts = []\n for instance in self.session.query(FloatingFeast).\\\n options(joinedload(FloatingFeast.otype)).\\\n options(joinedload(FloatingFeast.all_patterns)).\\\n options(joinedload(FloatingFeast.all_eve_patterns)).\\\n filter(text(\"valid_for_date(:jan_first, floating_feasts.valid_start, floating_feasts.valid_end)\")).\\\n params(jan_first=datetime.date(self.year, 1, 1).strftime('%Y-%m-%d')).\\\n order_by(FloatingFeast.placement_index):\n code = instance.code\n if code not in self.by_code:\n self.by_code[code] = []\n self.by_code[code].append(instance)\n self.feasts.append(code)\n\n def get_for_year(self, year, full_year):\n \"\"\"Given the current calendar, find the days the floating feasts should be placed\"\"\"\n by_date = {}\n for code in self.feasts:\n for feast in self.by_code[code]:\n if feast.algorithm == 'of_our_lady':\n by_date = self.of_our_lady(feast, year, full_year, by_date)\n elif feast.algorithm == 'of_our_lady_old':\n by_date = self.of_our_lady_old(feast, year, full_year, by_date)\n elif feast.algorithm == 'first_bcp':\n by_date = self.first_bcp(feast, year, full_year, by_date)\n elif feast.algorithm == 'parish_requiem':\n by_date = self.parish_requiem(feast, year, full_year, by_date)\n elif feast.algorithm == 'parish_requiem_5_skip':\n by_date = self.parish_requiem_5_skip(feast, year, full_year, by_date)\n elif feast.algorithm == 'parish_requiem_4_skip':\n by_date = self.parish_requiem_4_skip(feast, year, full_year, by_date)\n else:\n raise ValueError('\"{algorithm}\" is an unknown algorithm for floating feasts'.format(algorithm=repr(feast.algorithm)))\n return by_date.values()\n\n def of_our_lady(self, feast, year, full_year, by_date):\n \"\"\"Saturdays in a green season without another commemoration are Of Our Lady\"\"\"\n return self._internal_of_our_lady(feast, year, full_year, 
by_date, False)\n\n def of_our_lady_old(self, feast, year, full_year, by_date):\n \"\"\"Year-round, Saturdays without another commemoration were Of Our Lady\"\"\"\n return self._internal_of_our_lady(feast, year, full_year, by_date, True)\n\n def _internal_of_our_lady(self, feast, year, full_year, by_date, is_old_style):\n \"\"\"Handles both the old and new versions of Of Our Lady\"\"\"\n current_day = datetime.date(year, 1, 1)\n while current_day.year == year:\n # only Saturdays\n if utils.weekday(current_day) != 'sat':\n current_day = current_day + datetime.timedelta(days=1)\n continue\n # only if the feast is valid for this day\n ok = valid_in_list([feast], current_day)\n if ok is None:\n current_day = current_day + datetime.timedelta(days=1)\n continue\n # (new style) only if the season is green\n cdate = utils.day_to_lookup(current_day)\n if not is_old_style and full_year[cdate].season.color != 'green':\n current_day = current_day + datetime.timedelta(days=1)\n continue\n # only if another feast is not scheduled for that day\n if len(full_year[cdate].feasts) == 0:\n if cdate not in by_date:\n by_date[cdate] = { 'day': copy.deepcopy(current_day) , 'feasts': [] }\n by_date[cdate]['feasts'].append(feast)\n current_day = current_day + datetime.timedelta(days=1)\n return by_date\n\n def first_bcp(self, feast, year, full_year, by_date):\n \"\"\"First free weekday after Pentecost for which there's no other commemoration\"\"\"\n current_day = easter(year) + datetime.timedelta(weeks=7)\n while current_day.year == year:\n # only weekdays\n if utils.weekday(current_day) == 'sun':\n current_day = current_day + datetime.timedelta(days=1)\n continue\n # only if the feast is valid for this day\n ok = valid_in_list([feast], current_day)\n if ok is None:\n current_day = current_day + datetime.timedelta(days=1)\n continue\n # only if another feast is not scheduled for that day\n cdate = utils.day_to_lookup(current_day)\n if len(full_year[cdate].feasts) == 0:\n if cdate not in by_date:\n by_date[cdate] = { 'day': copy.deepcopy(current_day) , 'feasts': [] }\n by_date[cdate]['feasts'].append(feast)\n break\n current_day = current_day + datetime.timedelta(days=1)\n return by_date\n\n def parish_requiem(self, feast, year, full_year, by_date):\n \"\"\"First five days available after All Souls, demoting commemorations to notes\"\"\"\n return self._internal_parish_requiem(feast, year, full_year, by_date, 5, False)\n\n def parish_requiem_5_skip(self, feast, year, full_year, by_date):\n \"\"\"First five days available after All Souls, skipping commemorations\"\"\"\n return self._internal_parish_requiem(feast, year, full_year, by_date, 5, True)\n\n def parish_requiem_4_skip(self, feast, year, full_year, by_date):\n \"\"\"First four days available after All Souls, skipping commemorations\"\"\"\n return self._internal_parish_requiem(feast, year, full_year, by_date, 4, True)\n\n def _internal_parish_requiem(self, feast, year, full_year, by_date, count_days, skip_comms):\n \"\"\"Handles the old and new versions of the Parish Requiem\"\"\"\n current_day = datetime.date(year, 11, 2)\n all_souls = None\n if count_days == 5:\n names = [' (A-E)', ' (F-K)', ' (L-M)', ' (O-Q)', ' (R-Z)']\n else:\n names = ['', '', '', '']\n index = 0\n while current_day.year == year:\n cdate = utils.day_to_lookup(current_day)\n if all_souls is None:\n for f in full_year[cdate].feasts:\n if f.code() == 'all-souls':\n all_souls = full_year[cdate].day\n break\n else:\n ok = True\n cname = None\n if skip_comms:\n if 
full_year[cdate].current_precedence < 60:\n ok = False\n else:\n if full_year[cdate].current_precedence < 50:\n ok = False\n else:\n if full_year[cdate].current_feast is not None:\n cname = '(' + full_year[cdate].current_feast.name() + ')'\n if ok:\n if index < len(names):\n f = copy.deepcopy(feast)\n f.name = feast.name + names[index]\n f.note = cname\n if cdate not in by_date:\n by_date[cdate] = { 'day': copy.deepcopy(current_day) , 'feasts': [] }\n by_date[cdate]['feasts'].append(f)\n index += 1\n else:\n break\n current_day = current_day + datetime.timedelta(days=1)\n return by_date\n\n","sub_path":"bin/calendar_builder/fetch/floating_feasts.py","file_name":"floating_feasts.py","file_ext":"py","file_size_in_byte":8070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553822419","text":"#!/usr/bin/python\n#A module to parse a file and create the dict of resource-id as key\n#and tagging value as its value \n\n\ndef parsefile(fileName):\n fileObj = open(fileName)\n fList = fileObj.readlines()\n contDict = {}\n for fL in fList:\n if not fL[0] == \"#\":\n tagInfo = fL.strip().split(',')\n resKey = tagInfo[0]\n resVal = {}\n for tI in tagInfo[1:]:\n tagList = tI.split(':')\n resVal.update({tagList[0]:tagList[1]})\n \n contDict.update({resKey:resVal})\n else:\n continue \n \n return contDict\n","sub_path":"parse2dict.py","file_name":"parse2dict.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308635413","text":"import RPi.GPIO as GPIO\n\nimport time\n\nfrom firebase import firebase\n\nimport requests\n\n\nfirebase_url = 'https://multisensor9.firebaseio.com'\ntimestamp = time.strftime(\"%Y_%m_%d\" + \"@ \" + \"%H:%M%S\")\nuserName = ''\nuserPass = ''\nreading = 0\n\nGPIO.setwarnings(False)\n\n\n\nTRIG = 23\n\nECHO = 24\n\ndef distChecker ():\n \n for i in range (1, 10):\n\n## print (\"Distance Measurement In Progress\")\n \n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(TRIG,GPIO.OUT)\n\n GPIO.setup(ECHO,GPIO.IN)\n\n GPIO.output(TRIG, False)\n\n## print (\"Waiting For Sensor To Settle\")\n\n time.sleep(2)\n\n GPIO.output(TRIG, True)\n\n time.sleep(0.00001)\n\n GPIO.output(TRIG, False)\n\n while GPIO.input(ECHO)==0:\n\n pulse_start = time.time()\n \n while GPIO.input(ECHO)==1:\n\n pulse_end = time.time()\n \n pulse_duration = pulse_end - pulse_start\n\n distance = pulse_duration*17150\n\n distance = round(distance, 2)\n \n result = (\"Distance: \" , distance , \" cm\")\n\n GPIO.cleanup()\n \n return result\n \nwhile True:\n print (distChecker())\n ","sub_path":"initialiseHeight.py","file_name":"initialiseHeight.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262813478","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 19 22:22:25 2019\r\n\r\n@author: TEJA\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.cluster import KMeans,AgglomerativeClustering\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\ndataset=pd.read_csv(r\"C:\\Users\\TEJA\\Desktop\\Mall_Customers.csv\")\r\nX=dataset.iloc[:,[3,4]]\r\n##### k-means ######\r\n#finding the optimal n vallue using elbow method\r\n\r\neucl_dist_sum=[]\r\nfor i in range(1,11):\r\n kmeans=KMeans(n_clusters=i,init=\"k-means++\")\r\n kmeans.fit(X)\r\n eucl_dist_sum.append(kmeans.inertia_)\r\n \r\n \r\nplt.plot(range(1,11),eucl_dist_sum)#from this elbow plot we can find the 
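optimal no of clusters\r\n# Added sketch (not in the original): a simple programmatic stand-in for reading\r\n# the elbow off the plot is to look at where the drop in inertia flattens, e.g.:\r\n#   drops = [eucl_dist_sum[k-1] - eucl_dist_sum[k] for k in range(1, len(eucl_dist_sum))]\r\n# The value 5 below was read off the plot by eye.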
\r\nn_clusters=5\r\n\r\n#fitting dataset to kmeans\r\nkmeans=KMeans(n_clusters=5,init=\"k-means++\",max_iter=500)\r\npredicted=kmeans.fit_predict(X)\r\n\r\n#visualising the results\r\nX=np.array(X)\r\nplt.title(\"k-means\")\r\nplt.scatter(X[predicted==0,0],X[predicted==0,1],c=\"red\")\r\nplt.scatter(X[predicted==1,0],X[predicted==1,1],c=\"blue\")\r\nplt.scatter(X[predicted==2,0],X[predicted==2,1],c=\"green\")\r\nplt.scatter(X[predicted==3,0],X[predicted==3,1],c=\"magenta\")\r\nplt.scatter(X[predicted==4,0],X[predicted==4,1],c=\"black\")\r\n\r\n####hierarchial clustering#####\r\n\r\n\"\"\"In general Manhattan distance is greater than Euclidean\"\"\"\r\n\r\n\"\"\"complete linkage considers the max distance between the clusters to form new clusters\"\"\"\r\n\r\n\r\n\r\n\r\n\"\"\"The affinity used here is L1, which uses Manhattan distance to measure the distance between the points,\r\nwhich is greater than Euclidean. Outliers will play a major role if we use complete linkage, as it takes the max distance between clusters.\r\nUsing average linkage will give better results than complete linkage because we take the average of the distances over all the points,\r\nso the effect of outliers is diminished\"\"\"\r\n\r\n\r\naggclus1=AgglomerativeClustering(n_clusters=5,affinity=\"l1\",linkage=\"average\")\r\npredicted2=aggclus1.fit_predict(X)\r\n\r\nplt.title(\"l1 using average linkage\")\r\nplt.scatter(X[predicted2==0,0],X[predicted2==0,1],c=\"red\")\r\nplt.scatter(X[predicted2==1,0],X[predicted2==1,1],c=\"blue\")\r\nplt.scatter(X[predicted2==2,0],X[predicted2==2,1],c=\"green\")\r\nplt.scatter(X[predicted2==3,0],X[predicted2==3,1],c=\"magenta\")\r\nplt.scatter(X[predicted2==4,0],X[predicted2==4,1],c=\"black\")\r\n\r\n\r\n\r\n\r\n\"\"\"While we use L2 affinity, which uses Euclidean distance to measure the similarity between the points (and is less than Manhattan),\r\nif we use average linkage the outliers will remain isolated. So complete linkage will give a better result than average\"\"\"\r\n\r\naggclus2=AgglomerativeClustering(n_clusters=5,affinity=\"l2\",linkage=\"complete\")\r\npredicted3=aggclus2.fit_predict(X)\r\n\r\n\r\nplt.title(\"l2 using complete linkage\")\r\nplt.scatter(X[predicted3==0,0],X[predicted3==0,1],c=\"red\")\r\nplt.scatter(X[predicted3==1,0],X[predicted3==1,1],c=\"blue\")\r\nplt.scatter(X[predicted3==2,0],X[predicted3==2,1],c=\"green\")\r\nplt.scatter(X[predicted3==3,0],X[predicted3==3,1],c=\"magenta\")\r\nplt.scatter(X[predicted3==4,0],X[predicted3==4,1],c=\"black\")\r\n\r\n\r\n\"\"\"Considering the above three, we can say that agglomerative clustering performed better than k-means\"\"\"\r\n\"\"\"While L1 and L2 gave almost equal results, L2 was somewhat better\"\"\"","sub_path":"K-means.py","file_name":"K-means.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"36527066","text":"# Imports:\nfrom time import sleep\nimport RPi.GPIO as GPIO\nimport os\n\n# GPIO setups:\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(16, GPIO.IN)\n\n# Constantly check whether the button is pressed; if it is, run finale_code.py:\nwhile 1:\n\tknop = GPIO.input(16)\n\n\tif (knop == 1):\n\t\tos.system(\"python3 finale_code.py\")","sub_path":"opnieuw_scannen.py","file_name":"opnieuw_scannen.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"231751416","text":"from 
nonebot_plugin_datastore import get_plugin_data\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.orm import Mapped, MappedAsDataclass, mapped_column\n\nModel = get_plugin_data().Model\n\n\nclass MorningGreeting(MappedAsDataclass, Model):\n __table_args__ = (\n UniqueConstraint(\n \"platform\",\n \"bot_id\",\n \"group_id\",\n \"guild_id\",\n \"channel_id\",\n name=\"unique_morning_greeting\",\n ),\n )\n\n id: Mapped[int] = mapped_column(init=False, primary_key=True)\n platform: Mapped[str]\n bot_id: Mapped[str]\n group_id: Mapped[str] = mapped_column(default=\"\")\n guild_id: Mapped[str] = mapped_column(default=\"\")\n channel_id: Mapped[str] = mapped_column(default=\"\")\n","sub_path":"src/plugins/morning/plugins/morning_greeting/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"303848898","text":"import requests, json\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\nurl = 'https://www.kerboodle.com/users/login'\n\ns = requests.Session()\n# s.get(url)\n\npost_request = s.post(url, data={\n 'utf8': True,\n 'user[login]': 'omansell',\n 'user[password]': 'brabble99',\n 'user[institution_code]': 'hwb6',\n 'user_return_to': '/app',\n 'commit': 'Log in'\n })\n\nprint(post_request.headers)\n\n\nheaders = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Connection': 'keep-alive',\n 'Cookie': '_ga=GA1.2.2039826782.1531134061; _gid=GA1.2.684184152.1531134061; oup-cookie=1_9-7-2018; _session_id=fea98a6fe1600af2199768de88f9d65a',\n 'Host': 'www.kerboodle.com',\n 'Referer': 'https://www.kerboodle.com/app/courses/15900/modules/Resources',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'X-CSRF-Token': 'LXU5YGMrUrC+0pm0kkC6b/oZdoS63/EIjIohfZm6H6I=',\n 'X-Requested-With': 'XMLHttpRequest'\n}\ntest = s.get(\n 'https://www.kerboodle.com/api/courses/15900/contents/search?&per_page=2836&page=1&sort=&order=&query=&module=Resources&tags=&user=&parent=&new_contents=&type=&source=&used_as=&filter=all%20resources&content_ids=&_=1531140400522', headers=headers)\n\n\nurls = []\n\nfor entry in test.json()['entries']:\n urls.append(entry['content_object_link'])\n\nfor url in urls:\n query_url = 'https://www.kerboodle.com/' + url\n filename = url.rsplit('/',1)[1]\n response = requests.get(query_url)\n if response.status_code == 200:\n with open(\"/Users/olivermansell/Desktop/kerboodlefiles/\" + filename, 'wb') as f:\n f.write(response.content)\n","sub_path":"archive/kerboodle.py","file_name":"kerboodle.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"237209272","text":"from collections import namedtuple\nSubscriber = namedtuple('Subscriber', ['addr', 'joined'])\nsub = Subscriber('123@appl.com', '2012-10-9')\nprint(sub)\nprint(sub.joined)\nprint(len(sub))\naddr, joined = sub\nprint(addr)\nprint(joined)\n\n\"\"\"\ndef compute_cost(records):\n total = 0.0\n for rec in records:\n total += rec[1] * rec[2]\n return total\n\"\"\"\n\n\nStock = namedtuple('Stock', ['name', 'shares', 'price'])\n\n\ndef compute_cost(records):\n total = 0.0\n for rec in records:\n s = Stock(*rec)\n total += s.shares * s.price\n return total\n\n\nstock = Stock('apple', 132.2, 132.2)\nprint(stock)\nres = 
compute_cost([stock])  # fixed: compute_cost expects an iterable of records; a bare Stock would be unpacked field-by-field\n","sub_path":"namedTuple.py","file_name":"namedTuple.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"61453214","text":"import torch\nimport numpy as np\nfrom torch import nn\nfrom torch.distributions import Distribution, Normal, Independent\nfrom torch.distributions.constraints import Constraint, real\nfrom torch.distributions.transforms import Transform, ComposeTransform\nfrom torch.distributions import biject_to\n\nfrom typing import Optional, Iterable\n\nfrom scipy.optimize import fsolve\n\nfrom pyro.distributions import transforms, constraints, TransformModule\nfrom pyro.distributions.transforms.utils import clamp_preserve_gradients\nfrom pyro.nn import AutoRegressiveNN, DenseNN\n\nfrom torchdiffeq import odeint_adjoint, odeint\n\nTYPES = [\n    \"planar\",\n    \"radial\",\n    \"sylvester\",\n    \"polynomial\",\n    \"affine_diag\",\n    \"affine_coupling\",\n    \"affine_autoregressive\",\n    \"neural_autoregressive\",\n    \"block_autoregressive\",\n    \"spline\",\n    \"spline_coupling\",\n    \"spline_autoregressive\",\n    \"neural_ode\",\n]\n\n\ndef get_parameters(t: Transform):\n    \"\"\" Recursive helper function to determine all possible parameters \"\"\"\n    if hasattr(t, \"parameters\"):\n        yield from t.parameters()\n    elif isinstance(t, ComposeTransform):\n        for part in t.parts:\n            yield from get_parameters(part)\n    else:\n        pass\n\n\ndef get_modules(t: Transform):\n    \"\"\" Recursive helper function to determine all modules \"\"\"\n    if isinstance(t, nn.Module):\n        yield t\n    elif isinstance(t, ComposeTransform):\n        for part in t.parts:\n            yield from get_modules(part)\n    else:\n        pass\n\n\ndef _inverse(self, y, xtol=1e-4):\n    \"\"\" Numerical root-finding algorithm to evaluate the log probability if the inverse\n    is not analytically tractable. \"\"\"\n    with torch.no_grad():\n        shape = tuple(y.shape)\n\n        def f(x):\n            x = torch.from_numpy(x).reshape(shape).float()\n            return (self(x) - y).flatten().numpy()\n\n        x = torch.tensor(\n            fsolve(f, np.random.randn(*shape).flatten(), xtol=xtol)\n        ).float()\n        x = x.reshape(shape)\n\n    return x\n\n\ndef _inverse_batched(self, y, batch_size=20):\n    \"\"\" Batched inverse. For large batches of data it is more efficient to process them in\n    small batches, because a single 'hard' point can slow down all the others.\"\"\"\n    shape = y.shape\n    batch_size = min(batch_size, shape[0])\n    xs = []\n    y = y.reshape(-1, shape[-1])\n    for i in range(0, shape[0], batch_size):\n        y_i = y[i : i + batch_size, :]\n        x_i = _inverse(self, y_i)\n        xs.append(x_i)\n    x = torch.vstack(xs).reshape(shape)\n    # For batched we need another forward pass to get correct determinant in cache\n    self(y)\n    return x\n\n\nclass TransformedDistribution(torch.distributions.TransformedDistribution):\n    \"\"\" This is TransformedDistribution with the capability to return parameters!\"\"\"\n\n    def parameters(self):\n        for t in self.transforms:\n            yield from get_parameters(t)\n\n    def modules(self):\n        for t in self.transforms:\n            yield from get_modules(t)\n\n\nclass AffineTransform(transforms.AffineTransform):\n    \"\"\" Trainable version of an Affine transform. 
This can be used to get diagonal\n gaussian approximation \"\"\"\n\n def parameters(self):\n self.loc.requires_grad_(True)\n self.scale.requires_grad_(True)\n yield self.loc\n yield self.scale\n\n def with_cache(self, cache_size=1):\n if self._cache_size == cache_size:\n return self\n return AffineTransform(self.loc, self.scale, cache_size=cache_size)\n\n def log_abs_jacobian_diag(self, x, y):\n return self.scale\n\n\nclass LowerCholeskyAffine(transforms.LowerCholeskyAffine):\n \"\"\" Trainable version of a Lower Cholesky Affine transform. This can be used to get\nfull Gaussian approximations.\"\"\"\n\n def parameters(self):\n self.loc.requires_grad_(True)\n self.scale_tril.requires_grad_(True)\n yield self.loc\n yield self.scale_tril\n\n def with_cache(self, cache_size=1):\n if self._cache_size == cache_size:\n return self\n return LowerCholeskyAffine(self.loc, self.scale_tril, cache_size=cache_size)\n\n def log_abs_det_jacobian(self, x, y):\n \"\"\" This modification allows batched scale_tril matrices. \"\"\"\n return self.log_abs_jacobian_diag(x, y).sum(-1)\n\n def log_abs_jacobian_diag(self, x, y):\n \"\"\" This returns the full diagonal which is necessary to compute conditionals \"\"\"\n dim = self.scale_tril.dim()\n return torch.diagonal(self.scale_tril, dim1=dim - 2, dim2=dim - 1).log()\n\n\nclass AffineAutoregressive(transforms.AffineAutoregressive):\n \"\"\" Modification that also returns the jacobian diagonal. \"\"\"\n\n def log_abs_jacobian_diag(self, x, y):\n \"\"\"\n Calculates the diagonal of the log Jacobian\n \"\"\"\n x_old, y_old = self._cached_x_y\n if x is not x_old or y is not y_old:\n self(x)\n\n if self._cached_log_scale is not None:\n log_scale = self._cached_log_scale\n elif not self.stable:\n _, log_scale = self.arn(x)\n log_scale = clamp_preserve_gradients(\n log_scale, self.log_scale_min_clip, self.log_scale_max_clip\n )\n else:\n _, logit_scale = self.arn(x)\n log_scale = self.logsigmoid(logit_scale + self.sigmoid_bias)\n return log_scale\n\n\nclass SplineAutoregressive(transforms.SplineAutoregressive):\n \"\"\" Modification that also returns the jacobian diagonal. \"\"\"\n\n def log_abs_jacobian_diag(self, x, y):\n \"\"\"\n Calculates the diagonal of the log Jacobian\n \"\"\"\n x_old, y_old = self._cached_x_y\n if x is not x_old or y is not y_old:\n self(x)\n\n return self._cache_log_detJ\n\n\ndef build_flow(\n event_shape: torch.Size,\n support: Constraint = real,\n num_flows: int = 5,\n type: str = \"affine_autoregressive\",\n permute: bool = True,\n batch_norm: bool = False,\n base_dist: Distribution = None,\n **kwargs,\n) -> TransformedDistribution:\n f\"\"\"Generates a Transformed Distribution where the base_dist is transformed by\n num_flows normalizing flows of specified type.\n \n \n \n Args:\n event_shape: Dimension of the events generated by the distribution.\n support: The support of the distribution.\n num_flows: Number of normalizing flows that are concatenated.\n type: The type of normalizing flow. Should be one of {TYPES}\n permute: Permute dimension after each layer. 
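This may be helpful for\n            autoregressive or coupling nets.\n        batch_norm: Perform batch normalization.\n        base_dist: Base distribution.\n        kwargs\n    Returns:\n        TransformedDistribution\n\n    Example (added sketch, not in the original; the values are illustrative):\n\n        dist = build_flow(2, num_flows=3, type=\"affine_autoregressive\")\n        x = dist.rsample(torch.Size([10]))  # 10 samples from the flow\n        logp = dist.log_prob(x)             # log-density via the flow's Jacobian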
This may be helpful for\n            autoregressive or coupling nets.\n        batch_norm: Perform batch normalization.\n        base_dist: Base distribution.\n        kwargs: Further keyword arguments passed on to flow_block.\n    Returns:\n        TransformedDistribution\n    \n    \"\"\"\n\n    # Base distribution is standard normal if not specified\n    if base_dist is None:\n        base_dist = Independent(\n            Normal(torch.zeros(event_shape), torch.ones(event_shape)), 1,\n        )\n    # Generate normalizing flow\n    if isinstance(event_shape, int):\n        dim = event_shape\n    elif isinstance(event_shape, Iterable):\n        dim = event_shape[-1]\n    else:\n        raise ValueError(\"The event_shape must either be an integer or an iterable.\")\n\n    flows = []\n    for i in range(num_flows):\n        flows.append(flow_block(dim, type, **kwargs).with_cache())\n        if permute and i < num_flows - 1:\n            flows.append(transforms.permute(dim).with_cache())\n        if batch_norm and i < num_flows - 1:\n            flows.append(transforms.batchnorm(dim))\n    link_flow = biject_to(support)\n    flows.append(link_flow.with_cache())\n    dist = TransformedDistribution(base_dist, flows)\n    return dist\n\n\nclass DenseNN(DenseNN):\n    \"\"\" More powerful dense net compared to the pyro implementation \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        layers = [\n            nn.Sequential(\n                nn.Linear(self.input_dim + self.context_dim, self.hidden_dims[0]),\n                nn.ReLU(),\n            )\n        ]\n        for hidden_dim in self.hidden_dims:\n            layers += [nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU())]\n\n        layers += [\n            nn.Sequential(nn.Linear(self.hidden_dims[-1], self.output_multiplier))\n        ]\n        self.layers = nn.ModuleList(layers)\n\n\ndef trace_df_dz(f, z):\n    trace = 0.0\n    for i in range(z.shape[-1]):\n        trace += (\n            torch.autograd.grad(f[:, i].sum(), z, create_graph=True)[0]\n            .contiguous()[:, i]\n            .contiguous()\n        )\n\n    return trace.contiguous()\n\n\nclass ODEnet(nn.Module):\n    def __init__(\n        self, input_dim, hidden_dim=20, num_layers=2, normalization=nn.Identity\n    ):\n        super().__init__()\n        self.time_embed = nn.Sequential(nn.Linear(1, hidden_dim), nn.ELU())\n        self.input_embed = nn.Sequential(nn.Linear(input_dim, hidden_dim), nn.ELU())\n        self.layers = nn.ModuleList(\n            [\n                nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ELU())\n                for _ in range(num_layers)\n            ]\n        )\n        self.out_layer = nn.Linear(hidden_dim, input_dim)\n\n    def forward(self, t, x):\n        h = self.input_embed(x)\n        h = h + self.time_embed(t.reshape(-1, 1))\n        for layer in self.layers:\n            h = layer(h)\n        out = self.out_layer(h)\n        return out\n\n\nclass NeuralODETransform(TransformModule):\n    domain = constraints.real_vector\n    codomain = constraints.real_vector\n    bijective = True\n    sign = +1\n\n    def __init__(\n        self,\n        ODEnet,\n        T=1.0,\n        t0=0.0,\n        atol=1e-5,\n        rtol=1e-5,\n        solver=\"rk4\",\n        options=dict(),\n        adjoint=False,\n    ):\n        \"\"\"This is a continuous normalizing flow, which uses a neural ODE transform.\n\n        Args:\n            ODEnet: A network that takes time and x as inputs and outputs dx.\n            T: End time of the ODE (default T=1)\n            t0: Start time of the ODE (default t0=0)\n            atol: Absolute tolerance of the ODE solver\n            rtol: Relative tolerance of the ODE solver\n            solver: The ODE solver (one implemented in torchdiffeq)\n            options: Further options of odeint\n            adjoint: If the adjoint method should be used to compute gradients.\n        \"\"\"\n        super().__init__(cache_size=1)\n        self.net = ODEnet\n        self.T = float(T)\n        self.t0 = float(t0)\n        self.atol = float(atol)\n        self.rtol = float(rtol)\n        self.solver = solver\n        self.adjoint = adjoint\n        self.options = options\n\n    def _call(self, x):\n        logp_diff_t0 = torch.zeros(x.shape[0], 1)\n        if not self.adjoint:\n            y, 
logP_diff_T = odeint(\n self.ode_func,\n (x, logp_diff_t0),\n torch.tensor([self.t0, self.T]),\n atol=self.atol,\n rtol=self.rtol,\n method=self.solver,\n options=self.options,\n )\n else:\n y, logP_diff_T = odeint_adjoint(\n self.ode_func,\n (x, logp_diff_t0),\n torch.tensor([self.t0, self.T]),\n atol=self.atol,\n rtol=self.rtol,\n method=self.solver,\n adjoint_params=self.net.parameters(),\n options=self.options,\n )\n self._cached_logP_diff_T = -logP_diff_T[-1].flatten()\n return y[-1]\n\n def _inverse(self, y):\n logp_diff_t0 = torch.zeros(y.shape[0], 1)\n if not self.adjoint:\n x, logP_diff_T = odeint(\n self.ode_func,\n (y, logp_diff_t0),\n torch.tensor([self.T, self.t0]),\n atol=self.atol,\n rtol=self.rtol,\n method=self.solver,\n options=self.options,\n )\n else:\n x, logP_diff_T = odeint_adjoint(\n self.ode_func,\n (y, logp_diff_t0),\n torch.tensor([self.T, self.t0]),\n atol=self.atol,\n rtol=self.rtol,\n method=self.solver,\n adjoint_params=self.net.parameters(),\n options=self.options,\n )\n self._cached_logP_diff_T = logP_diff_T[-1].flatten()\n return x[-1]\n\n def ode_func(self, t, states):\n z = states[0]\n logp_z = states[1]\n batch_size = z.shape[0]\n with torch.set_grad_enabled(True):\n z.requires_grad_(True)\n dz_dt = self.net(t, z)\n dlogp_z_dt = -trace_df_dz(dz_dt, z)\n return (dz_dt, dlogp_z_dt)\n\n def log_abs_det_jacobian(self, x, y):\n return self._cached_logP_diff_T\n\n\ndef neural_ode_transform(dim, **kwargs):\n hidden_dim = 5 * dim + 10\n net = ODEnet(dim, hidden_dim=hidden_dim)\n t = NeuralODETransform(net, **kwargs)\n return t\n\n\ndef flow_block(dim, type, **kwargs):\n r\"\"\" Gives pyro flow of specified type.\n Args:\n dim: Event shape of input. \n type: Type, should be one of \n \n Returns:\n pyro.distributions.transform: Transform object of specified type\n \n \"\"\"\n inverse = kwargs.pop(\"inverse\", False)\n if type.lower() == \"planar\":\n flow = transforms.planar(dim, **kwargs)\n flow._inverse = lambda x: _inverse_batched(flow, x)\n elif type.lower() == \"radial\":\n flow = transforms.radial(dim, **kwargs)\n flow._inverse = lambda x: _inverse_batched(flow, x)\n elif type.lower() == \"sylvester\":\n flow = transforms.sylvester(dim, **kwargs)\n flow._inverse = lambda x: _inverse_batched(flow, x)\n elif type.lower() == \"polynomial\":\n flow = transforms.polynomial(dim, **kwargs)\n elif type.lower() == \"affine_diag\":\n # This is equivalent to a gaussian with diagonal covariance (up to support link transforms)\n flow = AffineTransform(torch.zeros(dim), torch.ones(dim))\n elif type.lower() == \"affine_tril\":\n # This is equivalent to a gaussian with full covariance (up to support link transforms)\n flow = LowerCholeskyAffine(torch.zeros(dim), torch.eye(dim))\n elif type.lower() == \"affine_coupling\":\n flow = transforms.affine_coupling(dim, **kwargs)\n elif type.lower() == \"affine_autoregressive\":\n hidden_dims = kwargs.get(\"hidden_dims\", None)\n if hidden_dims is None:\n hidden_dims = [5 * dim + 5]\n arn = AutoRegressiveNN(dim, hidden_dims)\n flow = AffineAutoregressive(arn, log_scale_min_clip=-3.0)\n elif type.lower() == \"neural_autoregressive\":\n flow = transforms.neural_autoregressive(dim, **kwargs)\n flow._inverse = lambda x: _inverse_batched(flow, x)\n elif type.lower() == \"block_autoregressive\":\n flow = transforms.block_autoregressive(dim, **kwargs)\n flow._inverse = lambda x: _inverse_batched(flow, x)\n elif type.lower() == \"spline\":\n flow = transforms.spline(dim, **kwargs)\n elif type.lower() == \"spline_coupling\":\n # Linear or 
quadratic rational splines\n split_dim = kwargs.get(\"split_dim\", dim // 2)\n hidden_dims = kwargs.get(\"hidden_dims\", [dim * 20, dim * 20])\n count_bins = kwargs.get(\"count_bins\", 8)\n order = kwargs.get(\"order\", \"linear\")\n bound = kwargs.get(\"bound\", 3.0)\n if order == \"linear\":\n param_dims = [\n (dim - split_dim) * count_bins,\n (dim - split_dim) * count_bins,\n (dim - split_dim) * (count_bins - 1),\n (dim - split_dim) * count_bins,\n ]\n else:\n param_dims = [\n (dim - split_dim) * count_bins,\n (dim - split_dim) * count_bins,\n (dim - split_dim) * (count_bins - 1),\n ]\n nn = DenseNN(split_dim, hidden_dims, param_dims)\n flow = transforms.SplineCoupling(\n dim, split_dim, nn, count_bins, bound=bound, order=order\n )\n elif type.lower() == \"spline_autoregressive\":\n # Linear or quadratic rational spline transform\n hidden_dims = kwargs.get(\"hidden_dims\", [dim * 10, dim * 10])\n count_bins = kwargs.get(\"count_bins\", 8)\n order = kwargs.get(\"order\", \"linear\")\n bound = kwargs.get(\"bound\", 3.0)\n if order == \"linear\":\n param_dims = [count_bins, count_bins, (count_bins - 1), count_bins]\n else:\n param_dims = [count_bins, count_bins, (count_bins - 1)]\n nn = AutoRegressiveNN(dim, hidden_dims, param_dims)\n flow = transforms.SplineAutoregressive(\n dim, nn, count_bins, bound=bound, order=order\n )\n elif type.lower() == \"neural_ode\":\n flow = neural_ode_transform(dim, **kwargs)\n else:\n raise NotImplementedError()\n\n if inverse:\n # That is relevant for e.g. Autoregressive Flows or Flows where the inverse is\n # numerically tractable.\n flow = flow.inv\n return flow\n","sub_path":"pck1/sbi/vi/flows.py","file_name":"flows.py","file_ext":"py","file_size_in_byte":16923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477684669","text":"import numpy as np\n\nfrom scratch.sam.util import *\nfrom scratch.neural_net.lib import *\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\n\nimport matplotlib as mpl\n\nimport os, glob, pathlib\n\nimport itertools\nfrom sklearn import linear_model\n\nimport statsmodels.api as sm\n\ncolor_m1 = '#3060A1'\ncolor_m2 = '#B76029'\ncolor_m7 = '#5E813F'\ncolor_m3 = '#5C198E'\n\n# idx is a list of n_merge (k) sets of indices (each set is a new feature)\ndef gen_multi_feat(feat_vec, names, indices):\n\n n_merge = len(indices)\n\n new_feat = np.zeros((feat_vec.shape[0], n_merge))\n new_names = np.empty(n_merge, dtype=object)\n\n for i, idx in enumerate(indices):\n new_feat[:,i] = np.dot(feat_vec[:,idx], np.ones(len(idx)))\n\n new_name = names[idx[0]]\n for j in range(1, len(idx)):\n new_name += '+' + names[idx[j]]\n\n new_names[i] = new_name\n\n\n return new_feat, new_names\n\ndef get_ses(feat_vec, energies):\n\n reg = linear_model.LinearRegression(fit_intercept=False)\n reg.fit(feat_vec, energies)\n\n pred = reg.predict(feat_vec)\n err = energies - pred\n\n sq_err = np.dot(err, err)\n mse = sq_err / (feat_vec.shape[0] - feat_vec.shape[1])\n\n mat = np.dot(feat_vec.T, feat_vec)\n\n var = np.linalg.inv(mat) * mse\n\n return reg, np.sqrt(np.diag(var))\n\n\n\n## Plot transferability, (assume M3...)\n\nmpl.rcParams.update({'axes.labelsize': 45})\nmpl.rcParams.update({'xtick.labelsize': 30})\nmpl.rcParams.update({'ytick.labelsize': 30})\nmpl.rcParams.update({'axes.titlesize':40})\nmpl.rcParams.update({'legend.fontsize':14})\n\nhomedir = os.environ['HOME']\n\nindices_to_delete = 
np.array([5,6,7,8,9,10,13,14,15,16,17])\npruned_indices = np.array([5,6])\n\nenergies, feat_vec, states, names = extract_from_ds_simple('sam_data_total.dat.npz')\n_, full_feat_vec, _, full_names = extract_from_ds('sam_data_total.dat.npz')\n\n\nindicator = np.zeros((energies.size, 3))\nindicator[:884, -1] = 1\nindicator[884:-228, -2] = 1\nindicator[-228:, -3] = 1\n\n\nfeat_m1, names_m1 = gen_multi_feat(feat_vec, names, [(0,)])\nfeat_m1 = np.append(feat_m1, indicator, axis=1)\n\nfeat_m2, names_m2 = gen_multi_feat(feat_vec, names, [(0,), (1,)])\nfeat_m2 = np.append(feat_m2, indicator, axis=1)\n\nfeat_m3, names_m3 = gen_multi_feat(feat_vec, names, [(0,), (1,2), (2,3)])\nfeat_m3 = np.append(feat_m3, indicator, axis=1)\n\n# ko, noo, nooo_c, nooo_b, nooo_e, noco_b, noco_e\nfeat_m7, names_m7 = gen_multi_feat(feat_vec, names, [(0,), (1,), (2,), (3,), (4,), (11,), (12,)])\nfeat_m7 = np.append(feat_m7, indicator, axis=1)\n\nfull_indices = [(0,), (1,), (2,), (3,), (4,), (5,), (12,), (13,), (19,), (20,),\n (21,), (22,), (28,), (29,), (33,), (34,), (36,), (37,)]\n\nfeat_m18, names_m18 = gen_multi_feat(full_feat_vec, full_names, full_indices)\nfeat_m18 = np.append(feat_m18, indicator, axis=1)\n\n\nrperf_ann = 2.86\n\nperf_m1, err, reg_m1 = fit_multi_k_fold(feat_m1, energies, fit_intercept=False)\nrperf_m1 = np.sqrt(perf_m1)\n\nperf_m2, err, reg_m2 = fit_multi_k_fold(feat_m2, energies, fit_intercept=False)\nrperf_m2 = np.sqrt(perf_m2)\n\nperf_m3, err, reg_m3 = fit_multi_k_fold(feat_m3, energies, fit_intercept=False)\nrperf_m3 = np.sqrt(perf_m3)\n\nperf_m7, err, reg_m7 = fit_multi_k_fold(feat_m7, energies, fit_intercept=False)\nrperf_m7 = np.sqrt(perf_m7)\n\nperf_m18, err, reg_m18 = fit_multi_k_fold(feat_m18, energies, fit_intercept=False)\nrperf_m18 = np.sqrt(perf_m18)\n\n\nindicator = indicator[:9]\n\n\nspec_energies, spec_feat_vec, spec_states, names = extract_from_ds_simple('sam_data_special.dat.npz')\n_, spec_full_feat_vec, _, full_names = extract_from_ds('sam_data_special.dat.npz')\n\n\nspec_feat_m1, names_m1 = gen_multi_feat(spec_feat_vec, names, [(0,)])\nspec_feat_m1 = np.append(spec_feat_m1, indicator, axis=1)\n\nspec_feat_m2, names_m2 = gen_multi_feat(spec_feat_vec, names, [(0,), (1,)])\nspec_feat_m2 = np.append(spec_feat_m2, indicator, axis=1)\n\nspec_feat_m3, names_m3 = gen_multi_feat(spec_feat_vec, names, [(0,), (1,2), (2,3)])\nspec_feat_m3 = np.append(spec_feat_m3, indicator, axis=1)\n\n# ko, noo, nooo_c, nooo_b, nooo_e, noco_b, noco_e\nspec_feat_m7, names_m7 = gen_multi_feat(spec_feat_vec, names, [(0,), (1,), (2,), (3,), (4,), (11,), (12,)])\nspec_feat_m7 = np.append(spec_feat_m7, indicator, axis=1)\n\nfull_indices = [(0,), (1,), (2,), (3,), (4,), (5,), (12,), (13,), (19,), (20,),\n (21,), (22,), (28,), (29,), (33,), (34,), (36,), (37,)]\n\nspec_feat_m18, names_m18 = gen_multi_feat(spec_full_feat_vec, full_names, full_indices)\nspec_feat_m18 = np.append(spec_feat_m18, indicator, axis=1)\n\n\nsort_idx = np.argsort(spec_energies)\npred_m1 = reg_m1.predict(spec_feat_m1)\npred_m2 = reg_m2.predict(spec_feat_m2)\npred_m3 = reg_m3.predict(spec_feat_m3)\npred_m7 = reg_m7.predict(spec_feat_m7)\npred_m18 = reg_m18.predict(spec_feat_m18)\n\nrmse_m1 = np.sqrt(np.mean((spec_energies-pred_m1)**2))\nrmse_m2 = np.sqrt(np.mean((spec_energies-pred_m2)**2))\nrmse_m3 = np.sqrt(np.mean((spec_energies-pred_m3)**2))\nrmse_m7 = np.sqrt(np.mean((spec_energies-pred_m7)**2))\nrmse_m18 = np.sqrt(np.mean((spec_energies-pred_m18)**2))\n\n\nindices = np.arange(5)\nfig, ax = plt.subplots()\n\ncolors = [color_m1, color_m2, 
color_m3, color_m7, 'orange']\nax.bar(indices, [rmse_m1, rmse_m2, rmse_m3, rmse_m7, rmse_m18], color=colors)\nax.set_xticks([])\nax.plot([-1,6], [rperf_m3.mean(), rperf_m3.mean()], '--', color=color_m3, label='M3', linewidth=4)\nax.plot([-1,6], [rperf_m2.mean(), rperf_m2.mean()], '--', color=color_m2, label='M2', linewidth=4)\nax.plot([-1,6], [rperf_m1.mean(), rperf_m1.mean()], '--', color=color_m1, label='M1', linewidth=4)\nax.plot([-1,6], [rperf_m7.mean(), rperf_m7.mean()], '--', color=color_m7, label='M7', linewidth=4)\nax.plot([-1,6], [rperf_m18.mean(), rperf_m18.mean()], '--', color='orange', label='M18', linewidth=4)\nax.plot([-1,6], [rperf_ann, rperf_ann], '--', color='k', label='ANN', linewidth=4)\nplt.legend()\n\n\n\n\nimport pandas\n\n#feat_m2 = pandas.DataFrame(feat_m2, columns=np.append(names_m2, ['d1', 'd2', 'd3']))\n#ols = sm.OLS(energies, feat_m2)\n#ols_res = ols.fit()\n#ols_res.summary()\n\n#feat_m3 = pandas.DataFrame(feat_m3, columns=np.append(names_m3, ['d1', 'd2', 'd3']))\n#ols = sm.OLS(energies, feat_m3)\n#ols_res = ols.fit()\n#ols_res.summary()\n\nindices = np.arange(feat_m3.shape[0])\nindices_66 = indices[:884]\nindices_49 = indices[884:-228]\nindices_44 = indices[-228:]\n\nfeat = feat_m3\nfeat_66 = np.delete(feat[indices_66], [-2,-3], axis=1)\nfeat_49 = np.delete(feat[indices_49], [-1,-3], axis=1)\nfeat_44 = np.delete(feat[indices_44], [-1,-2], axis=1)\n\nn_feat = feat.shape[1] - 3\n\nreg_all, ses_all = get_ses(feat, energies)\nreg_66, ses_66 = get_ses(feat_66, energies[indices_66])\nreg_49, ses_49 = get_ses(feat_49, energies[indices_49])\nreg_44, ses_44 = get_ses(feat_44, energies[indices_44])\n\nidx = np.arange(4)\n\n\ncolors = ['k', 'yellow', 'blue', 'green']\n\nfor i in range(n_feat):\n plt.close('all')\n fig, ax = plt.subplots()\n ax.bar(idx+i*5*n_feat, [reg_all.coef_[i], reg_66.coef_[i], reg_49.coef_[i], reg_44.coef_[i]],\n yerr=[ses_all[i], ses_66[i], ses_49[i], ses_44[i]], width=1, color=colors)\n\n if i == 0:\n #ax.set_ylim([3.0,5.0])\n ax.set_ylim([4.5,6.0])\n ax.set_xticks([])\n fig.tight_layout()\n plt.tight_layout()\n\n plt.savefig(\"/Users/nicholasrego/Desktop/fig_{}\".format(i), transparent=True)\n\n","sub_path":"scratch/sam/gen_figs/old_plot_fig4.py","file_name":"old_plot_fig4.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31974137","text":"from django.db.models.fields.related import ManyToManyField\r\n\r\ndef to_dict(instance):\r\n \"\"\" Returns a provided Django Model Instance as python dictionary. 
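Many-to-many relations are currently emitted as empty lists (see the note in the loop below); all other concrete fields are copied with getattr. 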
\"\"\"\r\n opts = instance._meta\r\n data = {}\r\n for f in opts.concrete_fields + opts.many_to_many:\r\n if isinstance(f, ManyToManyField):\r\n if instance.pk is None:\r\n data[f.name] = []\r\n else:\r\n data[f.name] = []\r\n # Following line eventually not working; To check if m2m-Relationship is required.\r\n # data[f.name] = list(f.value_from_object(instance).values_list('pk', flat=True))\r\n else:\r\n data[f.name] = getattr(instance, f.name)\r\n if \"id\" in data:\r\n data[\"pk\"] = data[\"id\"]\r\n return data\r\n","sub_path":"helpers/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331725493","text":"import numpy as np\n\n\ndef is_out_of_bounds(coords, S):\n return np.sum(np.where(np.abs(coords) > S)) != 0\n\ndef move_in_time(coords, velocities, time, S, ignore_bounds = False):\n next_coords = np.copy(coords) + time * velocities\n if ignore_bounds or not is_out_of_bounds(next_coords, S):\n return next_coords\n else:\n return None\n\ndef calculate_distances_from_origin(coords):\n return np.hypot(coords[:,0], coords[:,1])\n\ndef find_big_bang(coords, velocities, S):\n stds = np.array([])\n stds = np.append(stds, np.std(calculate_distances_from_origin(coords)))\n next_coords = np.copy(coords)\n last_coords = np.copy(coords)\n while(True):\n next_coords = move_in_time(next_coords, velocities, -1, S)\n if next_coords is None or np.all(last_coords == next_coords):\n break\n stds = np.append(stds, np.std(calculate_distances_from_origin(next_coords)))\n last_coords = np.copy(next_coords)\n return np.argmin(stds)\n\ndef find_world_hits(coords, velocities, N, S, T, P):\n coords = move_in_time(coords, velocities, T, None, ignore_bounds=True)\n coords = coords[np.any(np.abs(coords) > S, axis=1), :]\n stable_particles = float(N - coords.shape[0])\n angles = np.arctan2(coords[:, 1], coords[:, 0]).reshape((coords.shape[0], 1))\n starting_points = np.zeros(coords.shape)\n starting_points = starting_points + \\\n np.array([S, 0]) * np.logical_and(angles>=-np.pi/4, angles<=np.pi/4) + \\\n np.array([-S, 0]) * np.logical_or(angles<=-3*np.pi/4, angles>=3*np.pi/4) + \\\n np.array([0, S]) * np.logical_and(angles>=np.pi/4, angles<=3*np.pi/4) + \\\n np.array([0, -S]) * np.logical_and(angles>=-3*np.pi/4, angles<=-np.pi/4)\n starting_points = starting_points + np.hstack((\n (((starting_points[:, 1] - coords[:, 1]) / np.tan(angles).reshape(angles.shape[0]) + coords[:, 0]) * (starting_points[:, 0] == 0)).reshape((angles.shape[0], 1)),\n ((np.tan(angles).reshape(angles.shape[0]) * (starting_points[:, 0] - coords[:, 0]) + coords[:, 1]) * (starting_points[:, 1] == 0)).reshape((angles.shape[0], 1))\n ))\n wall_bangs = np.ones(coords.shape[0])\n old_wall_bangs = np.zeros(coords.shape[0])\n coords = coords - starting_points # now coords holds the remaining path of point\n while np.any(wall_bangs != old_wall_bangs):\n new_angles = - angles * np.where(np.abs(starting_points[:, 1]) == S, 1, 0).reshape((angles.shape[0],1)) + \\\n (np.pi - angles) * np.where(np.abs(starting_points[:, 0]) == S, 1, 0).reshape((angles.shape[0],1)) * (angles >= 0).reshape((angles.shape[0],1)) + \\\n (-np.pi - angles) * np.where(np.abs(starting_points[:, 0]) == S, 1, 0).reshape((angles.shape[0],1)) * (angles < 0).reshape((angles.shape[0],1))\n rotation_angles = new_angles - angles\n angles = new_angles\n new_starting_points = np.zeros(coords.shape)\n for i in range(rotation_angles.shape[0]):\n coords[i] = 
np.array([[np.cos(rotation_angles[i][0]), -np.sin(rotation_angles[i][0])], [np.sin(rotation_angles[i][0]), np.cos(rotation_angles[i][0])]]) @ coords[i,:]\n x_plus_S = np.repeat((1*(starting_points[:, 0] != S)).reshape((starting_points.shape[0],1)), 2, axis=1) * np.hstack((\n (S * np.ones(angles.shape[0])).reshape((angles.shape[0], 1)),\n (np.tan(angles).reshape(angles.shape[0]) * (S - coords[:, 0] - starting_points[:, 0]) + coords[:, 1] + starting_points[:, 1]).reshape((angles.shape[0], 1))\n ))\n x_plus_S = x_plus_S * np.repeat(np.logical_and(x_plus_S[:, 1] <= S, x_plus_S[:, 1] >= -S).reshape((x_plus_S.shape[0],1)), 2, axis=1) * np.repeat((np.linalg.norm(x_plus_S - starting_points, ord=2, axis=1) <= np.linalg.norm(coords, ord=2, axis=1)).reshape((angles.shape[0],1)), 2, axis=1)\n x_minus_S = np.repeat((1*(starting_points[:, 0] != -S)).reshape((starting_points.shape[0],1)), 2, axis=1) * np.hstack((\n (-S * np.ones(angles.shape[0])).reshape((angles.shape[0], 1)),\n (np.tan(angles).reshape(angles.shape[0]) * (-S - coords[:, 0] - starting_points[:, 0]) + coords[:, 1] + starting_points[:, 1]).reshape((angles.shape[0], 1))\n ))\n x_minus_S = x_minus_S * np.repeat(np.logical_and(x_minus_S[:, 1] <= S, x_minus_S[:, 1] >= -S).reshape((x_minus_S.shape[0],1)), 2, axis=1) * np.repeat((np.linalg.norm(x_minus_S - starting_points, ord=2, axis=1) <= np.linalg.norm(coords, ord=2, axis=1)).reshape((angles.shape[0],1)), 2, axis=1)\n y_plus_S = np.repeat((1*(starting_points[:, 1] != S)).reshape((starting_points.shape[0],1)), 2, axis=1) * np.hstack((\n ((S - coords[:, 1] - starting_points[:, 1]) / np.tan(angles).reshape(angles.shape[0]) + coords[:, 0] + starting_points[:, 0]).reshape((angles.shape[0], 1)),\n (S * np.ones(angles.shape[0])).reshape((angles.shape[0], 1))\n ))\n y_plus_S = y_plus_S * np.repeat(np.logical_and(y_plus_S[:, 0] <= S, y_plus_S[:, 0] >= -S).reshape((y_plus_S.shape[0],1)), 2, axis=1) * np.repeat((np.linalg.norm(y_plus_S - starting_points, ord=2, axis=1) <= np.linalg.norm(coords, ord=2, axis=1)).reshape((angles.shape[0],1)), 2, axis=1)\n y_minus_S = np.repeat((1*(starting_points[:, 1] != -S)).reshape((starting_points.shape[0],1)), 2, axis=1) * np.hstack((\n ((-S - coords[:, 1] - starting_points[:, 1]) / np.tan(angles).reshape(angles.shape[0]) + coords[:, 0] + starting_points[:, 0]).reshape((angles.shape[0], 1)),\n (-S * np.ones(angles.shape[0])).reshape((angles.shape[0], 1))\n ))\n y_minus_S = y_minus_S * np.repeat(np.logical_and(y_minus_S[:, 0] <= S, y_minus_S[:, 0] >= -S).reshape((y_minus_S.shape[0],1)), 2, axis=1) * np.repeat((np.linalg.norm(y_minus_S - starting_points, ord=2, axis=1) <= np.linalg.norm(coords, ord=2, axis=1)).reshape((angles.shape[0],1)), 2, axis=1)\n new_starting_points = x_minus_S + x_plus_S + y_minus_S + y_plus_S\n coords = coords - (new_starting_points - starting_points) * np.logical_not(np.repeat(np.all(new_starting_points == 0, axis=1).reshape((new_starting_points.shape[0],1)), 2, axis=1))\n coords = coords * np.repeat(np.any(np.abs(coords + starting_points) > S, axis=1).reshape((coords.shape[0], 1)), 2, axis=1)\n starting_points = new_starting_points\n old_wall_bangs = wall_bangs.copy()\n wall_bangs = wall_bangs + np.any(np.abs(coords + starting_points) > S, axis=1)\n return int(np.sum(wall_bangs)), stable_particles + np.sum(np.power(P, wall_bangs))\n\nif __name__ == '__main__':\n np.warnings.filterwarnings('ignore')\n nstp = input().split()\n N = int(nstp[0])\n S = int(nstp[1])\n T = int(nstp[2])\n P = float(nstp[3])\n coords = np.empty([0, 2])\n velocities = 
np.empty([0, 2])\n    for i in range(N):\n        pv = input().split()\n        coords = np.append(coords, [[float(pv[0]), float(pv[1])]], axis=0)\n        velocities = np.append(velocities, [[float(pv[2]), float(pv[3])]], axis=0)\n    world_hit_data = find_world_hits(coords, velocities, N, S, T, P)\n    print(\"{0} {1} {2}\".format(find_big_bang(coords, velocities, S), world_hit_data[0], world_hit_data[1]))","sub_path":"psiml data/submissions/Big Bang/136075-1457780.py3","file_name":"136075-1457780.py3","file_ext":"py3","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19836985","text":"from django.db import models\nfrom django.core.urlresolvers import reverse\n\nclass Folder(models.Model):\n\tname = models.CharField(u\"Folder name\", max_length=100, unique = True)\n\tdate_created = models.DateField(u\"Creation date\", auto_now_add = True)\n\t\n\tdef __unicode__(self):\n\t\treturn u\"%s\" % self.name\n\t\n\tdef get_absolute_url(self):\n\t\treturn reverse('folder_explore', kwargs={'folder_id':self.id})\n\t\n\nclass XlsFile(models.Model):\n\tfolder = models.ForeignKey(Folder)\n\twell_name = models.CharField(u\"Well name\", max_length=50)\n\twell_row = models.CharField(u\"Well row\", max_length=1, null = True, blank = True)\n\twell_col = models.IntegerField(u\"Well col\", null = True, blank = True)\n\tis_duplicate = models.BooleanField(u\"Is a merge of duplicates?\", default = False)\n\t\n\tclass Meta:\n\t\tordering = ('folder', '-is_duplicate', 'well_row', 'well_col')\n\t\n\tdef __unicode__(self):\n\t\treturn u\"%s > %s\" % (self.folder.name, self.well_name)\n\t\n\tdef get_header(self):\n\t\theader = []\n\t\tfor data in self.data_set.filter(row_index = 0):\n\t\t\tif not data.col_name in header:\n\t\t\t\theader.append( data.col_name )\n\t\t\t\n\t\treturn header\n\t\n\tdef get_rows(self):\n\t\trows, row = [], []\n\t\trow_idx = 0\n\t\t\n\t\tfor item in self.data_set.all():\n\t\t\tif row_idx != item.row_index:\n\t\t\t\trows.append( row )\n\t\t\t\trow = []\n\t\t\t\trow_idx = item.row_index\n\t\t\trow.append( item )\n\t\trows.append( row )\n\t\treturn rows\n\t\n\tdef get_values_for(self, column, filter_on_col, filters = {}):\n\t\tif not filters or not filter_on_col:\n\t\t\tvalues = map(\n\t\t\t\tfloat,\n\t\t\t\tself.data_set.filter(\n\t\t\t\t\tcol_name = column, \n\t\t\t\t\tis_enabled = True\n\t\t\t\t).values_list('value', flat=True)\n\t\t\t)\n\t\t\treturn values\n\t\t\n\t\tif 'filter_min_val' in filters and filters['filter_min_val']:\n\t\t\tmin_val = filters['filter_min_val']\n\t\telse:\n\t\t\tmin_val = None\n\t\t\n\t\tif 'filter_max_val' in filters and filters['filter_max_val']:\n\t\t\tmax_val = filters['filter_max_val']\n\t\telse:\n\t\t\tmax_val = None\n\t\t\n\t\tif 'filter_up_perc' in filters and filters['filter_up_perc']:\n\t\t\tup_perc = float(filters['filter_up_perc'])\n\t\telse:\n\t\t\tup_perc = None\n\t\t\n\t\tif 'filter_dn_perc' in filters and filters['filter_dn_perc']:\n\t\t\tdn_perc = float(filters['filter_dn_perc'])\n\t\telse:\n\t\t\tdn_perc = None\n\t\t\n\t\tvalues_for_filter = map(\n\t\t\tfloat,\n\t\t\tself.data_set.filter(\n\t\t\t\tcol_name = filter_on_col,\n\t\t\t\tis_enabled = True\n\t\t\t).values_list('value', flat=True)\n\t\t)\n\t\t\n\t\tif min_val:\n\t\t\tvalues_for_filter = filter(lambda item: min_val < item, values_for_filter)\n\t\tif max_val:\n\t\t\tvalues_for_filter = filter(lambda item: item < max_val, values_for_filter)\n\t\t\n\t\tnb_val = len(values_for_filter)\n\t\tvalues_for_filter.sort()\n\t\tif up_perc:\n\t\t\tup_idx = int(nb_val * (1.0 
- up_perc / 100.0))\n\t\t\tvalues_for_filter = values_for_filter[:up_idx+1]\n\t\tif dn_perc:\n\t\t\tdn_idx = int(nb_val * dn_perc / 100.0)\n\t\t\tvalues_for_filter = values_for_filter[dn_idx:]\n\t\t\n\t\t# Retrieve row_idx returned by the filtering\n\t\trow_idx = self.data_set.filter(\n\t\t\tcol_name = filter_on_col,\n\t\t\tvalue__in = map(unicode, values_for_filter)\n\t\t).values_list('row_index', flat = True)\n\t\t\n\t\t# Return values from actual interesting column (not filtered one)\n\t\tvalues_for_col = self.data_set.filter(\n\t\t\tcol_name = column,\n\t\t\trow_index__in = row_idx\n\t\t).values_list('value', flat = True)\n\t\t\n\t\treturn map( float, values_for_col )\n\t\n\nclass Data(models.Model):\n\txls_file = models.ForeignKey(XlsFile)\n\tcol_index = models.IntegerField(u\"Column index\")\n\trow_index = models.IntegerField(u\"Row index\")\n\t\n\tcol_name = models.CharField(u\"Column name\", max_length=100)\n\tvalue = models.CharField(\"string value\", max_length=100)\n\t\n\tis_enabled = models.BooleanField(u\"Is enabled?\", default = True)\n\t\n\tclass Meta:\n\t\tordering = ['row_index', 'col_index']\n\t\n\tdef __unicode__(self):\n\t\treturn u\"%s|%s:%s\" % (self.row_index, self.col_index, self.value)\n","sub_path":"data/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"274700978","text":"# Fake Binary Search May Long 2018\n\n \nt=int(input())\nwhile(t!=0):\n t-=1\n n,q=[int(x) for x in input().strip().split()]\n a=[int(x) for x in input().strip().split()]\n sorted_a=sorted(a) #sorted list \n indexOf={} #retrieving index of elements in O(1) lookup\n index=0\n for ele in a:\n indexOf[ele]=index \n index+=1\n \n index=0 #reset \n countlessthan={} #to predict number of elements less than ele\n countgreaterthan={} #to predict number of elements greater than ele\n for ele in sorted_a:\n countlessthan[ele]=index \n countgreaterthan[ele]=n-1-index\n index+=1 \n \n while(q!=0):\n q=q-1\n x=int(input())\n low=0\n high=n-1 \n swapGreaterYes=0\n swapGreaterNo=0 \n swapLesserYes=0\n swapLesserNo=0 \n while(low<=high):\n \n mid=(low+high)//2 \n if(a[mid]==x):\n break\n elif(a[mid]mid):\n low=mid+1\n swapLesserNo+=1 \n elif(a[mid]x and indexOf[x]x and indexOf[x]>mid): #FakeBS case 2\n swapLesserYes+=1\n low=mid+1 \n \n \n neededswaps=max(swapLesserYes,swapGreaterYes) \n ans=neededswaps\n\n #not enough elements in pool required for swapping\n if((swapLesserYes>countlessthan[x]- swapLesserNo) or (swapGreaterYes>countgreaterthan[x]- swapGreaterNo)):\n ans=-1 \n \n print(ans)\n","sub_path":"FAKEBS.py","file_name":"FAKEBS.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183869182","text":"st = 'python'\r\nst1 = 'level'\r\ndef reverse(st):\r\n str = ''\r\n for i in st:\r\n str = i + str\r\n return str\r\n\r\ndef palindrome(st):\r\n a = reverse(st)\r\n if a == st:\r\n return True\r\n else:\r\n return False\r\n\r\nobj1 = reverse(st)\r\nprint(obj1)\r\nobj2 = palindrome(st)\r\nprint(obj2)\r\nobj3 = reverse(st1)\r\nprint(obj3)\r\nobj4 = palindrome(st1)\r\nprint(obj4)","sub_path":"reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488815821","text":"# -*- coding: utf-8 
-*-\n##############################################################################\n# \n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see . \n#\n##############################################################################\n\nfrom datetime import datetime\nfrom tools.translate import _\n\nfrom osv import osv, fields\n\nclass cl_changeadress(osv.osv_memory):\n _name = \"cl_change.adress\"\n\n def default_get(self, cr, uid, fields, context=None):\n ret = super(cl_changeadress,self).default_get(cr, uid, fields, context=context)\n country_id = context.get('country_id',False)\n if country_id:\n ret['country_id'] = country_id\n return ret\n\n _columns = {\n 'check_city': fields.boolean('city Check'),\n 'city': fields.char('city', size=64),\n 'street': fields.char('street', size=64),\n 'street_id': fields.many2one('wf.zip_street', 'Street'),\n 'city_id': fields.many2one('wf.city', 'City'),\n 'zip': fields.char('zip', size=64, required=True),\n 'street_no': fields.char('street_no', size=64),\n 'country_id': fields.many2one('res.country', 'Country', required=True),\n 'street_no_not_required': fields.related('country_id', 'wf_without_street_no', type='boolean', string='no Street No'),\n }\n _defaults = {\n 'check_city': True,\n }\n\n def onchange_zip(self, cr, uid, ids, zip=None, country_id=False, context=None):\n domain = {}\n values = {}\n street_obj = self.pool.get('wf.zip_street')\n\n if zip:\n street_ids = street_obj.search(cr, uid, [('zip','=', zip), ('country_id', '=', country_id)], context=context)\n city_ids = []\n for street in street_obj.browse(cr,uid, street_ids, context=context):\n city_ids.append(street.wf_city_id.id)\n if city_ids:\n values.update({'check_city': True, 'city_id': False, 'street_id': False})\n domain = {'city_id':[('id','in',city_ids)]}\n else:\n values.update({'check_city': False, 'city_id': False, 'street_id': False})\n elif country_id:\n street_ids = street_obj.search(cr, uid, [('country_id', '=', country_id)], count=True, context=context)\n country = self.pool.get('res.country').browse(cr,uid,country_id,context)\n if street_ids > 0:\n values.update({'check_city': True, 'city_id': False, 'street_id': False, 'street_no_not_required': country.wf_without_street_no})\n else:\n values.update({'check_city': False, 'city_id': False, 'street_id': False, 'street_no_not_required': country.wf_without_street_no})\n\n return {'value': values, 'domain':domain}\n\n\n def onchange_city(self, cr, uid, ids, city_id=False, context=None):\n domain = {}\n values = {}\n street_ids = []\n city_obj = self.pool.get('wf.city')\n if city_id:\n city=city_obj.browse(cr,uid, city_id, context=context)\n for street in city.wf_zip_street_ids:\n street_ids.append(street.id)\n values = {'city': city.city, 'street_id': False, 'street_no': False}\n domain = {'street_id':[('id','in',street_ids)]}\n return {'value': values, 'domain':domain}\n\n 
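# Mirror the selected street record into the free-text street field and clear the street number.\n    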
def onchange_street(self, cr, uid, ids, street_id=False, context=None):\n        values = {}\n        if street_id:\n            street=self.pool.get('wf.zip_street').browse(cr,uid, street_id, context=context)\n            values = {'street': street.street, 'street_no': False}\n        return {'value': values}\n\n\n    # Write!\n    def adress_set(self, cr, uid, ids, context=None):\n        if context is None:\n            context = {}\n\n        rec_ids = context and context.get('active_ids',[])\n        partner_obj = self.pool.get('res.partner')\n        values = {}\n\n        for datas in self.browse(cr, uid, ids, context=context):\n            if datas.city_id and datas.street_id:\n                values.update({'country_id':datas.country_id.id, 'zip': datas.zip, 'city': datas.city_id.city, 'street': datas.street_id.street, 'wf_street_no': datas.street_no})\n            elif datas.city and datas.street:\n                values.update({'country_id':datas.country_id.id, 'zip': datas.zip, 'city': datas.city, 'street': datas.street, 'wf_street_no': datas.street_no})\n            else:\n                raise osv.except_osv(_('Address incomplete'), _('City or Street is empty'))\n        partner_obj.write(cr, uid, [rec_ids[0]], values, context)\n\n        return {'type': 'ir.actions.act_window_close'}\n\ncl_changeadress()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n","sub_path":"wf_strassen/wizard/cl_changeadress.py","file_name":"cl_changeadress.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433369751","text":"from abc import ABC, abstractmethod\nimport numpy as np\nimport copy\n\nclass RecordingExtractor(ABC):\n    '''A class that contains functions for extracting important information\n    from recorded extracellular data. It is an abstract class so all\n    functions with the @abstractmethod tag must be implemented for the\n    initialization to work.\n\n\n    '''\n\n    extractor_name = ''\n    has_default_locations = False\n    installed = False  # check at class level if installed or not\n    _gui_params = []\n    installation_mesg = \"\"  # error message when not installed\n\n    def __init__(self):\n        self._epochs = {}\n        self._channel_properties = {}\n\n    @abstractmethod\n    def get_traces(self, channel_ids=None, start_frame=None, end_frame=None):\n        '''This function extracts and returns a trace from the recorded data from the\n        given channel ids and the given start and end frame. It will return\n        traces from within four ranges:\n\n            [start_frame, start_frame+1, ..., end_frame-1]\n            [start_frame, start_frame+1, ..., final_recording_frame - 1]\n            [0, 1, ..., end_frame-1]\n            [0, 1, ..., final_recording_frame - 1]\n\n        if both start_frame and end_frame are given, if only start_frame is\n        given, if only end_frame is given, or if neither start_frame nor end_frame\n        are given, respectively. Traces are returned in a 2D array that\n        contains all of the traces from each channel with dimensions\n        (num_channels x num_frames). 
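For example, requesting channel_ids=[0, 1] with start_frame=0 and end_frame=100 yields an array of shape (2, 100). 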
In this implementation, start_frame is inclusive\n and end_frame is exclusive conforming to numpy standards.\n\n Parameters\n ----------\n start_frame: int\n The starting frame of the trace to be returned (inclusive).\n end_frame: int\n The ending frame of the trace to be returned (exclusive).\n channel_ids: array_like\n A list or 1D array of channel ids (ints) from which each trace will be\n extracted.\n\n Returns\n ----------\n traces: numpy.ndarray\n A 2D array that contains all of the traces from each channel.\n Dimensions are: (num_channels x num_frames)\n '''\n pass\n\n @abstractmethod\n def get_num_frames(self):\n '''This function returns the number of frames in the recording.\n\n Returns\n -------\n num_frames: int\n Number of frames in the recording (duration of recording).\n '''\n pass\n\n @abstractmethod\n def get_sampling_frequency(self):\n '''This function returns the sampling frequency in units of Hz.\n\n Returns\n -------\n fs: float\n Sampling frequency of the recordings in Hz.\n '''\n pass\n\n @abstractmethod\n def get_channel_ids(self):\n '''Returns the list of channel ids. If not specified, the range from 0 to num_channels - 1 is returned.\n\n Returns\n -------\n channel_ids: list\n Channel list\n\n '''\n pass\n\n def get_num_channels(self):\n '''This function returns the number of channels in the recording.\n\n Returns\n -------\n num_channels: int\n Number of channels in the recording.\n '''\n # print('WARNING: this is a temporary warning. You should use get_channel_ids() to iterate through the channels. '\n # 'This warning will be removed in future versions of SpikeInterface.')\n return len(self.get_channel_ids())\n\n def frame_to_time(self, frame):\n '''This function converts a user-inputted frame index to a time with units of seconds.\n\n Parameters\n ----------\n frame: float\n The frame to be converted to a time.\n\n Returns\n -------\n time: float\n The corresponding time in seconds.\n '''\n # Default implementation\n return frame / self.get_sampling_frequency()\n\n def time_to_frame(self, time):\n '''This function converts a user-inputted time (in seconds) to a frame index.\n\n Parameters\n -------\n time: float\n The time (in seconds) to be converted to frame index.\n\n Returns\n -------\n frame: float\n The corresponding frame index.\n '''\n # Default implementation\n return time * self.get_sampling_frequency()\n\n def get_snippets(self, *, reference_frames, snippet_len, channel_ids=None):\n '''This function returns data snippets from the given channels that\n are starting on the given frames and are the length of the given snippet\n lengths before and after.\n\n Parameters\n ----------\n snippet_len: int or tuple\n If int, the snippet will be centered at the reference frame and\n and return half before and half after of the length. 
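For example, snippet_len=50 returns 25 frames before and 25 frames after each reference frame. 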
If tuple,\n it will return the first value of before frames and the second value\n of after frames around the reference frame (allows for asymmetry)\n reference_frames: array_like\n A list or array of frames that will be used as the reference frame of\n each snippet\n channel_ids: array_like\n A list or array of channel ids (ints) from which each trace will be\n extracted.\n\n Returns\n ----------\n snippets: numpy.ndarray\n Returns a list of the snippets as numpy arrays.\n The length of the list is len(reference_frames)\n Each array has dimensions: (num_channels x snippet_len)\n Out-of-bounds cases should be handled by filling in zeros in the snippet.\n '''\n # Default implementation\n if isinstance(snippet_len, (tuple, list, np.ndarray)):\n snippet_len_before = snippet_len[0]\n snippet_len_after = snippet_len[1]\n else:\n snippet_len_before = int((snippet_len + 1) / 2)\n snippet_len_after = snippet_len - snippet_len_before\n\n if channel_ids is None:\n channel_ids = self.get_channel_ids()\n\n num_snippets = len(reference_frames)\n num_channels = len(channel_ids)\n num_frames = self.get_num_frames()\n snippet_len_total = snippet_len_before + snippet_len_after\n # snippets = []\n snippets = np.zeros((num_snippets, num_channels, snippet_len_total))\n #TODO extract all waveforms in a chunk\n pad_first = False\n pad_last = False\n pad_samples_first = 0\n pad_samples_last = 0\n snippet_idxs = np.array([], dtype=int)\n for i in range(num_snippets):\n snippet_chunk = np.zeros((num_channels, snippet_len_total))\n if (0 <= reference_frames[i]) and (reference_frames[i] < num_frames):\n snippet_range = np.array(\n [int(reference_frames[i]) - snippet_len_before, int(reference_frames[i]) + snippet_len_after])\n snippet_buffer = np.array([0, snippet_len_total])\n # The following handles the out-of-bounds cases\n if snippet_range[0] < 0:\n snippet_buffer[0] -= snippet_range[0]\n snippet_range[0] -= snippet_range[0]\n if snippet_range[1] >= num_frames:\n snippet_buffer[1] -= snippet_range[1] - num_frames\n snippet_range[1] -= snippet_range[1] - num_frames\n snippet_chunk[:, snippet_buffer[0]:snippet_buffer[1]] = self.get_traces(channel_ids=channel_ids,\n start_frame=snippet_range[0],\n end_frame=snippet_range[1])\n snippets[i] = snippet_chunk\n return snippets\n\n def set_channel_locations(self, channel_ids, locations):\n '''This function sets the location properties of each specified channel\n id with the corresponding locations of the passed in locations list.\n\n Parameters\n ----------\n channel_ids: array_like\n The channel ids (ints) for which the locations will be specified\n locations: array_like\n A list of corresonding locations (array_like) for the given channel_ids\n '''\n if len(channel_ids) == len(locations):\n for i in range(len(channel_ids)):\n if isinstance(locations[i],(list,np.ndarray)):\n location = np.asarray(locations[i])\n self.set_channel_property(channel_ids[i], 'location', location.astype(float))\n else:\n raise ValueError(str(locations[i]) + \" must be an array_like\")\n else:\n raise ValueError(\"channel_ids and locations must have same length\")\n\n def get_channel_locations(self, channel_ids=None):\n '''This function returns the location of each channel specifed by\n channel_ids\n\n Parameters\n ----------\n channel_ids: array_like\n The channel ids (ints) for which the locations will be returned\n\n Returns\n ----------\n locations: array_like\n Returns a list of corresonding locations (floats) for the given\n channel_ids\n '''\n if channel_ids is None:\n channel_ids = 
self.get_channel_ids()\n locations = []\n for channel_id in channel_ids:\n location = self.get_channel_property(channel_id, 'location')\n locations.append(location)\n return locations\n\n def set_channel_groups(self, channel_ids, groups):\n '''This function sets the group property of each specified channel\n id with the corresponding group of the passed in groups list.\n\n Parameters\n ----------\n channel_ids: array_like\n The channel ids (ints) for which the groups will be specified\n groups: array_like\n A list of corresonding groups (ints) for the given channel_ids\n '''\n if len(channel_ids) == len(groups):\n for i in range(len(channel_ids)):\n if isinstance(groups[i], (int, np.integer)):\n self.set_channel_property(channel_ids[i], 'group', groups[i])\n else:\n raise ValueError(str(groups[i]) + \" must be an int\")\n else:\n raise ValueError(\"channel_ids and groups must have same length\")\n\n def get_channel_groups(self, channel_ids=None):\n '''This function returns the group of each channel specifed by\n channel_ids\n\n Parameters\n ----------\n channel_ids: array_like\n The channel ids (ints) for which the groups will be returned\n\n Returns\n ----------\n groups: array_like\n Returns a list of corresonding groups (ints) for the given\n channel_ids\n '''\n if channel_ids is None:\n channel_ids = self.get_channel_ids()\n groups = []\n for channel_id in channel_ids:\n group = self.get_channel_property(channel_id, 'group')\n groups.append(group)\n return groups\n\n def set_channel_property(self, channel_id, property_name, value):\n '''This function adds a property dataset to the given channel under the\n property name.\n\n Parameters\n ----------\n channel_id: int\n The channel id for which the property will be added\n property_name: str\n A property stored by the RecordingExtractor (location, etc.)\n value:\n The data associated with the given property name. Could be many\n formats as specified by the user.\n '''\n if isinstance(channel_id, (int, np.integer)):\n if channel_id in self.get_channel_ids():\n if channel_id not in self._channel_properties:\n self._channel_properties[channel_id] = {}\n if isinstance(property_name, str):\n self._channel_properties[channel_id][property_name] = value\n else:\n raise ValueError(str(property_name) + \" must be a string\")\n else:\n raise ValueError(str(channel_id) + \" is not a valid channel_id\")\n else:\n raise ValueError(str(channel_id) + \" must be an int\")\n\n def get_channel_property(self, channel_id, property_name):\n '''This function returns the data stored under the property name from\n the given channel.\n\n Parameters\n ----------\n channel_id: int\n The channel id for which the property will be returned\n property_name: str\n A property stored by the RecordingExtractor (location, etc.)\n\n Returns\n ----------\n property_data\n The data associated with the given property name. 
Could be many\n formats as specified by the user.\n '''\n if isinstance(channel_id, (int, np.integer)):\n if channel_id in self.get_channel_ids():\n if channel_id not in self._channel_properties:\n self._channel_properties[channel_id] = {}\n if isinstance(property_name, str):\n if property_name in list(self._channel_properties[channel_id].keys()):\n return self._channel_properties[channel_id][property_name]\n else:\n raise ValueError(str(property_name) + \" has not been added to channel \" + str(channel_id))\n else:\n raise ValueError(str(property_name) + \" must be a string\")\n else:\n raise ValueError(str(channel_id) + \" is not a valid channel_id\")\n else:\n raise ValueError(str(channel_id) + \" must be an int\")\n\n def get_channel_property_names(self, channel_id=None):\n '''Get a list of property names for a given channel, or for all channels if channel_id is None\n Parameters\n ----------\n channel_id: int\n The channel id for which the property names will be returned\n If None (default), will return property names for all channels\n Returns\n ----------\n property_names\n The list of property names\n '''\n if channel_id is None:\n property_names = []\n for channel_id in self.get_channel_ids():\n curr_property_names = self.get_channel_property_names(channel_id=channel_id)\n for curr_property_name in curr_property_names:\n property_names.append(curr_property_name)\n property_names = sorted(list(set(property_names)))\n return property_names\n if isinstance(channel_id, (int, np.integer)):\n if channel_id in self.get_channel_ids():\n if channel_id not in self._channel_properties:\n self._channel_properties[channel_id] = {}\n property_names = sorted(self._channel_properties[channel_id].keys())\n return property_names\n else:\n raise ValueError(str(channel_id) + \" is not a valid channel_id\")\n else:\n raise ValueError(str(channel_id) + \" must be an int\")\n\n def copy_channel_properties(self, recording, channel_ids=None):\n '''Copy channel properties from another recording extractor to the current\n recording extractor.\n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor from which the properties will be copied\n channel_ids: (array_like, int)\n The list (or single value) of channel_ids for which the properties will be copied.\n '''\n if channel_ids is None:\n channel_ids = recording.get_channel_ids()\n if isinstance(channel_ids, int):\n curr_property_names = recording.get_channel_property_names(channel_id=channel_ids)\n for curr_property_name in curr_property_names:\n value = recording.get_channel_property(channel_id=channel_ids, property_name=curr_property_name)\n self.set_channel_property(channel_id=channel_ids, property_name=curr_property_name, value=value)\n else:\n for channel_id in channel_ids:\n curr_property_names = recording.get_channel_property_names(channel_id=channel_id)\n for curr_property_name in curr_property_names:\n value = recording.get_channel_property(channel_id=channel_id, property_name=curr_property_name)\n self.set_channel_property(channel_id=channel_id, property_name=curr_property_name, value=value)\n\n def add_epoch(self, epoch_name, start_frame, end_frame):\n '''This function adds an epoch to your recording extractor that tracks\n a certain time period in your recording. 
It is stored in an internal\n dictionary of start and end frame tuples.\n\n Parameters\n ----------\n epoch_name: str\n The name of the epoch to be added\n start_frame: int\n The start frame of the epoch to be added (inclusive)\n end_frame: int\n The end frame of the epoch to be added (exclusive)\n\n '''\n # Default implementation only allows for frame info. Can override to put more info\n if isinstance(epoch_name, str):\n self._epochs[epoch_name] = {'start_frame': int(start_frame), 'end_frame': int(end_frame)}\n else:\n raise ValueError(\"epoch_name must be a string\")\n\n def remove_epoch(self, epoch_name):\n '''This function removes an epoch from your recording extractor.\n\n Parameters\n ----------\n epoch_name: str\n The name of the epoch to be removed\n '''\n if isinstance(epoch_name, str):\n if epoch_name in list(self._epochs.keys()):\n del self._epochs[epoch_name]\n else:\n raise ValueError(\"This epoch has not been added\")\n else:\n raise ValueError(\"epoch_name must be a string\")\n\n def get_epoch_names(self):\n '''This function returns a list of all the epoch names in your recording\n\n Returns\n ----------\n epoch_names: list\n List of epoch names in the recording extractor\n '''\n epoch_names = list(self._epochs.keys())\n if not epoch_names:\n pass\n else:\n epoch_start_frames = []\n for epoch_name in epoch_names:\n epoch_info = self.get_epoch_info(epoch_name)\n start_frame = epoch_info['start_frame']\n epoch_start_frames.append(start_frame)\n epoch_names = [epoch_name for _, epoch_name in sorted(zip(epoch_start_frames, epoch_names))]\n return epoch_names\n\n def get_epoch_info(self, epoch_name):\n '''This function returns the start frame and end frame of the epoch\n in a dict.\n\n Parameters\n ----------\n epoch_name: str\n The name of the epoch to be returned\n\n Returns\n ----------\n epoch_info: dict\n A dict containing the start frame and end frame of the epoch\n '''\n # Default (Can add more information into each epoch in subclass)\n if isinstance(epoch_name, str):\n if epoch_name in list(self._epochs.keys()):\n epoch_info = self._epochs[epoch_name]\n return epoch_info\n else:\n raise ValueError(\"This epoch has not been added\")\n else:\n raise ValueError(\"epoch_name must be a string\")\n\n def get_epoch(self, epoch_name):\n '''This function returns a SubRecordingExtractor which is a view to the\n given epoch\n\n Parameters\n ----------\n epoch_name: str\n The name of the epoch to be returned\n\n Returns\n ----------\n epoch_extractor: SubRecordingExtractor\n A SubRecordingExtractor which is a view to the given epoch\n '''\n epoch_info = self.get_epoch_info(epoch_name)\n start_frame = epoch_info['start_frame']\n end_frame = epoch_info['end_frame']\n from .SubRecordingExtractor import SubRecordingExtractor\n return SubRecordingExtractor(parent_recording=self, start_frame=start_frame,\n end_frame=end_frame)\n\n @classmethod\n def gui_params(self):\n return copy.deepcopy(self._gui_params)\n\n @staticmethod\n def write_recording(recording, save_path):\n '''This function writes out the recorded file of a given recording\n extractor to the file format of this current recording extractor. Allows\n for easy conversion between recording file formats. 
It is a static\n method so it can be used without instantiating this recording extractor.\n\n Parameters\n ----------\n recording: RecordingExtractor\n An RecordingExtractor that can extract information from the recording\n file to be converted to the new format.\n\n save_path: string\n A path to where the converted recorded data will be saved, which may\n either be a file or a folder, depending on the format.\n '''\n raise NotImplementedError(\"The write_recording function is not \\\n implemented for this extractor\")\n","sub_path":"spikeextractors/RecordingExtractor.py","file_name":"RecordingExtractor.py","file_ext":"py","file_size_in_byte":21454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80880006","text":"from vega3 import VegaLite\n\n\nclass Axes(object):\n \"\"\"Class representing a pdvega plot axes\"\"\"\n def __init__(self, spec=None, data=None):\n self._spec = spec or {}\n self._data = data\n\n if spec is not None:\n # If spec is specified, we need to immediately instantiate the\n # VegaLite object, because it will make some modifications to\n # the spec that we'd like to be able to see by doing ax.spec\n self._vlspec = VegaLite(spec, data)\n else:\n # If the spec is not specified, we set the vegalite object to None\n # and compute it on demand. This allows us to instantiate an empty\n # axis and build from there.\n self._vlspec = None\n\n @property\n def spec(self):\n if self._vlspec is not None:\n return self._vlspec.spec\n else:\n return self._spec\n\n @spec.setter\n def spec(self, spec):\n if self._vlspec is not None:\n self._vlspec.spec = spec\n else:\n # if we are setting the spec, then we can instantiate the\n # VegaLite object.\n self._spec = spec\n self._vlspec = VegaLite(self._spec, self._data)\n\n @property\n def data(self):\n if self._vlspec is not None:\n return self._vlspec.data\n else:\n return self._data\n\n @data.setter\n def data(self):\n if self._vlspec is not None:\n self._vlspec.data = data\n else:\n self._data = data\n\n @property\n def spec_no_data(self):\n return {key: val for key, val in self.spec.items() if key != 'data'}\n\n def _ipython_display_(self):\n if self._vlspec is None:\n self._vlspec = VegaLite(self._spec, self._data)\n return self._vlspec._ipython_display_()\n\n def display(self):\n if self._vlspec is None:\n self._vlspec = VegaLite(self._spec, self._data)\n return self._vlspec.display()\n\n def _add_layer(self, spec, data=None):\n \"\"\"Add spec as a layer to the current axes.\n\n Parameters\n ----------\n spec : dictionary\n the spec to be added. If this is the first spec in the axis, every\n part of it will be added. 
Otherwise, only the 'encoding', 'mark',\n and 'data', 'transform', and 'description' attributes will be added.\n data : dataframe, optional\n if specified, add this data to the layer.\n\n Returns\n -------\n self : Axes instance\n \"\"\"\n spec = VegaLite(spec, data).spec\n if not self.spec:\n # current axes spec is empty; replace it entirely with the new one\n self.spec = spec\n else:\n if 'layer' not in self.spec:\n # current axes spec is unlayered; move it to a layer\n keys = ['encoding', 'mark', 'data', 'transform', 'description', 'selection']\n self.spec['layer'] = [{key: self.spec.pop(key)\n for key in keys if key in self.spec}]\n # Competing selections in a single layer cause problems, so we\n # limit selections to the first layer for simplicity.\n keys = ['encoding', 'mark', 'data', 'transform', 'description']\n self.spec['layer'].append({key: spec[key]\n for key in keys if key in spec})\n # TODO: vega/vega3 raises an error without data defined at top level.\n # This needs an upstream fix; in the meantime we get around it this way:\n if 'data' not in self.spec:\n self.spec['data'] = {'name': 'no-toplevel-data'}\n return self\n","sub_path":"pdvega/_axes.py","file_name":"_axes.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269568924","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\n\nimport sys\nsys.path.insert(0, '../')\nfrom networks.quant_layer import Linear_Q, Conv2d_Q\n\n\n# batch convolution (do not share convolution kernel across batch images)\n# is implemented in efficient way using group\ndef batch_conv(x, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):\n if bias is None:\n assert x.shape[0] == weight.shape[0], \"dim=0 of x must be equal in size to dim=0 of weight\"\n else:\n assert x.shape[0] == weight.shape[0] and bias.shape[0] == weight.shape[\n 0], \"dim=0 of bias must be equal in size to dim=0 of weight\"\n\n b_i, b_j, c, h, w = x.shape\n b_i, out_channels, in_channels, kernel_height_size, kernel_width_size = weight.shape\n\n out = x.permute([1, 0, 2, 3, 4]).contiguous().view(b_j, b_i * c, h, w)\n weight = weight.view(b_i * out_channels, in_channels, kernel_height_size, kernel_width_size)\n\n out = F.conv2d(out, weight=weight, bias=None, stride=stride, dilation=dilation, groups=b_i,\n padding=padding)\n out = out.view(b_j, b_i, out_channels, out.shape[-2], out.shape[-1])\n out = out.permute([1, 0, 2, 3, 4])\n\n if bias is not None:\n out = out + bias.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n\n return out\n\n# use batch convolution here to improve efficiency\ndef update_Fisher(model):\n #for m in model.features.modules():\n for m in model.modules():\n if isinstance(m, Conv2d_Q):\n # conv layer\n # smart batch conv version\n # extend state to (N,1,C,H,W)\n N = m._state.shape[0]\n m._state = m._state.unsqueeze(1)\n\n # quantize weight\n # sync_weight is already called\n weight_q = m.weight.detach()\n\n # extend weight to (N, C_out, C_in, K_h, K_w)\n # multiply by the scaling factor here\n ext_weight = weight_q.unsqueeze(0).repeat(N,1,1,1,1)\n ext_weight.requires_grad_(True)\n if m.bias is not None:\n # extend bias to (N, C_out)\n ext_bias = m.bias.detach().unsqueeze(0).repeat(N,1)\n ext_bias.requires_grad_(True)\n\n f_state = batch_conv(m._state, ext_weight, bias=ext_bias, \\\n stride=m.stride, padding=m.padding, dilation=m.dilation)\n H = 
(f_state.squeeze(1)*m._costate).sum()\n\n if m.bias is not None:\n ext_grad_w, ext_grad_b = torch.autograd.grad(H, (ext_weight, ext_bias))\n else:\n ext_grad_w = torch.autograd.grad(H, ext_weight)[0]\n\n with torch.no_grad():\n # record fisher information\n m.Fisher_w.data.add_((ext_grad_w.pow_(2)).sum(dim=0).data)\n if m.bias is not None:\n m.Fisher_b.data.add_((ext_grad_b.pow_(2)).sum(dim=0).data)\n\n elif isinstance(m, Linear_Q):\n # extend state to (N, 1, C_in)\n N = m._state.shape[0]\n m._state = m._state.unsqueeze(1)\n\n # quantize weight\n # sync_weight is already called\n weight_q = m.weight.detach()\n\n # extend weight to (N, C_out, C_in)\n ext_weight = weight_q.unsqueeze(0).repeat(N,1,1)\n ext_weight.requires_grad_(True)\n ext_weight = ext_weight.permute(0,2,1) # (N, C_in, C_out)\n\n f_state = torch.bmm(m._state, ext_weight).squeeze(1) # (N, C_out)\n if m.bias is not None:\n bias_q = m.bias.detach()\n ext_bias = bias_q.unsqueeze(0).repeat(N,1).requires_grad_(True) # (N, C_out)\n f_state += ext_bias\n H = (f_state*m._costate).sum()\n\n if m.bias is not None:\n ext_grad_w,ext_grad_b = torch.autograd.grad(H, (ext_weight,ext_bias))\n else:\n ext_grad_w = torch.autograd.grad(H, ext_weight)[0]\n\n ext_grad_w = ext_grad_w.permute(0,2,1) # (N, C_out, C_in)\n\n with torch.no_grad():\n # record fisher information\n m.Fisher_w.data.add_((ext_grad_w.pow_(2)).sum(dim=0).data)\n if m.bias is not None:\n m.Fisher_b.data.add_((ext_grad_b.pow_(2)).sum(dim=0).data)\n\ndef estimate_fisher(task, device, net, x, y, batch_size=100, num_batch=80, num_round=1):\n net.eval()\n\n def _save_state(module, input, results):\n module._state = input[0].clone()\n\n def _save_costate(module, grad_input, grad_output):\n module._costate = grad_output[0].clone()\n\n # register hooks\n for m in net.modules():\n if isinstance(m, Linear_Q) or isinstance(m, Conv2d_Q):\n m.handle_forward = m.register_forward_hook(_save_state)\n m.handle_backward = m.register_backward_hook(_save_costate)\n\n criterion = nn.CrossEntropyLoss()\n\n for _ in range(num_round):\n r=np.arange(x.size(0))\n np.random.shuffle(r)\n r=torch.LongTensor(r).to(device)\n\n for i in range(0, num_batch):\n assert (i+1)*batch_size <= len(r)\n\n b=r[i*batch_size : (i+1)*batch_size]\n images = x[b].to(device)\n outputs = net(images)[task]\n\n # sample computation target\n # if we simple use actual label directly here\n # then we're calculating ``empirical fisher'' instead of ``fisher''\n targets = Categorical(logits=outputs).sample()\n\n loss = criterion(outputs, targets)\n net.zero_grad()\n loss.backward()\n update_Fisher(net)\n net.zero_grad()\n\n total_data = num_round*batch_size*num_batch\n for m in net.modules():\n if isinstance(m, Linear_Q) or isinstance(m, Conv2d_Q):\n m.Fisher_w /= total_data\n if m.bias is not None:\n m.Fisher_b /= total_data\n m.handle_forward.remove()\n m.handle_backward.remove()\n","sub_path":"ImageClassification/src/approaches/blip_utils.py","file_name":"blip_utils.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362426931","text":"from Jumpscale import j\n\n\nclass nodes(j.baseclasses.threebot_actor):\n def _init(self, **kwargs):\n\n bcdb = j.data.bcdb.get(\"tf_directory\")\n self.node_model = bcdb.model_get(url=\"tfgrid.node.2\")\n self.farm_model = bcdb.model_get(url=\"tfgrid.farm.1\")\n\n def _find(self, node_id):\n nodes = self.node_model.find(node_id=node_id)\n if len(nodes) <= 0:\n return None\n return nodes[0]\n\n def 
add(self, node, schema_out):\n \"\"\"\n ```in\n node = (O) !tfgrid.node.2\n ```\n\n ```out\n node = (O) !tfgrid.node.2\n ```\n\n \"\"\"\n # TODO check the sender is actually the node itself\n validation_errors = []\n if not node.node_id:\n validation_errors.append(\"node_id\")\n if not node.os_version:\n validation_errors.append(\"os_version\")\n if not node.farmer_id:\n validation_errors.append(\"farmer_id\")\n if not node.location:\n validation_errors.append(\"location\")\n if validation_errors:\n raise Exception(\"Cannot create node without {}\".format(\" or \".join(validation_errors)))\n old_node = self._find(node.node_id)\n if old_node:\n return self.node_model.set_dynamic(node._ddict, obj_id=old_node.id)\n return self.node_model.new(data=node).save()\n\n def list(self, farmer_id, country, city, cru, sru, mru, hru, schema_out):\n \"\"\"\n ```in\n farmer_id = (S)\n country = (S)\n city = (S)\n cru = -1 (I)\n mru = -1 (I)\n sru = -1 (I)\n hru = -1 (I)\n ```\n\n ```out\n nodes = (LO) !tfgrid.node.2\n ```\n \"\"\"\n\n output = schema_out.new()\n for node in self.node_model.iterate():\n if farmer_id and farmer_id != node.farmer_id:\n continue\n if country != \"\" and node.location.country != country:\n continue\n if city != \"\" and node.location.city != city:\n continue\n if cru > -1 and node.total_resource.cru < cru:\n continue\n if mru > -1 and node.total_resource.mru < mru:\n continue\n if sru > -1 and node.total_resource.sru < sru:\n continue\n if hru > -1 and node.total_resource.hru < hru:\n continue\n output.nodes.append(node)\n\n return output\n\n def get(self, node_id, schema_out):\n \"\"\"\n ```in\n node_id = (S)\n ```\n\n ```out\n node = (O) !tfgrid.node.2\n ```\n \"\"\"\n return self._find(node_id)\n\n def update_total_capacity(self, node_id, resource):\n \"\"\"\n ```in\n node_id = (S)\n resource = (O) !tfgrid.node.resource.1\n ```\n\n \"\"\"\n node = self._find(node_id)\n if not node:\n raise j.exceptions.NotFound(\"node %s not found\" % node_id)\n node.total_resource.mru = resource.mru\n node.total_resource.cru = resource.cru\n node.total_resource.hru = resource.hru\n node.total_resource.sru = resource.sru\n node.save()\n return True\n\n def update_reserved_capacity(self, node_id, resource):\n \"\"\"\n ```in\n node_id = (S)\n resource = (O) !tfgrid.node.resource.1\n ```\n \"\"\"\n node = self._find(node_id)\n if not node:\n raise j.exceptions.NotFound(\"node %s not found\" % node_id)\n node.reserved_resource.mru = resource.mru\n node.reserved_resource.cru = resource.cru\n node.reserved_resource.hru = resource.hru\n node.reserved_resource.sru = resource.sru\n\n node.save()\n return True\n\n def update_used_capacity(self, node_id, resource):\n \"\"\"\n ```in\n node_id = (S)\n resource = (O) !tfgrid.node.resource.1\n ```\n \"\"\"\n\n node = self._find(node_id)\n if not node:\n raise j.exceptions.NotFound(\"node %s not found\" % node_id)\n node.used_resource.mru = resource.mru\n node.used_resource.cru = resource.cru\n node.used_resource.hru = resource.hru\n node.used_resource.sru = resource.sru\n node.save()\n return True\n","sub_path":"ThreeBotPackages/threefold/directory/actors/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"127008957","text":"from typing import List\nfrom bisect import bisect\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def firstElement(self, x):\n return 
x[1]\n\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n lists = [list for list in lists if list != None]\n if len(lists) == 0:\n return None\n rawValues = [(i, x.val) for i, x in enumerate(lists)] #(index, value)\n sortedTuples = sorted(rawValues, key=self.firstElement)\n tupleToAdd = sortedTuples.pop(0) #(index, value)\n # sorted tuples is still sorted by value\n nodeToAdd = lists[tupleToAdd[0]]\n if nodeToAdd.next:\n indexOfLists = tupleToAdd[0]\n lists[indexOfLists] = nodeToAdd.next\n valueOfNewNode = lists[indexOfLists].val\n # sortedTuples is out of date now\n # need to add this new element\n indexOfSortedTuples = bisect([x[1] for x in sortedTuples] , valueOfNewNode)\n sortedTuples.insert(indexOfSortedTuples, (indexOfLists, valueOfNewNode))\n # sortedTuples is sorted again\n elif nodeToAdd.next == None:\n indexOfLists = tupleToAdd[0]\n del lists[indexOfLists]\n sortedTuples = [(i, x) if i < indexOfLists else (i-1, x) for i, x in sortedTuples]\n head = nodeToAdd\n current = head\n while len(lists) > 0:\n tupleToAdd = sortedTuples.pop(0) #(index, value)\n # sorted values is still sorted by value\n nodeToAdd = lists[tupleToAdd[0]]\n if nodeToAdd.next:\n indexOfLists = tupleToAdd[0]\n lists[indexOfLists] = nodeToAdd.next\n valueOfNewNode = lists[indexOfLists].val\n # sortedTuples is out of date now\n # need to add this new element\n indexOfSortedTuples = bisect([x[1] for x in sortedTuples] , valueOfNewNode)\n sortedTuples.insert(indexOfSortedTuples, (indexOfLists, valueOfNewNode))\n # sortedTuples is sorted again\n elif nodeToAdd.next == None:\n indexOfLists = tupleToAdd[0]\n del lists[indexOfLists]\n sortedTuples = [(i, x) if i < indexOfLists else (i-1, x) for i, x in sortedTuples]\n current.next = nodeToAdd\n current = current.next\n return head\n \ndef printList(l: ListNode) -> None:\n while l:\n print(l.val, \"->\")\n l = l.next\n\nl1 = ListNode(1)\nl1.next = ListNode(4)\nl1.next.next = ListNode(5)\n\nl2 = ListNode(1)\nl2.next = ListNode(3)\nl2.next.next = ListNode(4)\n\nl3 = ListNode(2)\nl3.next = ListNode(6)\n\nlists = [l1,l2,l3]\ns = Solution()\nprintList(s.mergeKLists(lists))\n","sub_path":"23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553148131","text":"#!/usr/local/bin/python\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom matplotlib import cm\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\n\nsp = \"100\"\nlp = \"100\"\nfname = \"soft_pf0.2_sp\" + sp + \"_lp\" + lp + \"_condensed.density\"\ndf = pd.read_csv(fname, delim_whitespace=True, header=None)\nfig, ax = plt.subplots(1, 2, figsize=(8, 3))\n# sns.heatmap(df,cmap=cm.viridis,ax=ax[0])\ndata = df.replace(0, 1e-10)\ndata = data / data.sum().sum()\nmin_data = data.min().min()\nif min_data == 0:\n min_data = 1\nmax_data = data.max().max()\nlog_norm = LogNorm(vmin=min_data, vmax=max_data)\ncbar_ticks = [\n 10 ** i\n for i in range(\n int(np.floor(np.log10(min_data))), 1 + int(np.ceil(np.log10(max_data)))\n )\n]\nsns.heatmap(\n data, norm=log_norm, cmap=cm.viridis, ax=ax[0], cbar_kws={\"ticks\": cbar_ticks}\n)\nfft_data = np.fft.fftshift(np.fft.fft2(df))\ndata = np.abs(fft_data)\n# data=data/data.sum().sum()\nmin_data = data.min().min()\nif min_data == 0:\n min_data = 1\nmax_data = data.max().max()\nlog_norm = LogNorm(vmin=min_data, vmax=max_data)\ncbar_ticks = [\n 10 ** i\n for i in range(\n int(np.floor(np.log10(min_data))), 1 + 
int(np.ceil(np.log10(max_data)))\n )\n]\nsns.heatmap(\n data, norm=log_norm, cmap=cm.viridis, ax=ax[1], cbar_kws={\"ticks\": cbar_ticks}\n)\nsavename = \"sp\" + sp + \"_lp\" + lp\nfig.savefig(savename + \".png\", dpi=300)\nf = open(savename + \"_fft_max.txt\", \"w\")\nf.write(str(np.max(data[data.shape[0] // 2])))\nf.close()\n","sub_path":"analysis/makeFFTPlots.py","file_name":"makeFFTPlots.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176378133","text":"import heuristic # For ai_turn\nimport time\n\n# Variables\n# game_board = 2d array to simulate the game board\n# player = 1 or 2 based on the player turn. 1 = player1, 2 = player2\n# s_column = standard column\n# s_row = standard row\n# last_column = the column in which the last slot filled\n# last_row = the row in which the last slot filled\n# General Notes\n# Board format = board[column][row]\n\n\ndef test_print(board, s_column, s_row):\n # Print board in raw array format\n # for i in board:\n # print(str(i))\n # Print board in simulated format\n for row in range(s_row):\n print()\n for column in range(s_column):\n print(board[column][row], \"| \", end=\"\")\n print()\n\n\ndef generate_board(s_column, s_row):\n # Populate board, fills the 2d array with 0 as the place holder value\n generated_board = [[0 for i in range(0, s_row)] for x in range(0, s_column)]\n return generated_board\n\n\ndef fill(board, column, s_row, player):\n row = None # sentinel: stays None (and fails fast) if the column is full; callers guard with check()\n\n # Determine the bottom-most empty slot in the column\n for curr_row in range(0, s_row):\n if board[column][curr_row] != 0:\n continue\n else:\n row = curr_row\n board[column][row] = player\n\n\ndef get_lastcolumn(oldboard, newboard):\n for curr_column in range(0, len(newboard)):\n if newboard[curr_column] != oldboard[curr_column]:\n return curr_column\n else: continue\n\ndef get_lastrow(oldboard, newboard):\n for curr_column in range(0, len(newboard)):\n for curr_row in range(0, len(newboard[curr_column])):\n if newboard[curr_column][curr_row] != oldboard[curr_column][curr_row]:\n return curr_row\n else: continue\n\n\ndef check(board,column): # Checks if the column is full\n # Checks if the top of the column is not 0, meaning a full column\n if board[column][0] != 0:\n return False\n else:\n return True\n\n\ndef player_turn(board, s_column, s_row, player):\n # User input for which column\n while True:\n column = int(input(\"Player \" + str(player) + \" select column: \")) - 1\n if column not in range(0, s_column):\n print(\"Column does not exist\")\n continue # re-prompt before indexing the board with an invalid column\n if not check(board,column):\n print(\"Slot is full, try another\")\n else:\n break\n if check(board,column):\n fill(board, column, s_row, player)\n\n\n####### PASS SCORE TO PROCESS#############\ndef ai_turn(board, s_row, player, curr_score):\n start = time.time()\n decision = heuristic.terminal(board, curr_score, 0, len(board)-1, [board], s_row, 0, player, 1, 2)\n end = time.time()\n print(end - start)\n # decision = heuristic.minimax(board, s_row, player, curr_score, True, 0, 4)\n column = decision\n print(column)\n # curr_score[column] += decision[column] - curr_score[column]\n if check(board, column):\n fill(board, column, s_row, player)\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415795462","text":"\n\nimport json\n\n\nfrom Librarues.JSON.Model.Human import Human\nfrom 
Librarues.JSON.Model.Town import Town\n# SQL - MySQL, PostgreSQL..\n# NoSQL - MongoDB, MariaDB\n# Key:Value - JSON, XML\n\n# WEB - REST(JSON), SOAP(XML)\n\nclass App():\n\n\n @staticmethod\n def run():\n\n town = App.uploadData()\n\n print(\"Hello, wanderer!\", \"You have arrived in the town of \"+town.name)\n print(\"It is home to \"+str(len(town.citizen))+\" people\")\n print(\"More precisely: \")\n for abc in town.citizen:\n print(abc.name, abc.account)\n\n\n\n\n\n\n\n @staticmethod\n def uploadData():\n\n file = open(\"town.json\", 'r')\n data = json.load(file)\n\n town = Town(data[\"name\"])\n\n for cit in data[\"citizen\"]:\n human = Human(cit[\"name\"], cit[\"account\"])\n town.addCitizen(human)\n\n return town\n\n @staticmethod\n def saveTown(town, path):\n file = open(path, 'w')\n dict_town = {}\n dict_town[\"name\"] = town.name\n dict_town[\"citizen\"] = []\n\n for citizen in town.citizen:\n dict_town[\"citizen\"].append(citizen.__dict__)\n\n json.dump(dict_town, file)\n file.close()\n\n\n\n\n\nApp.run()\n\n\n\n\n\n\n","sub_path":"Controller/les.py","file_name":"les.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339746897","text":"import ujson as json\nimport os\nimport uuid\nfrom typing import List, Tuple\n\nfrom autologging import logged, traced\n\nfrom solving.models import Task, Vehicle, DistanceMatrix, Tour, Solution\n\n\n@traced\ndef run_genetic_solver(\n tasks: List[Task], vehicles: List[Tuple[int, Vehicle]], matrix: DistanceMatrix\n):\n proc_id = uuid.uuid4()\n file = f\"{proc_id}\"\n generate_json(file, tasks, vehicles, matrix)\n # the trailing space after the output file name keeps --log from fusing with it\n os.system(\n f\"vrp-cli solve pragmatic {file}.json -m {file}_routing_matrix.json -o {file}_solution.json \"\n f\"--log --max-time=600\"\n )\n solution = convert_json(f\"{file}_solution\")\n\n # os.remove(f'{file}.json')\n # os.remove(f'{file}_routing_matrix.json')\n # os.remove(f'{file}_solution.json')\n\n return solution\n\n\n@traced\ndef convert_json(file_name: str):\n with open(f\"{file_name}.json\", \"r\") as f:\n solution = json.load(f)\n _statistics = solution[\"statistic\"]\n _tours = []\n for tour in solution[\"tours\"]:\n _tour = Tour(\n tour[\"vehicleId\"], tour[\"typeId\"], tour[\"stops\"], tour[\"statistic\"]\n )\n _tours.append(_tour)\n _unassigned = None if \"unassigned\" not in solution else solution[\"unassigned\"]\n return Solution(_statistics, _tours, _unassigned)\n\n\n@traced\ndef generate_json(\n file: str,\n tasks: List[Task],\n vehicles: List[Tuple[int, Vehicle]],\n matrix: DistanceMatrix,\n):\n jobs = []\n for i, task in enumerate(tasks):\n job = {\n \"id\": f\"job{i}\",\n \"deliveries\": [\n {\n \"places\": [\n {\n \"location\": {\"index\": task.id},\n \"duration\": task.delay,\n # \"times\": [[task.tw_start, task.tw_end]],\n }\n ],\n \"demand\": [10],\n }\n ],\n }\n jobs.append(job)\n cars = []\n for i, (num, vehicle) in enumerate(vehicles):\n car = {\n \"typeId\": f\"car_{i}\",\n \"vehicleIds\": [f\"car_{i}_{j}\" for j in range(num)],\n \"profile\": \"normal_car\",\n \"costs\": vehicle.costs,\n \"shifts\": [\n {\n \"start\": {\n \"earliest\": vehicle.start_time,\n \"location\": {\"index\": vehicle.start_place},\n }\n }\n ],\n \"capacity\": [vehicle.value],\n }\n cars.append(car)\n\n profiles = [{\"name\": \"normal_car\", \"type\": \"car\"}]\n problem = {\n \"plan\": {\"jobs\": jobs},\n \"fleet\": {\"vehicles\": cars, \"profiles\": profiles},\n }\n\n with open(f\"{file}.json\", \"w\") as f:\n json.dump(problem, f)\n\n size = 
len(matrix.dist_matrix)\n travel_times, time_m = [], matrix.time_matrix\n for i in range(size):\n for j in range(size):\n travel_times.append(int(time_m[i][j]))\n distances, distance_m = [], matrix.dist_matrix\n for i in range(size):\n for j in range(size):\n distances.append(int(distance_m[i][j]))\n routing = {\n \"profile\": \"normal_car\",\n \"travelTimes\": travel_times,\n \"distances\": distances,\n }\n\n with open(f\"{file}_routing_matrix.json\", \"w\") as f:\n json.dump(routing, f)\n","sub_path":"customer_cases/krugoreys/solving/genetic_runner.py","file_name":"genetic_runner.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"620433298","text":"\"\"\"File Input/Output Module.\"\"\"\nimport os\nimport numpy as np\nfrom PIL import Image\n\n\ndef load_data(root, reduce_scale):\n \"\"\"Load ORL (or Extended YaleB) dataset to numpy array.\"\"\"\n images, labels = [], []\n\n for i, person in enumerate(sorted(os.listdir(root))):\n\n if not os.path.isdir(os.path.join(root, person)):\n continue\n\n for fname in os.listdir(os.path.join(root, person)):\n\n # Remove background images in Extended YaleB dataset.\n if fname.endswith(\"Ambient.pgm\"):\n continue\n\n if not fname.endswith(\".pgm\"):\n continue\n\n # load image.\n img = Image.open(os.path.join(root, person, fname))\n img = img.convert(\"L\") # grey image.\n\n # reduce computation complexity.\n img = img.resize([s // reduce_scale for s in img.size])\n\n # convert image to numpy array.\n img = np.asarray(img).reshape((-1, 1))\n\n # collect data and label.\n images.append(img)\n labels.append(i)\n\n # concate all images and labels.\n images = np.concatenate(images, axis=1)\n labels = np.array(labels)\n\n return images, labels\n","sub_path":"nmf/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623072570","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nfrom selenium import webdriver\n\n\ndef get_html(url):\n r = requests.get(url)\n return r.text\n\n\ndef write_csv(data):\n with open('result.csv', 'a') as f:\n writer = csv.writer(f)\n\n writer.writerow([data['name'],\n data['address'],\n data['contact'],\n data['phone'],\n data['fax'],\n data['mail'],\n data['member'],\n data['telex'],\n data['website'],\n data['subsidary'],\n data['management'],\n data['business'],\n data['import_product'],\n data['export_product'],\n data['sub_product']])\n\n\ndef get_data(html):\n soup = BeautifulSoup(html, 'lxml')\n tds = soup.find('table', class_='list').find_all('td')\n\n for td in tds:\n link = 'http://www.icchk.org.hk/' + td.find('a').get('href')\n driver = webdriver.Chrome(executable_path='/Users/serg/Downloads/chromedriver')\n driver.get(link)\n html = driver.page_source\n company_page = BeautifulSoup(html, 'lxml')\n trs = company_page.find('div', class_='details').find('table').find('tr').find('table').find_all('tr')\n name = ' '.join((trs[1].find_all('td')[1].text).split())\n address = ' '.join((trs[2].find_all('td')[1].text).split())\n contact = ' '.join((trs[3].find_all('td')[1].text).split())\n phone = ' '.join((trs[4].find_all('td')[1].text).split())\n fax = ' '.join((trs[5].find_all('td')[1].text).split())\n mail = ' '.join((trs[6].find_all('td')[1].text).split())\n member = ' '.join((trs[7].find_all('td')[1].text).split())\n telex = trs[8].find_all('td')[1].text\n website = 
trs[10].find_all('td')[1].text\n subsidary = trs[12].find_all('td')[1].text\n management = ' '.join((trs[14].find_all('td')[1].text).split())\n\n business = trs[16].find_all('td')[1]\n result1 = []\n for i in business:\n result1.append(i)\n business = result1[2].strip()\n\n import_product = trs[18].find_all('td')[1]\n result2 = []\n for i in import_product:\n result2.append(i)\n import_product = result2[1].strip()\n\n export_product = trs[20].find_all('td')[1]\n result3 = []\n for i in export_product:\n result3.append(i)\n export_product = result3[1].strip()\n\n sub_product = trs[22].find_all('td')[1]\n result4 = []\n for i in sub_product:\n result4.append(i)\n sub_product = result4[1].strip()\n\n data = {'name': name,\n 'address': address,\n 'contact': contact,\n 'phone': phone,\n 'fax': fax,\n 'mail': mail,\n 'member': member,\n 'telex': telex,\n 'website': website,\n 'subsidary': subsidary,\n 'management': management,\n 'business': business,\n 'import_product': import_product,\n 'export_product': export_product,\n 'sub_product': sub_product}\n write_csv(data)\n\n\ndef main():\n for i in range(1, 37):\n url = 'http://www.icchk.org.hk/business_directory.php?page={}&companyname=&membertype=&char='.format(i)\n get_data(get_html(url))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104992839","text":"# ---------------------------------------------------------------------------- #\n# parrots.py\n# George Corser, January 28, 2013\n# Simulation of PARROTS, a VANET privacy model, wirtten in Python 2.7\n# PARROTS: Position Altered Random Repetition of Transporation Signature\n#\n# See the \"Main\" section at the bottom of this file to change parameters.\n# This simulation assumes a grid of roads 100m apart on a 3000mx3000m area\n# ---------------------------------------------------------------------------- #\n\ndef PARROTS(t, v, parrotee_percent, parroter_percent):\n # Function arguments ----------------------------------------------------- #\n # t is number of time slices. 
Each time slice is: comfreq = 300 ms\n # v is number of vehicles in simulation\n # parrotee_percent is the ratio of vehicles that wish to request parroting\n # parroter_percent is the ratio of vehicles that volunteer to be parrots\n \n ret_list = list()\n # ret_list is the list of integers output by each iteration of PARROTS()\n \n # ------------------------------------------------------------------------ #\n # SECTION 0: DECLARATIONS\n # ------------------------------------------------------------------------ #\n \n # ----- General Declarations --------------------------------------------- #\n import time # for timestamp\n import decimal # needed for Decimal() function\n import math # needed for sqrt() function\n import random # needed for pseudo random numbers\n random.seed(1) # initialize pseudo random number generator\n topspeed = 30 # meters per second: 30 m/s ~= 108 kph ~= 67 mph\n comfreq = 300 # milliseconds between time intervals\n comrange = 300 # meters: max range of wireless communications\n xmax = 3000 # meters: boundary of traffic grid from (0,0) to (xmax,ymax)\n ymax = 3000\n # t = 600 # number of time slices # function argument\n ti = 0 # index for looping 0-t-1\n # v = 500 # number of vehicles in simulation # function argument\n vi = 0 # index for looping 0 to v-1\n x = list() # list of current x coordinates\n y = list() # list of current y coordinates\n xprior = list() # list of prior x coordinates\n yprior = list() # list of prior y coordinates\n xdir = list() # x direction (-1 = left, 1 = right)\n ydir = list() # y direction (-1 = down, 1 = up)\n leader = list() # vehicle number of group leader\n anonymity = list() # anonymity set size =groupsize of leader, if self only =1\n cum_anonymity = 0 # accumulator to calculate average anonymity set size\n dflag = list() #0 for being on x logs, 1 for y logs\n \n spcp = 0 # 1 = spcp = synch pseudo change protocoal, 0 = aosa = anon online svc access protocol\n\n # ----- Parroting Declarations ------------------------------------------- #\n # parrotee_percent = 50 # percent chance a vehicle will request parroting # function argument\n # parroter_percent = 50 # percent chance a vehicle will perform parroting # function argument\n # only vehicles desiring more privacy will request parroting\n # only vehicles willing to assist others will perform parroting\n # vehicles may do one, the other, both or neither\n # if a vehicle requests a parrot, all willing vehicles in group will parrot...\n # ... 
after they change group leaders, not before...\n # this increases the anonymity set size for the parrotee\n # the new anonymity set is the parrottee's current group size plus \n # the sum of the group sizes of all the parroter's groups\n parroted_id = list() # the id parroted by this vehicle (increases anon set of other vehicle)\n p_anonymity = list() # additional anonymity offered by parroted group\n parrotee = list() # 0 default; 1 if this vehicle requests parroting\n parroter = list() # 0 default; 1 if this vehicle performs parroting\n v_timeout = list() # let timeout be the number of time slices remaining for validity of id\n g_timeout = list() # time slices left for group # for spcp\n p_timeout = list() \n cum_parrotees = 0\n cum_parroters = 0\n\n total_pirates = 0\n pirate_sizes = 0\n normal_sizes = 0\n pirate_entropy = 0\n\n # ------------------------------------------------------------------------ #\n # SECTION 1: INITIALIZATION\n # ------------------------------------------------------------------------ #\n # Assume 15 city blocks per mile, 30 blocks per 2 miles ~= 10000 ft ~= 3000m\n # below: assume roads between blocks at 0, 100, 200, ..., 3000 for xmax=3000m\n # Each car starts at a random intersection on the road grid\n # ------------------------------------------------------------------------ #\n # Step 1.a. Initialize vehicle locations\n # ------------------------------------------------------------------------ #\n for ti in range(1): # initialize vehicles at random coordinates on road grid\n for vi in range(v):\n \n # ----- Vehicles, Groups and Leaders ----------------------------- #\n if vi % 2 == 0: # if vi is even, let x be an even 100 and y be random\n lrand = (100*random.randint(0,xmax/100))\n x.append((1.305897**(lrand/100))-1)\n y.append(random.randint(0,ymax))\n dflag.append(0)\n else:\n lrand = (100*random.randint(0,ymax/100))\n x.append(random.randint(0,xmax))\n y.append((1.305897**(lrand/100))-1)\n dflag.append(1)\n xprior.append(0)\n yprior.append(0)\n xdir.append((-1)**random.randint(1,2)) # randomly select -1 or 1 \n ydir.append((-1)**random.randint(1,2))\n leader.append(-1) # -1 means has not been set\n anonymity.append(-1)\n\n # ----- Parroting ------------------------------------------------ #\n parroted_id.append(-1) # no parroting at initialization\n p_anonymity.append(0) # at first there are zero parroters from previous groups (PAS)\n \n if random.randint(1,101) < parrotee_percent:\n parrotee.append(1)\n else:\n parrotee.append(0)\n \n if random.randint(1,101) < parroter_percent:\n parroter.append(1)\n else:\n parroter.append(0)\n\n # ----- Parroting Timeout lists ---------------------------------- #\n v_timeout.append(1+random.randint(1,2000)) # use 1+ to prevent <0 later (when decrementing)\n # 2000 is 10 minutes worth of time slices at 300ms per time slice\n p_timeout.append(-1) # none because parroting has not started yet\n \n # ------------------------------------------------------------------------ #\n # Step 1.b. 
Initialize group leader for each vehicle\n # ------------------------------------------------------------------------ #\n # No parroting on initialization\n for ti in range(1): \n for vi in range(v):\n cur_dist = comrange + 1 # no distance set yet\n \n if leader[vi] > -1: # if this vehicle already has a group leader\n # check if group leader in still in communications range\n cur_dist = math.sqrt((x[leader[vi]]-x[vi])**2+(y[leader[vi]]-y[vi])**2)\n # if leader is still in commuications range, do nothing for this vi\n if cur_dist > comrange: # if group leader is out of range\n anonymity[leader[vi]] -= 1 # decrement anonymity set for leader\n leader[vi] = -1 # establish that vi has no leader\n anonymity[vi] = 0\n \n if leader[vi] == -1: # if vi has no leader\n leader[vi] = vi # if no leader found, leader defaults to self\n anonymity[vi] = 1 # if self is leader then anonymity set = 1 (assume anon set = size of group)\n for di in range (vi): # find lowest-numbered vehicle di < vi that is already a group leader\n if leader[di] == di: # if lower numbered vehicle is already a group leader (leads itself)\n cur_dist = math.sqrt((x[di]-x[vi])**2+(y[di]-y[vi])**2) # compute euclidean distance\n if cur_dist < comrange: # if in comrange\n leader[vi] = di # set leader\n anonymity[leader[vi]] += 1 # increment anonymity set of leader\n # anonymity set of this follower will be updated later, in Step 1.c.\n break # break out of \"for\" loop: stop looking for more leaders for this vi\n if spcp == 1:\n if leader[vi] == vi:\n g_timeout.append(1+random.randint(1,1000)) # set group timeout\n else:\n g_timeout.append(0) # set group timeout 0 for non-leaders\n v_timeout[vi] = g_timeout[leader[vi]]\n \n # ------------------------------------------------------------------------ #\n # Step 1.c. Initialize anonymity set values for all vehicles\n # ------------------------------------------------------------------------ #\n for ti in range(1): \n for vi in range(v):\n anonymity[vi] = anonymity[leader[vi]] # vehicle's anonymity set size equals leader's group size\n\n # ------------------------------------------------------------------------ #\n # SECTION 2: VEHICLE MOVEMENTS -- INCLUDES GROUP LEADER CHANGES, TIMEOUTS,\n # ANONYMITY SET (A.S.) CHANGES, PARROTING CHANGES, AND PARROT A.S. CHANGES\n # ------------------------------------------------------------------------ #\n\n for ti in range(t): # move vehicles to nearby coordinates on road grid\n\n total_pirates = 0\n pirate_sizes = 0\n normal_sizes = 0\n pirate_entropy = 0\n \n # -------------------------------------------------------------------- # \n # Decrement timeout for all vehicles and parrots\n # -------------------------------------------------------------------- #\n for vi in range(v):\n v_timeout[vi] -= 1\n if v_timeout[vi] == 0: # if vehicle pseudo-id times out, reset timer\n v_timeout[vi] = random.randint(1,2000)\n if p_timeout[vi] != -1:\n p_timeout[vi] -= 1\n if p_timeout[vi] == 0: \n p_timeout[vi] = -1 # end parroting\n parroted_id[vi] = -1\n \n # -------------------------------------------------------------------- #\n # Initialize accumulators for output\n # -------------------------------------------------------------------- #\n entropy = 0.0\n as1 = 0.0\n cum_anonymity = 0 # initialize anonymity set accumulator\n # cum_anonymity is used to calculate the overall A. S. 
size at end of program\n cum_parrotees = 0\n cum_parroters = 0\n cum_p_anonymity = 0\n parrot_counter = 0\n \n # -------------------------------------------------------------------- #\n # Step 2.a. Vehicle movements - randomize vehicles as they traverse the road grid\n # -------------------------------------------------------------------- #\n for vi in range(v):\n new_group_leader = 0\n xprior[vi] = x[vi]\n yprior[vi] = y[vi]\n increment = random.randint(0,topspeed*comfreq/1000)\n if dflag[vi] == 0: # if vehicle is on vertical road stay vertical\n x[vi] = x[vi]\n if y[vi] + ydir[vi] * increment > ymax:\n y[vi] = ymax\n ydir[vi] = - ydir[vi]\n elif y[vi] + ydir[vi] * increment < 0:\n y[vi] = 0\n ydir[vi] = - ydir[vi]\n else:\n y[vi] = y[vi] + ydir[vi] * increment\n \n nextstreet = 0\n step = 0\n while y[vi] >= nextstreet:\n nextstreet = (1.305897**(step/100))-1\n step += 100\n \n if nextstreet - y[vi] < 10: # if close to horizontal\n y[vi] = nextstreet # then switch to vertical next time\n x[vi] = x[vi] - 1\n dflag[vi] = 1\n if x[vi] < 0:\n x[vi] = 1\n \n else:\n y[vi] = y[vi]\n if x[vi] + xdir[vi] * increment > xmax:\n x[vi] = xmax\n xdir[vi] = - xdir[vi]\n elif x[vi] + xdir[vi] * increment < 0:\n x[vi] = 0\n xdir[vi] = - xdir[vi]\n else:\n x[vi] = x[vi] + xdir[vi] * increment\n\n nextstreet = 0\n step = 0\n while x[vi] >= nextstreet:\n nextstreet = (1.305897**(step/100))-1\n step += 100\n \n if nextstreet - x[vi] < 10: # if close to horizontal\n x[vi] = nextstreet # then switch to vertical next time\n y[vi] = y[vi] - 1\n dflag[vi] = 0\n if y[vi] < 0:\n y[vi] = 1\n \n # ---------------------------------------------------------------- #\n # Step 2.b. Group Leader updates - vehicles change group leaders\n # depending on transmission range (default comrange = 300m)\n # ---------------------------------------------------------------- #\n cur_dist = comrange + 1 # no distance set yet\n\n if leader[vi] > -1: # if this vehicle already has a group leader\n # check if group leader in still in communications range\n cur_dist = math.sqrt((x[leader[vi]]-x[vi])**2+(y[leader[vi]]-y[vi])**2)\n # if leader is still in commuications range, do nothing for this vi\n if cur_dist > comrange: # if group leader is out of range\n anonymity[leader[vi]] -= 1 # decrement anonymity set for leader\n leader[vi] = -1 # establish that vi has no leader\n anonymity[vi] = 0\n if spcp == 1:\n g_timeout[vi] = 1+random.randint(1,1000) # set group timeout for spcp\n \n if leader[vi] == -1: # if vi has no leader\n leader[vi] = vi # if no leader found, leader defaults to self\n anonymity[vi] = 1 # if self is leader then anonymity set = 1 (assume anon set = size of group)\n for di in range (vi): # find lowest-numbered vehicle di < vi that is already a group leader\n if leader[di] == di: # if lower numbered vehicle is already a group leader (leads itself)\n cur_dist = math.sqrt((x[di]-x[vi])**2+(y[di]-y[vi])**2) # compute euclidean distance\n if cur_dist < comrange: # if in comrange\n leader[vi] = di # set leader\n anonymity[leader[vi]] += 1 # increment anonymity set of leader\n # anonymity set of this follower will be updated later, in Step 2.c.\n\n break # break out of \"for\" loop: stop looking for more leaders for this vi\n if spcp == 1:\n if leader[vi] == vi:\n g_timeout[vi] -= g_timeout[vi] # set group timeout\n if g_timeout < 0:\n exit(1)\n else:\n g_timeout.append(0) # set group timeout 0 for non-leaders\n v_timeout[vi] = g_timeout[leader[vi]] # v_timeout decrements with g_timeout\n\n # 
---------------------------------------------------------------- #\n # Step 2.c. Update anonymity set (A. S.) \n # ---------------------------------------------------------------- #\n anonymity[vi] = anonymity[leader[vi]] # vehicle's anonymity set size equals leader's group size\n\n # ---------------------------------------------------------------- #\n # Step 2.d. Update parroting status\n # parrots identified while in same group, though\n # parroting only occurs when parroter NOT in same group as parrotee\n # parroter parrots only one parrotee at any given time\n # max parrots = v, the number of vehicles\n # This simulation assume a parrot parrots only ONE other vehicle\n # ---------------------------------------------------------------- #\n # Find a parrot... vi is parroter and pi is parrotee\n for pi in range (v): # search all vehicles\n if leader[pi] == leader[vi]: # find vehicle in same group\n if pi != vi: # but not self same vehicle\n if parrotee[pi] == 1: # pi wants a parrot\n if parroter[vi] == 1: # vi wants to be a parrot\n if parroted_id[vi] == -1: # no parrot set yet\n parroted_id[vi] = pi # vi parrots for pi\n p_timeout[vi] = v_timeout[parroted_id[vi]]\n # parroting times out when parrotee's pseudoid times out\n p_anonymity[parroted_id[vi]] += 1 #gpc\n parrot_counter += 1\n break # get out of \"for\" loop\n \n # ---------------------------------------------------------------- #\n # Step 2.e. Update all parrot anonymity sets (P. A. S.) \n # ---------------------------------------------------------------- #\n # what if two vehicles in same group parroting same parrotee? IT could happen...\n for pi in range(v): # parrotee's p_anonymity equals sum of all parrots' group sizes\n p_anonymity[pi]= 0\n for pi in range(v):\n if parroted_id[pi] > -1:\n p_anonymity[parroted_id[pi]] += anonymity[pi] \n\n # -------------------------------------------------------------------- #\n # Increment accumulators for output\n # -------------------------------------------------------------------- #\n #if leader[vi] == vi:\n #temp_entropy = 1 / (anonymity[vi]+p_anonymity[vi]+.0000000000001)\n #temp_entropy_log = 0.0\n #temp_entropy_log = math.log(temp_entropy,2) # log of fraction is negative\n #entropy = entropy - temp_entropy_log\n entropy = entropy + math.log(anonymity[vi]+p_anonymity[vi],2) # see eq p.101\n if anonymity[vi] + p_anonymity[vi] == 1: # as1 ia number of cars with as=1\n as1 = as1 + 1\n cum_anonymity += anonymity[vi] # add this vi's A. S. to the total A. 
S.\n cum_parrotees += parrotee[vi]\n cum_parroters += parroter[vi]\n cum_p_anonymity += p_anonymity[vi]\n if parroted_id[vi] > -1:\n parrot_counter += 1\n\n normal_sizes += (p_anonymity[vi] + anonymity[vi])\n isparroted = 0\n for (check) in range(v):\n if parroted_id[check] == vi:\n isparroted = 1\n\n if isparroted == 1:\n pirate_sizes = pirate_sizes + (p_anonymity[vi] + anonymity[vi])\n pirate_entropy = pirate_entropy + math.log(anonymity[vi]+p_anonymity[vi],2)\n total_pirates += 1\n \n if ((ti+1) % 100) == 0: # on last iteration print output\n ret_list = list()\n ret_list.append(t) # total time slices\n ret_list.append(v) # total vehicles\n ret_list.append(cum_anonymity) # sum(AS): sum of all anonymity set values\n ret_list.append(parrotee_percent) # PEP: parrotee percent\n ret_list.append(parroter_percent) # PRP: parroter percent\n ret_list.append(cum_parrotees) # sum(PR): number of potetial parrotees\n ret_list.append(cum_parroters) # sum(PR): number of potetial parroters\n ret_list.append(cum_p_anonymity) # sum(PAS): sum of AS's of active parroters\n ret_list.append(parrot_counter) # count(PAS): number of active parroters\n ret_list.append(time.clock()) # timestamp\n ret_list.append(as1) # count of vehicles with as size = 1\n ret_list.append(as1 / v) # tracking probability\n ret_list.append(entropy)\n ret_list.append(entropy / v)\n # print \"t, v, cum_anon, ptee_pct, pter_pct, cum_ptees, cum_pters, cum_p_anon, pter_actual, time\"\n\n minass = v + 1 #calculation of a.s.s range\n maxass = 0\n for carz in range(v):\n if anonymity[carz] > maxass:\n maxass = anonymity[carz]\n if anonymity[carz] < minass:\n minass = anonymity[carz]\n\n if total_pirates == 0:\n total_pirates = 1\n \n ret_list.append(v) # active cars at last time step\n ret_list.append(as1 / v) # tracking probability\n ret_list.append(entropy / v) # entropy\n ret_list.append(cum_anonymity) # sum(AS): sum of all anonymity set values\n ret_list.append(float(cum_anonymity+cum_p_anonymity) / float(v)) # average AS size\n ret_list.append(minass)\n ret_list.append(maxass)\n ret_list.append(maxass-minass)\n ret_list.append(normal_sizes/v)\n ret_list.append(pirate_sizes/total_pirates)\n ret_list.append(total_pirates)\n ret_list.append(pirate_entropy/total_pirates)\n\n print (ret_list)\n\n print (\"TIME\" , ti, \"COMPLETE\")\n \n return ret_list # end of Section 2\n\n# ----------------------------------------------------------------------------- #\n# Main\n# ----------------------------------------------------------------------------- #\nprint (PARROTS(2000,400,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,800,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,1200,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,1600,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,2000,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,2400,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,2800,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,3200,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,3600,100,50))\nprint (\"###############################################\")\nprint (PARROTS(2000,4000,100,50))\n","sub_path":"2nd Round Testing/expparrots - piratesets.py","file_name":"expparrots - 
piratesets.py","file_ext":"py","file_size_in_byte":23259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166159654","text":"import faker\nimport requests, json\nfrom datetime import datetime\nimport re\nfrom tiktok_downloader.Except import InvalidUrl\nfrom faker import Faker\nfake = Faker()\nclass info_post(requests.Session):\n '''\n :param url: video url(tiktok)\n '''\n def __init__(self, url: str) -> None:\n super().__init__()\n self.headers={\"sec-ch-ua\": '\"Google Chrome\";v=\"89\", \"Chromium\";v=\"89\", \";Not A Brand\";v=\"99\"',\"sec-ch-ua-mobile\": \"?0\",\"sec-ch-ua-platform\": \"Linux\",\"sec-fetch-dest\": \"document\",\"sec-fetch-mode\": \"navigate\",\"sec-fetch-site\": \"none\",\"sec-fetch-user\": \"?1\",\"upgrade-insecure-requests\": \"1\",\"user-agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36\", \"X-Forwarded-For\":fake.ipv4()}\n self.html = self.get(url)\n self.js = json.loads(re.search(r'\\>(\\{\\\"props\\\":.*?)\\<\\/script>',self.html.text).group(1))\n self.account = Account(self.js['props']['pageProps']['itemInfo']['itemStruct']['author'])\n self.video = self.js['props']['pageProps']['itemInfo']['itemStruct']\n self.cover = self.video['video']['cover']\n self.music = self.video['music']['title']\n self.caption = self.video['desc']\n self.create = datetime.fromtimestamp(self.video['createTime'])\n self.url = url\n self.id, self.height, self.width, self.duration, self.ratio,self.bitrate = self.video['video']['id'], self.video['video']['height'],self.video['video']['width'],self.video['video']['duration'],self.video['video']['ratio'],self.video['video']['bitrate']\n self.tt_csrf_token=self.js['query']['$initialProps']['$csrfToken']\n self.aftercsrf=self.js['query']['$initialProps']['$encryptedWebid']\n self.tt_webid_v2=self.js['query']['$initialProps']['$logId']\n self.headers.update({'Cookie':f'tt_webid_v2={self.tt_webid_v2}; tt_csrf_token={self.tt_csrf_token}; {self.aftercsrf}'})\n def __str__(self) -> str:\n return f\"<(ID:{self.id})>\"\n def __repr__(self) -> str:\n return self.__str__()\n\nclass Account:\n def __init__(self, js:dict) -> None:\n self.avatar = js['avatarThumb']\n self.username = js['uniqueId']\n self.nickname = js['nickname']\n self.signature = js['signature']\n self.create = datetime.fromtimestamp(js['createTime'])\n self.verified = js['verified']\n self.private = js[\"privateAccount\"]\n def __repr__(self) -> str:\n return f\"<(OWNER:{self.username} VERIFIED:{self.verified})>\"\n def __str__(self) -> str:\n return self.__repr__()\n","sub_path":"tiktok_downloader/scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"327195072","text":"\r\nimport itertools\r\n\r\nfrom typing import (\r\n Iterable,\r\n Iterator,\r\n TypeVar,\r\n)\r\n\r\n\r\nT = TypeVar(\"T\")\r\n\r\ndef endless_iter(iterable: Iterable[T]) -> Iterator[T]:\r\n \"\"\"Generator that endlessly yields elements from iterable.\r\n If any call to `iter(iterable)` has no elements, then this function raises\r\n ValueError.\r\n >>> x = range(2)\r\n >>> it = endless_iter(x)\r\n >>> next(it)\r\n 0\r\n >>> next(it)\r\n 1\r\n >>> next(it)\r\n 0\r\n \"\"\"\r\n try:\r\n next(iter(iterable))\r\n except StopIteration:\r\n err = ValueError(f\"iterable {iterable} had no elements to iterate over.\")\r\n raise err\r\n\r\n return 
itertools.chain.from_iterable(itertools.repeat(iterable))","sub_path":"Hiwi-Own/data/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603975464","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2021 Technische Universität Graz\n#\n# invenio-rdm-pure is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Module for creating Pure import file.\"\"\"\n\nimport datetime\nimport json\nimport os\nfrom os.path import dirname\nfrom pathlib import Path\nfrom xml.dom import minidom\nfrom xml.etree import ElementTree as ET\n\nimport requests\nfrom flask import current_app\n\nfrom .utils import get_value\n\nPURE_DATASET_NAMESPACE = \"v1.dataset.pure.atira.dk\"\nPURE_COMMONS_NAMESPACE = \"v3.commons.pure.atira.dk\"\n\n\ndef create_pure_import_file(pure_import_file_path: str) -> None:\n    \"\"\"Create Pure Import file.\"\"\"\n page = 1\n next_page = True\n root = create_xml()\n\n while next_page:\n # Get RDM records by page\n records = get_rdm_records_metadata(page)\n if not records:\n if os.path.isfile(pure_import_file_path):\n current_app.logger.info(\"Task correctly finished\\n\")\n else:\n current_app.logger.error(\"Task ended - No xml file created\\n\")\n return\n page += 1\n next_page = process_data(records, root)\n write_xml_to_file(root, pure_import_file_path)\n\n\ndef create_xml() -> ET.Element:\n \"\"\"Creates the xml which will be imported in pure.\"\"\"\n ET.register_namespace(\"v1\", PURE_DATASET_NAMESPACE)\n ET.register_namespace(\"v3\", PURE_COMMONS_NAMESPACE)\n\n root = ET.Element(\"{%s}datasets\" % PURE_DATASET_NAMESPACE)\n return root\n\n\ndef process_data(records, root) -> bool:\n \"\"\"Add today's records to the xml tree; return False when paging should stop.\"\"\"\n for record in records:\n item_metadata = record[\"metadata\"]\n\n # If the rdm record has a uuid means that it was imported from pure - REVIEW\n if \"uuid\" in item_metadata:\n continue\n\n # Stop paging once we reach records created before today\n # (``datetime`` is the module, so the class must be qualified)\n if record[\"created\"] <= datetime.datetime.today().strftime(\"%Y-%m-%d\"):\n return False\n\n # Adds fields to the created xml element\n populate_xml(item_metadata, record, root)\n return True\n\n\ndef populate_xml(item, record, root) -> None:\n \"\"\"Populate a dataset element from an RDM record.\"\"\"\n # Dataset element\n body = ET.SubElement(root, \"{%s}dataset\" % PURE_DATASET_NAMESPACE)\n body.set(\"type\", \"dataset\")\n\n # Title (mandatory field)\n value = get_value(item, [\"titles\", 0, \"title\"])\n if not value:\n return\n sub_element(body, PURE_DATASET_NAMESPACE, \"title\").text = value\n\n # Managing organisation (mandatory field)\n organisational_unit = sub_element(\n body, PURE_DATASET_NAMESPACE, \"managingOrganisation\"\n )\n add_attribute(\n item,\n organisational_unit,\n \"lookupId\",\n [\"managingOrganisationalUnit_externalId\"],\n )\n\n # Persons (mandatory field)\n add_persons(body, item)\n\n # Available date (mandatory field)\n date = sub_element(body, PURE_DATASET_NAMESPACE, \"availableDate\")\n sub_date = sub_element(date, PURE_COMMONS_NAMESPACE, \"year\")\n sub_date.text = get_value(item, [\"publication_date\"])\n\n # Publisher (mandatory field)\n add_publisher(body, item)\n\n # Description\n value = get_value(item, [\"abstract\"])\n if value:\n descriptions = sub_element(body, PURE_DATASET_NAMESPACE, \"descriptions\")\n description = sub_element(descriptions, PURE_DATASET_NAMESPACE, 
\"description\")\n description.set(\"type\", \"datasetdescription\")\n description.text = value\n\n # Links\n add_links(body, record)\n\n # Organisations\n add_organisations(body, item)\n\n\ndef add_publisher(body, item) -> None:\n \"\"\"Add publisher attribute.\"\"\"\n publisher_name = get_value(item, [\"publisherName\"])\n publisher_uuid = get_value(item, [\"publisherUuid\"])\n publisher_type = get_value(item, [\"publisherType\"])\n\n if not publisher_uuid:\n return\n\n publisher = sub_element(body, PURE_DATASET_NAMESPACE, \"publisher\")\n publisher.set(\"lookupId\", publisher_uuid)\n sub_element(publisher, PURE_DATASET_NAMESPACE, \"name\").text = (\n publisher_name if publisher_name else \"\"\n )\n sub_element(publisher, PURE_DATASET_NAMESPACE, \"type\").text = (\n publisher_type if publisher_type else \"\"\n )\n\n\ndef add_organisations(body, item) -> None:\n \"\"\"Add organisations.\"\"\"\n if \"organisationalUnits\" not in item:\n return False\n organisations = sub_element(body, PURE_DATASET_NAMESPACE, \"organisations\")\n\n for unit_data in item[\"organisationalUnits\"]:\n \"\"\"\n Pure dataset documentation:\n Can be both an internal and external organisation, use origin to enforce either internal or external.\n If the organisation is an internal organisation in Pure, then the lookupId attribute must be used.\n If the organisation is an external organisation and id is given, the matching will be done on the id,\n if not found mathching will be done on name, if still not found then an external\n organisation with the specified id and organisation will be created.\n \"\"\"\n organisation = sub_element(\n organisations, PURE_DATASET_NAMESPACE, \"organisation\"\n )\n add_attribute(unit_data, organisation, \"lookupId\", [\"externalId\"])\n name = sub_element(organisation, PURE_DATASET_NAMESPACE, \"name\")\n name.text = get_value(unit_data, [\"name\"])\n\n\ndef add_persons(body, item) -> None:\n \"\"\"Add persons.\"\"\"\n persons = sub_element(body, PURE_DATASET_NAMESPACE, \"persons\")\n\n for person_data in item[\"creators\"]:\n person = sub_element(persons, PURE_DATASET_NAMESPACE, \"person\")\n person.set(\"contactPerson\", \"true\")\n add_attribute(person_data, person, \"id\", [\"identifiers\", \"uuid\"])\n # External id\n person_id = sub_element(person, PURE_DATASET_NAMESPACE, \"person\")\n add_attribute(person_data, person_id, \"lookupId\", [\"identifiers\", \"externalId\"])\n # Role\n role = sub_element(person, PURE_DATASET_NAMESPACE, \"role\")\n role.text = get_value(person_data, [\"pure_personRole\"])\n # Name\n name = sub_element(person, PURE_DATASET_NAMESPACE, \"name\")\n name.text = get_value(person_data, [\"name\"])\n\n\ndef add_links(body, record) -> None:\n \"\"\"Adds relative links for RDM files and api.\"\"\"\n link_files = get_value(record, [\"links\", \"files\"])\n link_self = get_value(record, [\"links\", \"self\"])\n recid = get_value(record, [\"id\"])\n if link_files or link_self:\n links = sub_element(body, PURE_DATASET_NAMESPACE, \"links\")\n # Files\n if link_files:\n link = sub_element(links, PURE_DATASET_NAMESPACE, \"link\")\n link.set(\"id\", \"link_files\")\n sub_element(link, PURE_DATASET_NAMESPACE, \"url\").text = link_files\n sub_element(\n link, PURE_DATASET_NAMESPACE, \"description\"\n ).text = \"Link to record files\"\n # Self\n if link_self:\n link = sub_element(links, PURE_DATASET_NAMESPACE, \"link\")\n link.set(\"id\", \"link_self\")\n url = sub_element(link, PURE_DATASET_NAMESPACE, \"url\").text = link_self\n sub_element(\n link, 
PURE_DATASET_NAMESPACE, \"description\"\n ).text = \"Link to record API\"\n\n\ndef write_xml_to_file(root, pure_import_file_path: str) -> None:\n \"\"\"Write the xml to a file.\"\"\"\n pure_import_dir = dirname(pure_import_file_path)\n Path(f\"{pure_import_dir}\").mkdir(parents=True, exist_ok=True)\n\n # Wrap it in an ElementTree instance and save as XML\n xml_str = minidom.parseString(ET.tostring(root)).toprettyxml(indent=(4 * \" \"))\n open(pure_import_file_path, \"w\").write(xml_str)\n\n\ndef sub_element(element, namespace: str, sub_element_name: str) -> None:\n \"\"\"Adds the the xml a sub element.\"\"\"\n return ET.SubElement(element, \"{%s}%s\" % (namespace, sub_element_name))\n\n\ndef add_attribute(item: object, sub_element, attribute: str, value_path: list) -> None:\n \"\"\"Gets from the rdm response a value and adds it as attribute to a given xml element.\"\"\"\n value = get_value(item, value_path)\n if value:\n sub_element.set(attribute, value)\n\n\ndef add_text(item: object, sub_element: object, path) -> None:\n \"\"\"Gets from the rdm response a value and adds it as text to a given xml element.\"\"\"\n sub_element.text = get_value(item, path)\n\n\ndef get_rdm_records_metadata(page: int, page_size=50) -> dict:\n \"\"\"Requests to rdm records metadata by page.\"\"\"\n \"\"\"\n # TODO: replace REST with internal API\n from invenio_search import RecordsSearch, current_search_client\n\n class ExampleSearch(RecordsSearch):\n class Meta:\n index = \"marc21records-marc21\" # Search alias of marc21 records\n fields = (\"*\",)\n facets = {}\n\n search = ExampleSearch()\n document = current_search_client.get() # TODO: sort them most-recent-first if not standard\n \"\"\"\n parameters = {\"sort\": \"mostrecent\", \"size\": page_size, \"page\": page}\n response = get_metadata(parameters)\n\n if response.status_code >= 300:\n return None\n\n # Load response\n hits = json.loads(response.content)[\"hits\"][\"hits\"]\n return hits if hits else None\n\n\ndef get_metadata(additional_parameters: dict, recid: str = \"\") -> requests.Response:\n \"\"\"Retrieves metadata from Invenio via its REST API.\"\"\"\n headers = dict()\n headers[\"Content-Type\"] = \"application/json\"\n params = ((\"prettyprint\", \"1\"),)\n if not recid:\n url = str(current_app.config.get(\"INVENIO_PURE_RECORDS_URL\"))\n else:\n rdm_record_url: str = str(current_app.config.get(\"INVENIO_PURE_RECORD_URL\"))\n url = rdm_record_url.format(recid)\n\n # Add parameters to url\n if len(additional_parameters) > 0:\n url += \"?\"\n for key in additional_parameters:\n url += f\"{key}={additional_parameters[key]}&\"\n # Remove last character\n url = url[:-1]\n\n # Sending request\n response = requests.get(url, headers=headers, params=params, verify=False)\n\n return response\n","sub_path":"invenio_rdm_pure/pure/import_records.py","file_name":"import_records.py","file_ext":"py","file_size_in_byte":10122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478179294","text":"def squareNoNo(n):\n p = 0\n for i in range(1, int(n ** 0.5) + 1, 1):\n p += mobius[i] * (n // (i * i))\n return p\nl, r = 0, 2000000000\nmobius = [0] * 1000001\nk = int(input())\nmobius[1] = 1\nfor i in range(1, 1000001):\n if mobius[i]:\n for j in range(i * 2, 1000001, i):\n mobius[j] -= mobius[i]\nwhile l < r - 1:\n mid = (l + r) // 2\n if squareNoNo(mid) < k:\n l = mid\n else:\n r = mid\nprint(r)","sub_path":"BOJ/이분탐색/1557 제곱 
ㄴㄴ/1557제곱ㄴㄴ.py","file_name":"1557제곱ㄴㄴ.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605254885","text":"import os\r\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'odroidDjangoProject.settings')\r\n\r\nimport django\r\ndjango.setup()\r\n\r\nfrom lac.models import Match\r\nimport sqlite3\r\n\r\ndef add_match(*args):\r\n matchId, p1, p2, p3, p4, p5, p6, p7, p8, p9 = args\r\n \r\n m = Match.objects.get_or_create(matchId=matchId)[0]\r\n m.participant1 = p1\r\n m.participant2 = p2\r\n m.participant3 = p3\r\n m.participant4 = p4\r\n m.participant5 = p5\r\n m.participant6 = p6\r\n m.participant7 = p7\r\n m.participant8 = p8\r\n m.participant9 = p9\r\n return m\r\n \r\ndef populate():\r\n conn = sqlite3.connect('/home/odroid/odroid_project/odroidDjangoProject/lac/matchesDB.db')\r\n c = conn.cursor()\r\n s = c.execute('SELECT * from matches').fetchall()\r\n return s\r\n \r\ndef add_performers():\r\n from oPlayer.models import Performer\r\n from django.db import connection\r\n cursor = connection.cursor()\r\n cursor.execute('SELECT DISTINCT performer from oplayer_song')\r\n performers = cursor.fetchall()\r\n for i in performers:\r\n p = Performer.objects.get_or_create(name=i[0])[0]\r\n p.save()\r\n \r\ndef add_albums():\r\n from oPlayer.models import Album, Performer\r\n from django.db import connection\r\n cursor = connection.cursor()\r\n cursor.execute('select distinct album, performer from oplayer_song')\r\n albums = cursor.fetchall()\r\n for i in albums:\r\n a = Album.objects.get_or_create(name=i[0], performer=Performer.objects.get(name=i[1]))[0]\r\n a.save()\r\n \r\ndef add_tracks():\r\n import datetime\r\n dt1 = datetime.datetime.now()\r\n from oPlayer.models import Track, Album, Performer\r\n from django.db import connection\r\n cursor = connection.cursor()\r\n cursor.execute('select songName, album, performer from oplayer_song')\r\n tracks = cursor.fetchall()\r\n dt2 = dt1\r\n \r\n for i in tracks:\r\n print(i[1])\r\n albums = Album.objects.filter(name=i[1])\r\n performer = Performer.objects.get(name=i[2])\r\n for album in albums:\r\n t = Track.objects.get_or_create(songName=i[0], album=album, performer=performer)[0]\r\n t.save()\r\n dt2 = datetime.datetime.now() - dt2\r\n dt3 = datetime.datetime.now() - dt1\r\n print(dt2)","sub_path":"odroidDjangoProject/tezt.py","file_name":"tezt.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469112250","text":"'''\r\nCreated on 26 Mar 2017\r\n\r\n@author: Daniele\r\n'''\r\nimport random\r\n\r\ndef merge(l1, l2):\r\n '''Merges two sorted arrays'''\r\n l = []\r\n while len(l1) != 0 and len(l2) != 0:\r\n if l1[0] <= l2[0]:\r\n l.append(l1.pop(0))\r\n else:\r\n l.append(l2.pop(0))\r\n\r\n # This will add any remaining elements from either of the arrays (in the case of two uneven halves).\r\n while len(l1) != 0:\r\n l.append(l1.pop(0))\r\n\r\n while len(l2) != 0:\r\n l.append(l2.pop(0))\r\n return l\r\n\r\ndef merge_sort(A):\r\n '''Recursive merge sort'''\r\n A1 = []\r\n A2 = []\r\n # Eventually we get down to one element in each half\r\n if len(A) > 1:\r\n for j in range(int(len(A)/2)):\r\n A1.append(A[j])\r\n for j in range(int(len(A)/2), len(A)):\r\n A2.append(A[j])\r\n\r\n # Sort each half of the array\r\n A1 = merge_sort(A1)\r\n A2 = merge_sort(A2)\r\n # Merge both halves\r\n A = merge(A1,A2)\r\n return A\r\n\r\ndef merge_sort_alt(A):\r\n '''A more 
'Pythonic' way of doing things'''\r\n    # Eventually we get down to one element in each half\r\n    if len(A) > 1:\r\n        # Split array to two halves\r\n        A1 = A[:int(len(A)/2)]\r\n        A2 = A[int(len(A)/2):]\r\n\r\n        # Sort each half of the array\r\n        A1 = merge_sort_alt(A1)\r\n        A2 = merge_sort_alt(A2)\r\n        # Merge both halves\r\n        A = merge(A1,A2)\r\n    return A\r\n\r\n\r\n# Quick check to see if our values are sorted!\r\nvalues = [random.randint(0, 1000) for i in range(30)]\r\nprint('Unsorted values', values)\r\nprint('Sorted', merge_sort(values))\r\nprint('Sorted (version 2)', merge_sort_alt(values))\r\n\r\n\r\n","sub_path":"src/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301355129","text":"from django.db import models\n\nfrom commons.constants import PRIORITY\n\n\nclass Organization(models.Model):\n    name = models.CharField(max_length=256)\n    logo = models.FileField(upload_to='organizations/organization/')\n    url = models.URLField()\n    email = models.EmailField(blank=True)\n\n    def __str__(self):\n        return self.name\n\n    def joint_organizations(self):\n        return [\n            m.organization for m in self.joint_memberships.\n            order_by('-amount', 'order', 'organization__name')\n        ]\n\n\nclass OrganizationRole(models.Model):\n    # Sponsor, Collaborator, Organizer, ...\n    name = models.CharField(max_length=256)\n    order = models.PositiveIntegerField(\n        choices=PRIORITY.CHOICES,\n        default=PRIORITY.MEDIUM\n    )\n    code = models.CharField(max_length=32, unique=True)\n    description = models.TextField(blank=True)\n    logo = models.FileField(\n        upload_to='organizations/organization_role/',\n        blank=True\n    )\n    display_name = models.CharField(max_length=256, blank=True)\n\n    def __str__(self):\n        return self.display_name or self.name\n\n\nclass OrganizationCategory(models.Model):\n    # Jade sponsor, Zafiro sponsor, Diamante sponsor, organizer, ...\n    name = models.CharField(max_length=256)\n    order = models.PositiveIntegerField(\n        choices=PRIORITY.CHOICES,\n        default=PRIORITY.MEDIUM\n    )\n    code = models.CharField(max_length=32, unique=True)\n    role = models.ForeignKey(\n        OrganizationRole,\n        on_delete=models.PROTECT,\n        related_name='organization_categories'\n    )\n    description = models.TextField(blank=True)\n    display_name = models.CharField(max_length=256, blank=True)\n\n    def __str__(self):\n        return self.display_name or self.name\n\n    class Meta:\n        verbose_name_plural = \"organization categories\"\n\n    def organizations(self, exclude_joint_organizations=True):\n        memberships = self.memberships.order_by(\n            '-amount', 'order', 'organization__name')\n        if exclude_joint_organizations:\n            memberships = memberships.exclude(joint_organization__isnull=False)\n        return [m.organization for m in memberships]\n\n\nclass Membership(models.Model):\n    event = models.ForeignKey(\n        'events.Event',\n        on_delete=models.PROTECT,\n        related_name='memberships'\n    )\n    organization = models.ForeignKey(\n        Organization,\n        on_delete=models.PROTECT,\n        related_name='memberships'\n    )\n    category = models.ForeignKey(\n        OrganizationCategory,\n        on_delete=models.PROTECT,\n        related_name='memberships'\n    )\n    amount = models.DecimalField(max_digits=9, decimal_places=2, default=0)\n    description = models.TextField(blank=True)\n    order = models.PositiveIntegerField(\n        choices=PRIORITY.CHOICES,\n        default=PRIORITY.MEDIUM\n    )\n    management_email = models.EmailField(\n        blank=True,\n        help_text='Management email of the organization used during the event'\n    )\n    
joint_organization = models.ForeignKey(\n Organization,\n on_delete=models.PROTECT,\n related_name='joint_memberships',\n help_text='Organizations joint with other organizations',\n blank=True,\n null=True\n )\n\n def __str__(self):\n return \"{} {} {}\".format(self.organization, self.category, self.amount)\n\n def get_email(self):\n return self.management_email or self.organization.email\n","sub_path":"organizations/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308123419","text":"\"\"\"empty message\n\nRevision ID: cfce4d1efeed\nRevises: 466047f04164\nCreate Date: 2019-02-25 02:15:49.091155\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cfce4d1efeed'\ndown_revision = '466047f04164'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('assqt_checked',\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('assqt_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['assqt_id'], ['assqt.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('user_id', 'assqt_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('assqt_checked')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/cfce4d1efeed_.py","file_name":"cfce4d1efeed_.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"449983060","text":"import os\nimport webapp2\nimport jinja2\nimport datetime\n\n# JINJA TEMPLATE INITIALIZER\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)\n\n# WRAPPER CLASS TO HANDLE REQUESTS USING TEMPLATES\nclass Handler(webapp2.RequestHandler):\n\tdef write(self, *a, **params):\n\t\tself.response.out.write(*a, **params)\n\t\t\n\tdef render(self, input_template, **params):\n\t\ttemplate = jinja_env.get_template(input_template)\n\t\tself.write(template.render(**params))\n\nclass UserPage(Handler):\n\t# RENDER THIS PAGE\n\tdef render_page(self):\n\t\tself.render('main.html')\n\t\t\n\tdef get(self):\n\t\t# GET COOKIES\n\t\tsite_cookies_dict = self.request.cookies\n\t\t\n\t\tif site_cookies_dict:\n\t\t\tself.write('Hey there, today we have these cookies: ' + str(self.request.cookies))\n\t\telse:\n\t\t\tself.render_page()\n\t\t\t\n\tdef post(self):\n\t\tcookie_val = self.request.get('cookie_val')\n\t\tself.response.set_cookie('cookie_val', cookie_val)\n\t\tself.redirect('/')\n\t\t\n# WEB APP PATH MAPPINGS\t\t\napp = webapp2.WSGIApplication( [ ('/', UserPage) ],\n\t\t\t\t\t\t\t\tdebug=True)","sub_path":"cookies/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467171304","text":"import time\r\nimport math\r\n\r\ndef twelve():\r\n\t# def triangleNumber( value ):\r\n\t# \t# The variable to store the answer.\r\n\t# \tret_val = 0\r\n\r\n\t# \t# Keep adding values.\r\n\t# \tfor i in range( 0, value + 1 ):\r\n\t# \t\tret_val += i\r\n\r\n\t# \t# Return the triangle value.\r\n\t# \treturn ret_val\r\n\r\n\tdef divisorCount( value 
):\r\n\t\t# If the value is 1, then there is only 1.\r\n\t\tif value == 1:\r\n\t\t\treturn 1\r\n\r\n\t\t# If the value is less than 1, then invalid.\r\n\t\tif value < 1:\r\n\t\t\treturn 0\r\n\r\n\t\t# The variable to store the number of divisors.\r\n\t\tret_val = 0\r\n\r\n\t\t# No divisor needs to be checked beyond the square root, other than the value itself.\r\n\t\tmax_value = (int)(math.floor( math.sqrt( value ) ))\r\n\r\n\t\t# When the square root is an integer it gets counted twice by the loop below, so correct for the duplicate here.\r\n\t\tif math.floor( math.sqrt( value ) ) == math.ceil( math.sqrt( value ) ):\r\n\t\t\tret_val -= 1\r\n\r\n\t\t# Run a loop that spans from 2 to the square root (inclusive).\r\n\t\tfor i in range( 2, max_value + 1 ):\r\n\t\t\t# Each divisor found here pairs with a complementary divisor greater\r\n\t\t\t# than the square root, so count both.\r\n\t\t\tif value % i == 0:\r\n\t\t\t\tret_val += 2\r\n\r\n\t\t# The +2 accounts for 1 and itself.\r\n\t\treturn ret_val + 2\r\n\r\n\t#####################################################\r\n\r\n\tstart = time.time();\r\n\r\n\tanswer = 0\r\n\tcurrTriangleNumber = 0\r\n\ti = 0\r\n\r\n\tprint( \"Now solving #12:\" )\r\n\r\n\t# Problem 12 asks for the first triangle number with over 500 divisors.\r\n\twhile currTriangleNumber <= 500:\r\n\t\t# answer = triangleNumber( i )\r\n\r\n\t\tanswer += i\r\n\t\tcurrTriangleNumber = divisorCount( answer )\r\n\t\ti += 1\r\n\r\n\tprint( answer )\r\n\r\n\tprint( \"It took: \" + (str)( time.time() - start ) + \" seconds.\" )","sub_path":"11~20/twelve.py","file_name":"twelve.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610972261","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 27 09:54:07 2019\r\n\r\n@author: leila\r\n\"\"\"\r\nfrom random import uniform \r\nfrom scipy.special import erf\r\nfrom scipy.special import erfinv\r\nfrom scipy.special import kolmogorov\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.stats as st\r\nimport csv\r\n\r\n\r\n#%%\r\n\r\n#Ejer 2\r\nprint ('Ejercicio 2')\r\nwith open('Barriosmuestras - Hoja1.CSV', newline='') as csvfile:#importa csv pares\r\n    barrioscsv = list(csv.reader(csvfile))\r\n\r\n\r\nnummuestras=7 #cantidad de txt \r\n\r\nABC=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\r\nabc=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\r\n\r\ndef patentes(lista):\r\n    patente=[]\r\n    for l in range(7):\r\n        patente.append(lista[l])\r\n    a,b,c,d,e,f,g=patente\r\n    rta=[]\r\n    for m in range(len(ABC)):\r\n        if a==ABC[m] or a==abc[m]:\r\n            rta0=m+1\r\n        if b==ABC[m] or b==abc[m]:\r\n            rta1=m+1\r\n    rta.append(rta0)\r\n    rta.append(rta1)\r\n    rta.append(int(c))\r\n    rta.append(int(d))\r\n    rta.append(int(e))\r\n    for m in range(len(ABC)):\r\n        if f==ABC[m] or f==abc[m]:\r\n            rta5=m+1 \r\n        if g==ABC[m] or g==abc[m]:\r\n            rta6=m+1 \r\n    rta.append(rta5)\r\n    rta.append(rta6)\r\n    num=rta[6]*26**0+(rta[5]-1)*26**1+rta[4]*26**2+rta[3]*10*26**2+rta[2]*10*10*26**2+(rta[1]-1)*10**3*(26**2)+(rta[0]-1)*10**3*(26**3)\r\n    return (num,rta)\r\n\r\nnum, rta=patentes('ZZ999ZZ')\r\nprint(num)\r\n\r\n#Separado por barrio (tengo 7 muestras):\r\nmuestrascrudas=[] #no les aplique la funcion patente todavia\r\nmuestrasnum=[] #me quede con el num cuando le aplique la funcion patente\r\nmuestrasrta=[] #me quede con el rta cuando le aplique la funcion patente\r\n\r\nlargomuestras=[]\r\n\r\n#Junto todos los barrios: 
\r\nmuestratotcrudo=[]\r\nmuestratotnum=[]\r\nmuestratotrta=[]\r\nnummuestra=[]\r\nbarrio=[]\r\n\r\nfor i in range(nummuestras):\r\n muestracrudai =[]\r\n muestranumi=[]\r\n \r\n muestrartai=[]\r\n\r\n with open('muestra%i.txt' %(i+1), \"r\") as f:\r\n for line in f:\r\n patente=line\r\n muestracrudai.append(patente)\r\n numi,rtai=patentes(patente)\r\n muestranumi.append(numi)\r\n muestrartai.append(rtai)\r\n muestratotcrudo.append(patente)\r\n muestratotnum.append(numi)\r\n muestratotrta.append(rtai)\r\n nummuestra.append(i+1)\r\n barrio.append(barrioscsv[i+1][1])\r\n muestrascrudas.append(muestracrudai)\r\n muestrasnum.append(muestranumi)\r\n muestrasrta.append(muestrartai)\r\n largomuestras.append(len(muestranumi))\r\n \r\n#%%\r\n\"\"\" \r\n#Ejer 3.a:version discreta\r\n\r\n#Uniforme escalonada: (teórica)\r\n \r\nxt=np.linspace(1,2428510,2428510)\r\nyt=np.linspace(0,1,2428510)\r\n\r\nplt.figure()\r\n\r\nplt.step(xt,yt,'r')\r\n\r\nmuestratotnum_sort=[1]+list(np.sort(muestratotnum))+[2428510]\r\n\r\npk =[0]+list(np.linspace(1/len(muestratotnum),1,len(muestratotnum)))+[1]\r\n\r\nplt.step(muestratotnum_sort,pk,'b',where='post')\r\nplt.grid(1)\r\n\r\n#Para calcular el máximo de la resta hay que rellenar la Kolmogorov:\r\n \r\npktot=[] \r\nfor i in range(len(muestratotnum_sort)-1):\r\n pktot=pktot+list(np.ones(muestratotnum_sort[i+1]-muestratotnum_sort[i])*pk[i]) \r\npktot=pktot+[1]\r\nplt.plot(xt,pktot,'k.')\r\n \r\n#Calcular la resta:\r\nlistadn=np.abs(np.array(pktot)-yt)\r\nprint(np.max(listadn))\r\n \r\nlistapos=np.where(listadn==np.amax(listadn))[0]\r\n \r\ntablakolm=1.36*len(muestratotnum)**-0.5\r\n\r\n#Agregar el estadístico al gráfico\r\n\"\"\"\r\n\r\n#%%\r\n \r\n#Ejer 3a\r\nprint ('Ejercicio 3.1')\r\npatmax=patentes('AD592MF')[0] \r\nlendata=len(muestratotnum)\r\n\r\n#Uniforme escalonada: (teórica)\r\n \r\n \r\n\r\ndef unifcdf(x): #Se asume que los x a los que se les aplica la función estan en el intervalo [1,patmax]\r\n return ((x-1)/(patmax-1))\r\n\r\nks,pvp=st.kstest(muestratotnum,unifcdf)\r\n\r\n\r\ntablakolm=1.36*lendata**-0.5\r\n\"\"\"\r\nif ksalpha:\r\n print ('No puedo rechazar la hipótesis nula')\r\nelse:\r\n print ('Rechazo la hipótesis nula')\r\n\r\nplt.figure()\r\n\r\nx=np.linspace(0,0.15,2000)\r\ny=kolmogorov(x*lendata**0.5)\r\n\r\nplt.plot(x,y,'r.')\r\nplt.figure()\r\n\r\n\r\n\r\nplt.plot([-115000,1,patmax,2600000],[0,0,1,1],'r-',label='CDF teórica uniforme')\r\n\r\nmuestratotnum_sort=[-115000]+list(np.sort(muestratotnum))+[2600000]\r\n\r\npk =[0]+list(np.linspace(1/len(muestratotnum),1,len(muestratotnum)))+[1]\r\nplt.xlim([-110000,2500000])\r\nplt.step(muestratotnum_sort,pk,'b',where='post',label='EDF')\r\nplt.legend(loc=2)\r\nplt.grid(1)\r\n\r\n\r\n#%%\r\n\r\n#Ejer 3.b: H1: exponencial = kolmogorov\r\nprint ('Ejercicio 3.2')\r\n#Exponencial: (teórica)\r\n#Ejer 3.b: Calculo la potencia del test de Kolmogorov del item anterior (es decir, considerando que H0 es la distribución uniforme): Debo calcular la probabilidad de rechazar H0 (ksexp>tablakolm) dado que los datos poseen una distribución exponencial con parámetro lambdaa=4*10**-7 (o sea, fueron generados con dicha distribución).\r\n\r\nlambdaa=4*10**-7\r\n\r\ntotintentos=10**4\r\nrech=0\r\nfor i in range(totintentos):\r\n muestraexp=np.random.exponential(lambdaa**-1,lendata)\r\n ksexp,pvexp=st.kstest(muestraexp,unifcdf)\r\n if ksexp>tablakolm:\r\n rech=rech+1\r\nPotencia=rech/totintentos\r\nprint (Potencia)\r\n\r\n\r\n\r\n#%%\r\n \r\n#Ejer 4.a: La patente más nueva\r\nprint ('Ejercicio 4.1')\r\n#binomial 
negativa\r\n#m = el natural correspondiente a la patente más nueva observada\r\n#k = número total de patentes nuevas observadas\r\n#n = número de autos con patentes nuevas en circulación (es decir, hay n patentes disponibles para observar)\r\n\r\ndef distribuciondem(m,k,n1): #distribución de probabilidad de m \r\n if k==1:\r\n return 1/n1\r\n else:\r\n return k*(m-k+1)*distribuciondem(m,k-1,n1)*((k-1)*(n1-k+1))**(-1)\r\n\r\ndef experimento(N,k,a,b):\r\n maxi=[]\r\n for i in range(N):\r\n x=[]\r\n for i in range(k):\r\n x.append(int(uniform(a,b+1))) #patentes observadas (de las más nuevas) #k=uniform(0,n) #puedo observar cero patentes nuevas y puedo observar todas las patentes nuevas que existen en circulacion (n)\r\n maxi.append(np.max(x))\r\n return maxi\r\n\r\n#Tomo desde la patente AD200AA en adelante: patentes(['A','D',2,0,0,'A','A'])=2163201. \r\n#La ultima patente que existe= 2428510 entonces 2428510-2163201=265309=n1\r\n\r\na=patentes('AD200AA')[0]\r\nb=patentes('AD592MF')[0]\r\na1=1\r\nb1=b-a\r\n\r\n\r\n\"\"\"\r\nk=25 #observo 25 patentes de las más grandes y me quedo con el máximo \r\nn1=265309 #Me quedo con los últimos 265309 valores posibles de las patentes (los 265309 valores más grandes)\r\nN=10 #repito lo de observar 25 patentes y quedarme con el máximo de aquéllas unas 1000 veces\r\n\"\"\"\r\n\r\nn1=b-a\r\nk=1000\r\nN=1000\r\n\r\ny=experimento(N,k,a,b)#máximos de cada muestra \r\n\r\n\r\n\r\nplt.figure()\r\n\r\nbins=np.linspace(np.min(y),np.max(y),50)\r\nn, bins, patches = plt.hist(y, bins, normed=1, facecolor='green', alpha=1, edgecolor='black', linewidth=0.5, label='Histograma con %i intentos' %(N))\r\n\r\n\r\n#m=np.linspace(a,b,b-a+1) #m>=k, n1>=k, m<=n1\r\nnpuntos=4000 #Para cubrir lo necesario del gráfico\r\nm=np.linspace(n1-npuntos,n1,npuntos+1) #m>=k, n1>=k, m<=n1\r\ny2=distribuciondem(m,k,n1)\r\nm=m+a\r\n#y2=[]\r\n#for i in m:\r\n# y2.append(distribuciondem(i,k,n1))\r\n# print (i)\r\n\"\"\"\r\n#m=np.linspace(a,b,b-a+1) #m>=k, n1>=k, m<=n1\r\nm=np.linspace(k,n1,n1-k+1) #m>=k, n1>=k, m<=n1\r\ny2=[]\r\nfor i in m:\r\n y2.append(distribuciondem(i,k,n1))\r\n print (i)\r\n\"\"\"\r\nplt.plot(m,y2,'r.')\r\nplt.grid(1)\r\n\r\n#El gráfico tiende asintóticamente al valor de n \r\n\r\n#%% \r\n\r\n#Ejer 4.b: La patente más nueva\r\n\r\nprint ('Ejercicio 4.2')\r\n\r\ndef distribuciondem(m,k,n1): #distribución de probabilidad de m variando k \r\n if k==1:\r\n return 1/n1\r\n else:\r\n return k*(m-k+1)*distribuciondem(m,k-1,n1)*((k-1)*(n1-k+1))**(-1)\r\nk=277 \r\nm=265109\r\nprobmfijodadon=[]\r\nncotasup=265109+264+8000\r\nlistan=np.linspace(m,ncotasup,ncotasup-m+1)\r\n\"\"\"\r\nfor n in listan:\r\n a1=1\r\n \r\n k=277\r\n N=1500\r\n \r\n y=distribuciondem(m,k,n) \r\n \r\n cantidaddem=len(np.where(np.array(y)==m)[0])\r\n probmfijodadon.append(cantidaddem/N)\r\n print (n-listan[0])\r\n\r\n listatotaln=listatotaln+list(n*np.ones(cantidaddem))\r\n\"\"\"\r\n\r\ny=distribuciondem(m,k,listan)\r\nynorm=y/np.sum(y)\r\n\r\nesperanza=np.sum(listan*ynorm)\r\nstd=(np.sum(ynorm*(listan-esperanza)**2))**0.5\r\n\r\nesperteor=(m-1)*(k-1)/(k-2)\r\n\r\nstdteor=((k-1)*(m-1)*(m-k+1)*(k-2)**-2/(k-3))**0.5\r\n\r\nplt.figure()\r\n\r\nplt.plot(listan,ynorm,'b.') \r\nplt.grid(1)\r\n\r\n#%%\r\n\r\n#Intento del 4b con experimento\r\n#Ahora queremos estimar el n1 (número total de patentes que existen) utilizando distribucion de m\r\n\r\ndef experimento(N,k,a,b):\r\n maxi=[]\r\n for i in range(N):\r\n x=[]\r\n for i in range(k):\r\n x.append(int(uniform(a,b+1))) #patentes observadas (de las más nuevas) 
#k=uniform(0,n) #puedo observar cero patentes nuevas y puedo observar todas las patentes nuevas que existen en circulacion (n)\r\n maxi.append(np.max(x))\r\n return maxi\r\n\r\na=2163201\r\nb=2428510\r\nk=25 #observo 25 patentes de las más grandes y me quedo con el máximo \r\nN=10\r\n\r\nsimu=[]\r\nM=30\r\nfor i in range(M):\r\n simu.append(np.max(experimento(N,k,a,b+i)))\r\n\r\n#Un prior no informativo es una uniforme. \r\n \r\n#%%\r\n\r\n\r\n#Ejer 5\r\n\r\nprint ('Ejercicio 5.1') \r\n\r\npatentesanmartin=[] #San Martin es el Barrio 0\r\n\r\nwith open('patentes san martin.txt', \"r\") as f:\r\n for line in f:\r\n patentesanmartin.append(patentes(line)[0])\r\n \r\n \r\npatentesrecoleta=muestrasnum[0]+muestrasnum[1]+muestrasnum[2]+muestrasnum[4]+muestrasnum[5]+muestrasnum[6] #Recoleta es el Barrio 1\r\n\r\npatentesanmartin=list(dict.fromkeys(patentesanmartin)) #Elimino las patentes repetidas\r\npatentesrecoleta=list(dict.fromkeys(patentesrecoleta)) #Elimino las patentes repetidas\r\n \r\n\r\npatentestot=np.array(patentesanmartin+patentesrecoleta)\r\npatentestot_sort=np.sort(patentestot)\r\n\r\nbarriosort=[]\r\n\r\n#rank=0\r\n\r\nrankrecoleta=0\r\nranksanmartin=0\r\nfor i in range(len(patentestot)):\r\n posiciones=np.where(patentestot==patentestot_sort[i])[0]\r\n if len(posiciones)==1:\r\n posicion=posiciones[0]\r\n if posicion>=len(patentesanmartin):\r\n barriosort.append(1)\r\n rankrecoleta=rankrecoleta+i\r\n else:\r\n barriosort.append(0)\r\n ranksanmartin=ranksanmartin+i\r\n else:\r\n print ('Hay un problema. Hay repetidos')\r\n \r\n# if barriosort[i]!=barriosort[i-1]:\r\n# rank=rank+1\r\n\r\nif len(patentesanmartin)<=len(patentesrecoleta):\r\n w=ranksanmartin\r\n n=len(patentesanmartin)\r\n m=len(patentesrecoleta)\r\nelse:\r\n w=rankrecoleta\r\n m=len(patentesanmartin)\r\n n=len(patentesrecoleta) \r\n\r\n\r\nprint (w)\r\n\r\nEw=(n+m+1)*n/2\r\nVw=n*m*(n+m+1)/12\r\nsw=Vw**0.5\r\n\r\ndef Qsn(p): #Defino la quantile function de la standard normal distribution N(0,1)\r\n return(2**0.5*erfinv(2*p-1))\r\n\r\n\r\nif w>Ew:\r\n z=(w-Ew-1/2)/sw\r\n\r\nif w800:\n SATScores[x][0]=800\n elif SATScoresRaw[x][0]<200:\n SATScores[x][0]=200\n else:\n SATScores[x][0]=SATScoresRaw[x][0]\n if SATScoresRaw[x][1]>800:\n SATScores[x][1]=800\n elif SATScoresRaw[x][1]<200:\n SATScores[x][1]=200\n else:\n SATScores[x][1]=SATScoresRaw[x][1]\ndef round_to(n, precision):\n correction = 0.5 if n >= 0 else -0.5\n return int( n/precision+correction ) * precision\n\ndef round_to_20(n):\n return round_to(n, 20)\n\nfor x in range(len(SATScores)):\n SATScores[x][0]=round_to_20(SATScores[x][0])\n SATScores[x][1]=round_to_20(SATScores[x][1])\n\n\nunskilled_params=[wage_coeffs_full[-1][0],unskilled_var[0][0]]\n\n\nSATMath=[x[0] for x in SATScores] # NEED TO MULTIPLY EACH ENTRY [a*x], not [a]*x\nSATVerbal=[x[1] for x in SATScores]\n\n# Generate Initial Ability Endowments (stochastic)\n# For each exogenous score combination, generate 50 of each individual's\n# possible ability endowments. 
Thus, here I have 1000 outcomes\n\nSATMathCol = {'SATM':SATMath*100}\nSATVerbalCol = {'SATV':SATVerbal*100}\n\n#Ability=np.random.multivariate_normal([0,0],cov=Sigma_Ability,size=10)\nAbility=[[0,0]]*200\nAbilityS=[x[0] for x in Ability]\nAbilityN=[x[1] for x in Ability]\n\nAbilitySCol={'A_S':AbilityS*200}\nAbilityNCol={'A_N':AbilityN*200}\n\nCharsList=[[x,y] for x in SATScores for y in Ability]\ncol1=np.empty(20000)\ncol2=np.empty(20000)\ncol3=np.empty(20000)\ncol4=np.empty(20000)\nfor x in range(len(CharsList)):\n col1[x]=CharsList[x][0][0]\n col2[x]=CharsList[x][0][1]\n col3[x]=CharsList[x][1][0]\n col4[x]=CharsList[x][1][1]\n\nDFChars=pd.DataFrame({'SAT_M':col1,'SAT_V':col2,'A_S':col3,'A_N':col4})\n# add tuition\nDFChars['tuition']=pd.Series([2]*20000)\n\n# generate combinations\nchoose=simplexmap.pascal(grad_horizon+4,sectors)\n\n\n# CODE STARTS HERE\n\nLaborGradeInt=np.array([int(x) for x in LaborGradeRange*100])\nLaborFinal=np.linspace(200,400,21,dtype=np.int64)\n\n\n\n\n## Generate Shocks\n\n# grade shocks\nShockGradeRaw=np.random.normal(0,1,(20000,4))\n\n# preference shocks\nShockPref=np.random.gumbel(size=(20000,grad_horizon+4,max([sectors+1,4])))\n\n# wage shocks\nShockSkilledWage=np.random.multivariate_normal([0]*sectors,\\\n cov=skilled_wage_covar,size=(20000,grad_horizon))\nShockUnskilledWage=(np.random.normal(0,1,(20000,grad_horizon+4))*\n unskilled_var[0][0]**0.5)\n\nShocksDict={'grade':ShockGradeRaw,'pref':ShockPref,'skilled':ShockSkilledWage,\\\n'unskilled':ShockUnskilledWage}\n\n# Fully solve the skilled labor market, over a grid of GPA outcomes\n\nzscores=scipy.stats.norm.ppf(np.array(range(1,normReps+1))/(normReps+1))\nnum_quantiles=20\nnorm_quantiles=scipy.stats.norm.ppf(\n np.array(range(1,num_quantiles))/num_quantiles)\n\nbase_draws=np.matrix.transpose(np.matrix(list(\n itertools.product(zscores,repeat=(sectors)))))\nlmat=np.linalg.cholesky(skilled_wage_covar)\nwage_shocks=np.array(np.transpose(np.matmul(lmat,base_draws)))\n\nSTEM_payouts_raw=np.zeros(11,dtype=np.float64)\nnonSTEM_payouts_raw=np.zeros(11,dtype=np.float64)\n\n# HARD CODED (220 is hard-coded)\n\nwage_coeffs_skilled=wage_coeffs_full[:-1]\nSTEM_emax={}\nnonSTEM_emax={}\n\nfor idx,grade in enumerate(LaborGradeRange):\n stem=EmaxLaborFunctionsJIT(grad_horizon-1,gamma_p,beta,wage_coeffs_skilled,\n 1,grade,flowsSkilled,wage_shocks,choose)\n stem.solveLabor()\n nonstem=EmaxLaborFunctionsJIT(grad_horizon-1,gamma_p,beta,\n wage_coeffs_skilled,0,grade,flowsSkilled,wage_shocks,choose)\n nonstem.solveLabor()\n STEM_emax[int(100*grade)]=stem.EmaxList\n nonSTEM_emax[int(100*grade)]=nonstem.EmaxList\n STEM_payouts_raw[idx]=stem.EmaxList[0,0]\n nonSTEM_payouts_raw[idx]=nonstem.EmaxList[0,0]\n\n\n# Fully solve the unskilled labor market, over the 4 dropout times\ndropout_payouts=np.zeros((4,2),dtype=np.float64)\nunskilled_reps=20\nunskilled_wage_shocks=np.array(np.transpose(np.matrix(scipy.stats.norm.ppf(\n np.array(range(1,unskilled_reps+1))/\n (unskilled_reps+1)))))*unskilled_var[0][0]**0.5\n\nunskilled_Emax={}\nfor drop_time in range(4):\n unskilled=EmaxLaborFunctionsJIT(grad_horizon+4-drop_time-1,gamma_p,\n beta,np.array([wage_coeffs_full[-1]]),0,0,np.array([flowUnskilled]),\n unskilled_wage_shocks,choose)\n unskilled.solveLabor()\n dropout_payouts[drop_time,0]=unskilled.EmaxList[1,0]\n dropout_payouts[drop_time,1]=unskilled.EmaxList[1,1]\n unskilled_Emax[('d'+str(drop_time+1))]=unskilled.EmaxList\n\n# Interpolate values to 0.1 GPA points for Education Emax 
functions\nSTEM_payouts=scipy.interpolate.interp1d(LaborGradeInt,STEM_payouts_raw,\n kind='cubic')(LaborFinal)\nnonSTEM_payouts=scipy.interpolate.interp1d(LaborGradeInt,\n nonSTEM_payouts_raw,kind='cubic')(LaborFinal)\n\n# Solve education by extracting each unique tuition and generating an\n# Emax function for that.\n\nflow_educ=np.array([flowSTEM,flownonSTEM,flowUnskilled],dtype=np.float64)\n\nSATTuition=list(set(list(DFChars[\n ['A_N','A_S','SAT_M','SAT_V','tuition']].\\\n itertuples(index=False,name=None))))\n\nunskilled_mean=wage_coeffs_full[-1][0]\nunskilled_meanvar=np.array((unskilled_mean,unskilled_var[0][0]),\n dtype=np.float64)\ned_Emax=np.zeros((len(SATTuition),6,81),dtype=np.float64)\n\n\ned_Emax={}\n\nfor idx,x in enumerate(SATTuition):\n Ed=EmaxEducationJIT(dropout_payouts,STEM_payouts,nonSTEM_payouts,\n grade_params,gamma_p,beta,flow_educ,\n np.array(([x[4]]*4),dtype=np.float64),\n np.array((x[2],x[3]),dtype=np.float64),\n np.array((x[0],x[1]),dtype=np.float64),\n unskilled_meanvar, norm_quantiles)\n Ed.solve()\n ed_Emax[x]=Ed.EmaxEducationValues\n\ndef round_to(n, precision):\n correction = 0.5 if n >= 0 else -0.5\n return int( n/precision+correction ) * precision\n\n# censored as well\ndef round_to_5(n):\n if n>=400:\n return 400\n if n<=0:\n return 0\n return round_to(n, 5)\n\n\ndef Egrade(year,major_params,exogChars):\n year_param=major_params[year+1]\n return (major_params[0]*exogChars[0]+major_params[1]*exogChars[1]+\n year_param)\n\n\ndef FutureGrade(current_year,currentGPA,dSTEM,exogChars,ability,\n grade_params,grade_quantiles):\n # STEM\n if dSTEM==1:\n sigma=100*grade_params[0][-1]**0.5\n majorParams=grade_params[0]\n majorAbility=ability[0]\n # nonSTEM\n else:\n sigma=100*grade_params[1][-1]**0.5\n majorParams=grade_params[1]\n majorAbility=ability[1]\n\n meanGrade=Egrade(current_year,majorParams,exogChars)+majorAbility\n\n # generate next grade, rounded to nearest 5, and top/bottom capped\n # at 0, 400 GPA\n semGrades=grade_quantiles*sigma+100*meanGrade\n for x in range(len(semGrades)):\n if semGrades[x]>400:\n semGrades[x]=400\n elif semGrades[x]<0:\n semGrades[x]=0\n\n nextGrades=np.zeros(len(semGrades))\n for x in range(len(semGrades)):\n nextGrades[x]=round_to_5((currentGPA*(current_year-1)+semGrades[x])/\n current_year)\n return nextGrades\n\n\ndef find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return array[idx]\n\ndef exp_grade(current_year,dSTEM,exogChars,ability,grade_params):\n # STEM\n if dSTEM==1:\n sigma=100*grade_params[0][-1]**0.5\n majorParams=grade_params[0]\n majorAbility=ability[0]\n # nonSTEM\n else:\n sigma=100*grade_params[1][-1]**0.5\n majorParams=grade_params[1]\n majorAbility=ability[1]\n\n return Egrade(current_year,majorParams,exogChars)+majorAbility\n\n# wage_coeffs is the vector of sector-specific wage coefficients\n# note that grade is given in units of 100\ndef ElogWage100(experience,wage_coeffs,STEM='nonSTEM',grade=0):\n dSTEM = 1 if STEM=='STEM' else 0\n return wage_coeffs[0]+wage_coeffs[1]*dSTEM+wage_coeffs[2]*grade/100+\\\n wage_coeffs[3]*dSTEM*grade/100+wage_coeffs[4]*experience+\\\n wage_coeffs[5]*experience**2\n\ndef gpa_to_index(gpa):\n return int(gpa/5)\n\ndef tgpa_to_index(gpa):\n return int((gpa-200)/10)\n\n# Simulates full process for individual\n# pulls individual endowment/tuition parameters from DFChars\ndef ForwardSimulate(individual_number,STEM_emax,nonSTEM_emax,unskilled_Emax,\n ed_Emax,DFChars,wage_coeffs_full,unskilled_params,unskilled_var,grade_params,\n 
flowsFull,ShocksDict,gamma_p,beta,LaborGradeRange):\n (A_N,A_S,SAT_M,SAT_V,tuition)=\\\n (DFChars.loc[individual_number][['A_N','A_S','SAT_M','SAT_V','tuition']])\n ability=(A_S,A_N)\n exogChars=[SAT_M,SAT_V]\n wage_coeffs=wage_coeffs_full\n STEMsd=grade_params[0][-1]**0.5\n nonSTEMsd=grade_params[1][-1]**0.5\n flowsSkilled=flowsFull[:-3]\n [flowSTEM,flownonSTEM,flowUnskilled]=flowsFull[-3:]\n EducationFlows=[flowSTEM,flownonSTEM,flowUnskilled]\n EducationEmax=ed_Emax[(A_N,A_S,SAT_M,SAT_V,tuition)]\n\n end_time=grad_horizon+4\n num_grades=20\n grade_quantiles=scipy.stats.norm.ppf(\n np.array(range(1,num_grades))/num_grades)\n\n\n # Generates labor market decisions\n # current time is the current time\n # major = STEM, nonSTEM, or d1, d2, d3, d4\n # grade = GPA\n # current_endowment is the ability endowment.\n # output is a dict of form {time: {choice: decision,wage: observed log-wage,\n # shock: log-wage shock to particular choice}}\n # wage_coeffs is [[wage coefficients]_sector]\n # (current_time,end_time,major,grade,current_endowment,wage_coeffs)=(2,\\\n # 10,'STEM',300,(0,0,0,0),wage_coeffs_full[:-1])\n #(current_time,end_time,major,grade,current_endowment,wage_coeffs)=(2,\\\n # 10,'d1',0,(0,0),wage_coeffs_full[-1])\n \n def LaborSolve(individual_number,current_time,end_time,major,grade,\n current_endowment,STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,out,\n wage_coeffs,flowsSkilled,flowUnskilled,gamma_p,beta,choose):\n if current_time==end_time:\n if major not in ['STEM','nonSTEM']:\n (laborexp,hp)=current_endowment\n dropout_time=int(major[-1])\n Emax=unskilled_Emax[major]\n workflow=(flowUnskilled+\n gamma_p*np.exp(ElogWage100(laborexp,wage_coeffs)+\n ShocksDict['unskilled'][individual_number][current_time-1])+\n ShocksDict['pref'][individual_number][current_time-1][0])\n hpflow=ShocksDict['pref'][individual_number][current_time-1][1]\n\n choiceSet={'unskilled':workflow,'hp':hpflow}\n\n choice=max(choiceSet,key=choiceSet.get)\n if choice=='hp':\n out[current_time]={'choice':'hp'}\n\n else:\n out[current_time]={'choice':'unskilled','lwage':\\\n ElogWage100(laborexp,wage_coeffs)+\\\n ShocksDict['unskilled'][individual_number][current_time-1],\\\n 'shock':\\\n ShocksDict['unskilled'][individual_number][current_time-1],}\n return\n\n else:\n totalexp=np.sum(current_endowment)\n totalhp=current_endowment[-1]\n expvector=list(current_endowment[:-1])\n if major=='STEM':\n Emax=STEM_emax[grade]\n else:\n Emax=nonSTEM_emax[grade]\n lwage={}\n choiceSet={}\n shock={}\n NextExp={}\n for x in range(len(expvector)):\n CurrentExp=list(expvector)\n laborexp=CurrentExp[x]\n shock['skilled'+str(x+1)]=ShocksDict['skilled'][\\\n individual_number][current_time-5][x]\n lwage['skilled'+str(x+1)]=ElogWage100(laborexp,\n wage_coeffs[x],major,grade)+shock['skilled'+str(x+1)]\n payout=(gamma_p*np.exp(lwage['skilled'+str(x+1)])+\n ShocksDict['pref'][individual_number][\n current_time-1][x]+flowsSkilled[x])\n choiceSet['skilled'+str(x+1)]=payout\n\n choiceSet['hp']=(ShocksDict['pref'][individual_number][\n current_time-1][-1])\n choice=max(choiceSet,key=choiceSet.get)\n if choice=='hp':\n out[current_time]={'choice':'hp'}\n else:\n out[current_time]={'choice':choice,'lwage':lwage[choice],\\\n 'shock':shock[choice]}\n return\n\n # college dropout\n if major not in ['STEM','nonSTEM']:\n (laborexp,hp)=current_endowment\n dropout_time=int(major[-1])\n Emax=unskilled_Emax[major]\n workflow=(beta*Emax[current_time-dropout_time+1,\n simplexmap.combo_to_array(current_time-dropout_time+1,\n 
(laborexp+1,hp),choose)]+\n flowUnskilled+gamma_p*np.exp(ElogWage100(laborexp,wage_coeffs)+\n ShocksDict['unskilled'][individual_number][current_time-1])+\n ShocksDict['pref'][individual_number][current_time-1][0])\n hpflow=(beta*Emax[current_time-dropout_time+1,\n simplexmap.combo_to_array(current_time-dropout_time+1,\n (laborexp,hp+1),choose)]+\n ShocksDict['pref'][individual_number][current_time-1][1])\n\n choiceSet={'unskilled':workflow,'hp':hpflow}\n\n choice=max(choiceSet,key=choiceSet.get)\n if choice=='hp':\n out[current_time]={'choice':'hp'}\n LaborSolve(individual_number,current_time+1,end_time,major,\n grade,(laborexp,hp+1),STEM_emax,nonSTEM_emax,unskilled_Emax,\n ShocksDict,out,wage_coeffs,flowsSkilled,flowUnskilled,\n gamma_p,beta,choose)\n else:\n out[current_time]={'choice':'unskilled','lwage':\\\n ElogWage100(laborexp,wage_coeffs)+\\\n ShocksDict['unskilled'][individual_number][current_time-1],\\\n 'shock':\\\n ShocksDict['unskilled'][individual_number][current_time-1],}\n LaborSolve(individual_number,current_time+1,end_time,major,grade,\n (laborexp+1,hp),STEM_emax,nonSTEM_emax,unskilled_Emax,\n ShocksDict,out,wage_coeffs,flowsSkilled,flowUnskilled,\n gamma_p,beta,choose)\n\n # skilled sector\n else:\n totalexp=np.sum(current_endowment)\n totalhp=current_endowment[-1]\n expvector=list(current_endowment[:-1])\n if major=='STEM':\n Emax=STEM_emax[grade]\n else:\n Emax=nonSTEM_emax[grade]\n lwage={}\n choiceSet={}\n shock={}\n NextExp={}\n for x in range(len(expvector)):\n CurrentExp=list(expvector)\n laborexp=CurrentExp[x]\n CurrentExp[x]=CurrentExp[x]+1\n NextExp['skilled'+str(x+1)]=tuple(CurrentExp+[totalhp])\n shock['skilled'+str(x+1)]=ShocksDict['skilled'][\\\n individual_number][current_time-5][x]\n lwage['skilled'+str(x+1)]=ElogWage100(laborexp,wage_coeffs[x],\\\n major,grade)+shock['skilled'+str(x+1)]\n payout=(gamma_p*np.exp(lwage['skilled'+str(x+1)])+\n ShocksDict['pref'][individual_number][current_time-1][x]+\n flowsSkilled[x]+beta*Emax[totalexp+1,\n simplexmap.combo_to_array(totalexp+1,\n NextExp['skilled'+str(x+1)],choose)])\n choiceSet['skilled'+str(x+1)]=payout\n\n hpNext=tuple(expvector)+(current_endowment[-1]+1,)\n choiceSet['hp']=(ShocksDict['pref'][individual_number][\n current_time-1][-1]+beta*Emax[totalexp+1,\n simplexmap.combo_to_array(totalexp+1,hpNext,choose)])\n\n choice=max(choiceSet,key=choiceSet.get)\n if choice=='hp':\n out[current_time]={'choice':'hp'}\n LaborSolve(individual_number,current_time+1,end_time,major,\n grade,hpNext,STEM_emax,nonSTEM_emax,unskilled_Emax,\n ShocksDict,out,wage_coeffs,flowsSkilled,flowUnskilled,\n gamma_p,beta,choose)\n\n else:\n out[current_time]={'choice':choice,'lwage':lwage[choice],\\\n 'shock':shock[choice]}\n LaborSolve(individual_number,current_time+1,end_time,major,\n grade,NextExp[choice],STEM_emax,nonSTEM_emax,\n unskilled_Emax,ShocksDict,out,wage_coeffs,flowsSkilled,\n flowUnskilled,gamma_p,beta,choose)\n\n\n\n # output is a dict of the output.\n output={}\n # T=1\n t1STEM=(beta*np.mean([EducationEmax[0,gpa_to_index(x)]\\\n for x in FutureGrade(1,0,1,exogChars,ability,grade_params,\n grade_quantiles)])-gamma_p*tuition+\n ShocksDict['pref'][individual_number][0][0]+flowSTEM)\n\n t1nonSTEM=(beta*np.mean([EducationEmax[1,gpa_to_index(x)]\\\n for x in FutureGrade(1,0,0,exogChars,ability,grade_params,\n grade_quantiles)])-gamma_p*tuition+\n ShocksDict['pref'][individual_number][0][1]+flownonSTEM)\n\n t1work=(beta*dropout_payouts[0][1]+gamma_p*np.exp(unskilled_params[0]+\\\n 
ShocksDict['unskilled'][individual_number][0])+\\\n ShocksDict['pref'][individual_number][0][2]+flowUnskilled)\n \n t1hp=beta*dropout_payouts[0][0]+ShocksDict['pref'][individual_number][0][3]\n\n # pick the best choice out of T=1\n t1choiceSet={'STEM':t1STEM,'nonSTEM':t1nonSTEM,'unskilled':t1work,\\\n 'hp':t1hp}\n t1choice=max(t1choiceSet, key=t1choiceSet.get)\n \n # Generate a dict of outcomes for year 1.\n # Dict is of form {'choice','grade','wage'} with one of grade/wage empty\n if t1choice=='hp':\n output[1]={'choice':'hp'}\n elif t1choice=='STEM':\n t1grade=round_to_5(100*(exp_grade(1,1,exogChars,ability,grade_params)+\n ShockGradeRaw[individual_number][0]*STEMsd))\n output[1]={'choice':'STEM','grade':t1grade,\\\n 'shock':ShockGradeRaw[individual_number][0]*STEMsd}\n elif t1choice=='nonSTEM':\n t1grade=round_to_5(100*(exp_grade(1,0,exogChars,ability,grade_params)+\n ShockGradeRaw[individual_number][0]*nonSTEMsd))\n output[1]={'choice':'nonSTEM','grade':t1grade,\n 'shock':ShockGradeRaw[individual_number][0]*nonSTEMsd}\n else:\n output[1]={'choice':'unskilled','lwage':unskilled_params[0]+\\\n ShocksDict['unskilled'][individual_number][0],'shock':\\\n ShocksDict['unskilled'][individual_number][0]}\n\n # LOM to t2\n if t1choice=='hp':\n LaborSolve(individual_number,2,end_time,'d1',0,\n (0,1),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n choose)\n return output \n elif t1choice=='unskilled':\n LaborSolve(individual_number,2,end_time,'d1',0,\n (1,0),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n choose)\n return output\n\n\n elif t1choice=='STEM':\n t2STEM=(beta*np.mean([EducationEmax[2,gpa_to_index(x)]\\\n for x in FutureGrade(2,t1grade,1,exogChars,ability,grade_params,\n grade_quantiles)])-gamma_p*tuition+\n ShocksDict['pref'][individual_number][1][0]+flowSTEM)\n\n t2nonSTEM=(beta*np.mean([EducationEmax[2,gpa_to_index(x)]\\\n for x in FutureGrade(2,t1grade,0,exogChars,ability,grade_params,\n grade_quantiles)])-gamma_p*tuition+\n ShocksDict['pref'][individual_number][1][1]+flownonSTEM)\n\n else:\n t2STEM=(beta*np.mean([EducationEmax[2,gpa_to_index(x)]\\\n for x in FutureGrade(2,t1grade,1,exogChars,ability,grade_params,\n grade_quantiles)])-gamma_p*tuition+\n ShocksDict['pref'][individual_number][1][0]+flowSTEM)\n\n t2nonSTEM=(beta*np.mean([EducationEmax[3,gpa_to_index(x)]\\\n for x in FutureGrade(2,t1grade,0,exogChars,ability,grade_params,\n grade_quantiles)])-gamma_p*tuition+\n ShocksDict['pref'][individual_number][1][1]+flownonSTEM)\n\n t2work=(beta*dropout_payouts[1,1]+gamma_p*np.exp(\n unskilled_params[0]+ShocksDict['unskilled'][individual_number][1])+\n ShocksDict['pref'][individual_number][1][2]+flowUnskilled)\n \n t2hp=beta*dropout_payouts[1,0]+ShocksDict['pref'][individual_number][1][3]\n\n # pick the best choice out of T=2\n t2choiceSet={'STEM':t2STEM,'nonSTEM':t2nonSTEM,'unskilled':t2work,\\\n 'hp':t2hp}\n t2choice=max(t2choiceSet, key=t2choiceSet.get)\n\n # write down the observation\n if t2choice=='hp':\n output[2]={'choice':'hp'}\n elif t2choice=='STEM':\n t2grade=round_to_5(100*(exp_grade(2,1,exogChars,ability,grade_params)+\n ShockGradeRaw[individual_number][1]*STEMsd))\n output[2]={'choice':'STEM','grade':t2grade,\\\n 'shock':ShockGradeRaw[individual_number][1]*STEMsd}\n elif t2choice=='nonSTEM':\n t2grade=round_to_5(100*(exp_grade(2,0,exogChars,ability,grade_params)+\n ShockGradeRaw[individual_number][1]*nonSTEMsd))\n 
output[2]={'choice':'nonSTEM','grade':t2grade,\\\n            'shock':ShockGradeRaw[individual_number][1]*\\\n            nonSTEMsd}\n    else:\n        output[2]={'choice':'unskilled','lwage':unskilled_params[0]+\\\n            ShocksDict['unskilled'][individual_number][1],'shock':\\\n            ShocksDict['unskilled'][individual_number][1]}\n\n\n\n    # LOM to t3\n    if t2choice=='hp':\n        LaborSolve(individual_number,3,end_time,'d2',0,\n            (0,1),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n            output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n            choose)\n        return output \n    elif t2choice=='unskilled':\n        LaborSolve(individual_number,3,end_time,'d2',0,\n            (1,0),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n            output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n            choose)\n        return output\n\n    # meet STEM major requirement\n    elif t2choice=='STEM' or t1choice=='STEM':\n        t3STEM=(beta*np.mean([EducationEmax[4,gpa_to_index(x)]\\\n            for x in FutureGrade(3,(t1grade+t2grade)/2,1,exogChars,ability,\n            grade_params,grade_quantiles)])-gamma_p*tuition+\n            ShocksDict['pref'][individual_number][2][0]+flowSTEM)\n\n        t3nonSTEM=(beta*np.mean([EducationEmax[5,gpa_to_index(x)]\\\n            for x in FutureGrade(3,(t1grade+t2grade)/2,0,exogChars,ability,\n            grade_params,grade_quantiles)])-gamma_p*tuition+\n            ShocksDict['pref'][individual_number][2][1]+flownonSTEM)\n\n    # If neither of the first two years was STEM, only the nonSTEM track can\n    # continue; this branch (assumed to mirror the Emax row of the nonSTEM case\n    # above) keeps t3nonSTEM defined for the choice set below.\n    else:\n        t3nonSTEM=(beta*np.mean([EducationEmax[5,gpa_to_index(x)]\\\n            for x in FutureGrade(3,(t1grade+t2grade)/2,0,exogChars,ability,\n            grade_params,grade_quantiles)])-gamma_p*tuition+\n            ShocksDict['pref'][individual_number][2][1]+flownonSTEM)\n\n\n    t3work=(beta*dropout_payouts[2,1]+\n        gamma_p*np.exp(unskilled_params[0]+\n        ShocksDict['unskilled'][individual_number][2])+\n        ShocksDict['pref'][individual_number][2][2]+flowUnskilled)\n    \n    t3hp=beta*dropout_payouts[2,0]+ShocksDict['pref'][individual_number][2][3]\n\n    # pick the best choice out of T=3\n    if t2choice=='STEM' or t1choice=='STEM':\n        t3choiceSet={'STEM':t3STEM,'nonSTEM':t3nonSTEM,'unskilled':t3work,\\\n            'hp':t3hp}\n    else:\n        t3choiceSet={'nonSTEM':t3nonSTEM,'unskilled':t3work,'hp':t3hp}\n\n    t3choice=max(t3choiceSet, key=t3choiceSet.get)\n\n    # write down the observation\n    if t3choice=='hp':\n        output[3]={'choice':'hp'}\n    elif t3choice=='STEM':\n        t3grade=round_to_5(100*(exp_grade(3,1,exogChars,ability,grade_params)+\n            ShockGradeRaw[individual_number][2]*STEMsd))\n        output[3]={'choice':'STEM','grade':t3grade,\n            'shock':ShockGradeRaw[individual_number][2]*STEMsd}\n    elif t3choice=='nonSTEM':\n        t3grade=round_to_5(100*(exp_grade(3,0,exogChars,ability,grade_params)+\n            ShockGradeRaw[individual_number][2]*nonSTEMsd))\n        output[3]={'choice':'nonSTEM','grade':t3grade,\n            'shock':ShockGradeRaw[individual_number][2]*nonSTEMsd}\n    else:\n        output[3]={'choice':'unskilled','lwage':unskilled_params[0]+\n            ShocksDict['unskilled'][individual_number][2],\n            'shock':ShocksDict['unskilled'][individual_number][2]}\n\n\n    # LOM to T=4\n    if t3choice=='hp':\n        LaborSolve(individual_number,4,end_time,'d3',0,\n            (0,1),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n            output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n            choose)\n        return output \n    elif t3choice=='unskilled':\n        LaborSolve(individual_number,4,end_time,'d3',0,\n            (1,0),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n            output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n            choose)\n        return output\n\n    elif t3choice=='STEM':\n        t4grades=FutureGrade(4,(t1grade+t2grade+t3grade)/3,\n            1,exogChars,ability,grade_params,grade_quantiles)\n        payout=[None]*len(t4grades)\n        for x in range(len(t4grades)):\n            if t4grades[x]<200:\n                payout[x]=dropout_payouts[3,0]\n            else:\n                payout[x]=STEM_payouts[tgpa_to_index(t4grades[x])]\n        t4STEM=(beta*np.mean(payout)-gamma_p*tuition+\n            ShocksDict['pref'][individual_number][3][0]+flowSTEM)\n    else:\n        
t4grades=FutureGrade(4,(t1grade+t2grade+t3grade)/3,\n 0,exogChars,ability,grade_params,grade_quantiles)\n payout=[None]*len(t4grades)\n for x in range(len(t4grades)):\n if t4grades[x]<200:\n payout[x]=dropout_payouts[3,0]\n else:\n payout[x]=nonSTEM_payouts[tgpa_to_index(t4grades[x])]\n t4nonSTEM=(beta*np.mean(payout)-gamma_p*tuition+\n ShocksDict['pref'][individual_number][3][1]+flownonSTEM) \n\n t4work=(beta*dropout_payouts[3,1]+\n gamma_p*np.exp(unskilled_params[0]+\n ShocksDict['unskilled'][individual_number][2])+\n ShocksDict['pref'][individual_number][3][2]+flowUnskilled)\n \n t4hp=beta*dropout_payouts[3,0]+ShocksDict['pref'][individual_number][3][3]\n\n # pick the best choice out of T=4\n if t3choice=='STEM':\n t4choiceSet={'STEM':t4STEM,'unskilled':t4work,'hp':t4hp}\n else:\n t4choiceSet={'nonSTEM':t4nonSTEM,'unskilled':t4work,'hp':t4hp}\n\n t4choice=max(t4choiceSet, key=t4choiceSet.get)\n\n # write down the observation and terminate the model\n if t4choice=='hp':\n output[4]={'choice':'hp'}\n LaborSolve(individual_number,5,end_time,'d4',0,\n (0,1),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n choose)\n return output\n elif t4choice=='unskilled':\n output[4]={'choice':'unskilled','lwage':unskilled_params[0]+\n ShocksDict['unskilled'][individual_number][3],\n 'shock':ShocksDict['unskilled'][individual_number][3]}\n LaborSolve(individual_number,5,end_time,'d4',0,\n (1,0),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n choose)\n return output\n\n # if STEM or nonSTEM are chosen, round the grade to the nearest available\n # GPA (see LaborGradeRange) and then solve the dynamic program\n elif t4choice=='STEM':\n t4grade=round_to_5(100*(exp_grade(4,1,exogChars,ability,grade_params)+\n ShockGradeRaw[individual_number][3]*STEMsd))\n output[4]={'choice':'STEM','grade':t4grade,\n 'shock':ShockGradeRaw[individual_number][3]*STEMsd}\n finalGPA=round_to_5((t1grade+t2grade+t3grade+t4grade)/4)\n if finalGPA>=200:\n roundedgrade=int(find_nearest(100*LaborGradeRange,finalGPA))\n LaborSolve(individual_number,5,end_time,'STEM',roundedgrade,\n (0,)*len(wage_coeffs),STEM_emax,nonSTEM_emax,unskilled_Emax,\n ShocksDict,output,wage_coeffs[:-1],flowsSkilled,flowUnskilled,\n gamma_p,beta,choose) \n else:\n LaborSolve(individual_number,5,end_time,'d4',0,\n (0,1),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n choose)\n return output\n\n elif t4choice=='nonSTEM':\n t4grade=round_to_5(100*(exp_grade(4,0,exogChars,ability,grade_params)+\n ShockGradeRaw[individual_number][3]*nonSTEMsd))\n output[4]={'choice':'nonSTEM','grade':t4grade,\n 'shock':ShockGradeRaw[individual_number][3]*nonSTEMsd}\n finalGPA=round_to_5((t1grade+t2grade+t3grade+t4grade)/4)\n if finalGPA>=200:\n roundedgrade=int(find_nearest(100*LaborGradeRange,finalGPA))\n LaborSolve(individual_number,5,end_time,'nonSTEM',roundedgrade,\n (0,)*len(wage_coeffs),STEM_emax,nonSTEM_emax,unskilled_Emax,\n ShocksDict,output,wage_coeffs[:-1],flowsSkilled,flowUnskilled,\n gamma_p,beta,choose)\n else:\n LaborSolve(individual_number,5,end_time,'d4',0,\n (0,1),STEM_emax,nonSTEM_emax,unskilled_Emax,ShocksDict,\n output,wage_coeffs[-1],flowsSkilled,flowUnskilled,gamma_p,beta,\n choose)\n return output\n\n\n\n# Actually run the code\n# 1000 = hardcoded size of population\nfinaloutput={}\nfor x in range(20000):\n 
finaloutput[x]=ForwardSimulate(x,STEM_emax,nonSTEM_emax,unskilled_Emax,\n ed_Emax,DFChars,wage_coeffs_full,unskilled_params,unskilled_var,\n grade_params,flowsFull,ShocksDict,gamma_p,beta,LaborGradeRange)\n\nflatdict=[]\n# Turn this output into a dataframe\nfor x in finaloutput.keys():\n for y in finaloutput[x].keys():\n if 'grade' in finaloutput[x][y]:\n flatdict.append({'id':x,'time':y,\\\n 'choice':finaloutput[x][y]['choice'],'type':'grade',\\\n 'outcome':finaloutput[x][y]['grade'],\\\n 'shock':finaloutput[x][y]['shock']})\n elif 'lwage' in finaloutput[x][y]:\n flatdict.append({'id':x,'time':y,\\\n 'choice':finaloutput[x][y]['choice'],'type':'lwage',\\\n 'outcome':finaloutput[x][y]['lwage'],\\\n 'shock':finaloutput[x][y]['shock']})\n else:\n flatdict.append({'id':x,'time':y,\\\n 'choice':finaloutput[x][y]['choice'],'type':'hp',\\\n 'outcome':0,'shock':0})\n\nresults=pd.DataFrame(flatdict)\nresults.sort_values(['id','time'])\nresults=results.set_index('id')\n\n# merge with generating dataset\nDFChars['id']=list(range(20000))\nDFChars=DFChars.set_index('id')\n\nresultsFinal=pd.DataFrame.merge(DFChars,results,how='right',\n left_index=True,right_index=True)\nresultsFinal.to_csv(directory,\n encoding='utf-8',index=True)","sub_path":"ForwardSimJIT.py","file_name":"ForwardSimJIT.py","file_ext":"py","file_size_in_byte":33344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638098945","text":"from tweepy import OAuthHandler, API\n# from header import consumer_key, consumer_secret, access_token, access_token_secret\nfrom dateutil import parser\nimport os\n\n\nconsumer_key = os.environ.get('consumer_key', None)\nconsumer_secret = os.environ.get('consumer_secret', None)\naccess_token = os.environ.get('access_token', None)\naccess_token_secret = os.environ.get('access_token_secret', None)\n\n\ndef get_tweets():\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = API(auth)\n\n tweets = api.user_timeline(screen_name='TrustyJohn', count=5)\n\n pay_load = []\n for tweet in tweets[:5]:\n tweet_json = {}\n tweet_json['text'] = tweet._json['text']\n date = tweet._json['created_at']\n tweet_json['date'] = parser.parse(date)\n pay_load.append(tweet_json)\n\n return pay_load\n","sub_path":"twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"435689464","text":"#! 
/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# render_paginator_item.py\nfrom django.template import Library\n\nregister = Library()\n\n\ndef render_paginator_item(context, first_last_amount=2, before_after_amount=4):\n page_obj = context['page']\n paginator = context['paginator']\n page_numbers = []\n\n # Pages before current page\n if page_obj.number > first_last_amount + before_after_amount:\n for i in range(1, first_last_amount + 1):\n page_numbers.append(i)\n\n page_numbers.append(None)\n\n for i in range(page_obj.number - before_after_amount, page_obj.number):\n page_numbers.append(i)\n\n else:\n for i in range(1, page_obj.number):\n page_numbers.append(i)\n\n # Current page and pages after current page\n if page_obj.number + first_last_amount + \\\n before_after_amount < paginator.num_pages:\n for i in range(\n page_obj.number, page_obj.number + before_after_amount + 1):\n page_numbers.append(i)\n\n page_numbers.append(None)\n\n for i in range(\n paginator.num_pages -\n first_last_amount +\n 1,\n paginator.num_pages +\n 1):\n page_numbers.append(i)\n\n else:\n for i in range(page_obj.number, paginator.num_pages + 1):\n page_numbers.append(i)\n\n return {\n 'paginator': paginator,\n 'page': page_obj,\n 'page_numbers': page_numbers\n }\n\nregister.inclusion_tag(\n 'auf_site_institutionnel/pagination.html',\n takes_context=True)(render_paginator_item)\n","sub_path":"project/auf_site_institutionnel/templatetags/render_paginator_item.py","file_name":"render_paginator_item.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"639852712","text":"import os, json, csv, openpyxl, docx, docx.shared\n\n\nclass simplify_str:\n def __init__(self, value: str, casei=False):\n \"\"\"Extract the value in the string and convert it\n into non-reference object(int,float).\"\"\"\n self.v = value\n self.i = casei\n\n def simplify(self):\n try:\n self.v = float(self.v)\n except ValueError:\n pass\n try:\n self.v = int(self.v)\n except ValueError:\n pass\n try:\n self.v = self.__toBool()\n except (ValueError, AttributeError):\n pass\n try:\n self.v = self.__toNone()\n except (ValueError, AttributeError):\n pass\n return self.v\n\n def __toNone(self):\n if self.i:\n if self.v.lower() == 'none':\n return None\n raise ValueError('Unknown None value represation.')\n else:\n if self.v == 'None':\n return None\n raise ValueError('Unknown None value represation.')\n\n def __toBool(self):\n if self.i:\n if self.v.lower() == 'true':\n return True\n elif self.v.lower() == 'false':\n return False\n raise ValueError('Unknown boolean value.')\n else:\n if self.v == 'True':\n return True\n elif self.v == 'False':\n return False\n raise ValueError('Unknown boolean value.')\n\n\nclass BaseFile:\n def __init__(self):\n self._opath = None\n self._path = None\n self._nl = ['\\n']\n self._ne = [' ']\n self._dof = False\n self._ftext = []\n\n @property\n def text(self):\n \"\"\"The string contained in the file.\"\"\"\n with open(str(self._opath))as f:\n return f.read()\n\n def _get_ftext(self):\n temp, temp1, temp2 = [], '', []\n for i in self.text + self._nl[0]:\n if i in self._ne:\n temp2.append(temp1)\n temp1 = ''\n elif i in self._nl:\n temp2.append(temp1)\n temp.append(temp2)\n temp1 = ''\n temp2 = []\n else:\n temp1 += i\n return temp.copy()\n\n def write(self, obj: str):\n \"\"\"Write to the original file.\"\"\"\n with open(str(self._opath), 'w')as f:\n f.write(obj)\n self._ftext = self._get_ftext()\n return len(obj)\n\n 
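# Usage sketch (illustrative, with assumptions): BaseFile.__init__ leaves\n    # _opath and _path as None, so a hypothetical subclass \"TxtFile\" is assumed\n    # to set them before use; only the method calls shown come from this class.\n    #\n    #   f = TxtFile()\n    #   f.write(\"a b\\nc d\")  # replaces the file; _ftext -> [['a', 'b'], ['c', 'd']]\n    #   f.append(\"e f\")       # appends text and refreshes the cached _ftext rows\n    #   f.convert_to_csv()    # writes the cached rows out as a .csv file\n\n    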
def append(self, obj: str):\n \"\"\"Append something to the original file.\"\"\"\n with open(str(self._opath), 'a')as f:\n f.write(obj)\n self._ftext = self._get_ftext()\n return len(obj)\n\n def convert_to_xlsx(self):\n \"\"\"Convert the file into Microsoft spread sheet.\"\"\"\n self.__wb = openpyxl.Workbook()\n self.__sheet = self.__wb.get_sheet_by_name(\"Sheet\")\n temp = 1\n for i in self._ftext:\n self.__write_row(i, temp)\n temp += 1\n self.__wb.save(self._path.full(no_ext=True) + '.xlsx')\n del self.__wb\n del self.__sheet\n\n def convert_to_csv(self, next_cell_token=',', next_row_token='\\n'):\n \"\"\"convert the text file into .csv file using csv module.\"\"\"\n print(self._path.full(no_ext=True))\n writer = csv.writer(open(self._path.full(no_ext=True)+'.csv', 'w', newline=''), delimiter=next_cell_token,\n lineterminator=next_row_token)\n for i in self._ftext:\n writer.writerow(i)\n\n def __write_row(self, list_to_write, start_row, start_column=1):\n try:\n self.__sheet.cell(row=start_row, column=start_column).value = list_to_write[start_column - 1]\n return self.__write_row(list_to_write, start_row, start_column=start_column + 1)\n except IndexError:\n return\n\n def convert_to_json(self):\n \"\"\"Use json module to convert the file into .json file.\"\"\"\n json.dump(self.text,open(self._path.full(no_ext=True)+'.json','w'))\n\n def close(self, ignore_error=False):\n \"\"\"Close the file, return 0 if succeeded.\"\"\"\n if ignore_error:\n try:\n if self._dof:\n os.unlink(str(self._opath))\n return 0\n except:\n return 1\n else:\n if self._dof:\n os.unlink(str(self._opath))\n return 0\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def __str__(self):\n return self.text\n\n def convert_to_docx(self, font=13):\n \"\"\"Convert to a .docx file.\"\"\"\n doc = docx.Document()\n para = doc.add_paragraph(self.text)\n para.font.size = docx.shared.Pt(font)\n doc.save(self._path.full(no_ext=True) + '.docx')\n","sub_path":"pyil/shared/_coll/Class.py","file_name":"Class.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250095295","text":"# Implementation of the REBUS with only the short term part\nimport pandas as pd\nimport scipy.sparse as sp\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nimport dataset\nimport sys\nimport time\nimport math\nimport os\nimport commons\nimport json\n\n\nclass REBUS_ST:\n def __init__(self, dataset, args):\n print('In class REBUS_ST')\n self.dataset = dataset\n self.args = args\n if not os.path.exists(os.path.join(\"tmp\", \"REBUS_ST\"+\"_\"+str(self.args.user_min)+\"_\"+str(self.args.item_min), self.dataset.data_name)):\n os.makedirs(os.path.join(\"tmp\", \"REBUS_ST\"+\"_\"+str(self.args.user_min)+\"_\"+str(self.args.item_min), self.dataset.data_name))\n random_id = str(random.randint(0, 1000000))\n self.path_saver_parameters = os.path.join(\"tmp\", \"REBUS_ST\"+\"_\"+str(self.args.user_min)+\"_\"+str(self.args.item_min), self.dataset.data_name, random_id+\".ckpt\")\n args_dict = vars(args)\n with open(os.path.join(\"tmp\", \"REBUS_ST\"+\"_\"+str(self.args.user_min)+\"_\"+str(self.args.item_min), self.dataset.data_name, random_id+\"-args.json\"), 'w') as fp:\n json.dump(args_dict, fp)\n # Use a training batch to figure out feature dimensionality\n _, _, _, prev_items, _, _, _, _ = self.dataset.generate_train_shuffled_batch_sp_with_prev_items()\n 
self.feature_dim = prev_items.shape[1]\n print('Feature dimension = ' + str(self.feature_dim))\n\n def initialize_parameters(self):\n var_emb_items = tf.get_variable('emb_items', [self.dataset.nb_items, self.args.num_dims],\n # initializer=tf.random_uniform_initializer( -self.args.init_mean, self.args.init_mean))\n initializer=tf.contrib.layers.xavier_initializer(seed=self.args.seed))\n # Add a null vector for embedding_lookup, the null vector is equal tu nb_items\n var_emb_items = tf.concat((var_emb_items[0:self.dataset.nb_items, :], tf.zeros(shape=[1, self.args.num_dims])), 0)\n\n var_bias_items = tf.get_variable('bias_items', [self.dataset.nb_items, 1],\n # initializer=tf.random_uniform_initializer( -self.args.init_mean, self.args.init_mean))\n initializer=tf.zeros_initializer())\n # Add a null vector for embedding_lookup, the null vector is equal tu nb_items\n var_bias_items = tf.concat((var_bias_items[0:self.dataset.nb_items, :], tf.zeros(shape=[1, 1])), 0)\n\n parameters = {\n \"var_emb_items\": var_emb_items,\n \"var_bias_items\": var_bias_items\n }\n\n return parameters\n\n def create_placeholder(self):\n pl_user_list = tf.placeholder(tf.int32, shape=[None], name='user_list') # List of all users\n pl_prev_items = tf.placeholder(tf.int32, shape=[None, 1], name='prev_items')\n pl_list_fsub_items_id = tf.placeholder(tf.int32, shape=[None, self.args.L], name='list_fsub_items_id')\n pl_list_fsub_items_values = tf.placeholder(tf.float32, shape=[None, self.args.L], name='list_fsub_items_values')\n pl_pos = tf.placeholder(tf.int32, shape=[None], name='pos')\n pl_neg = tf.placeholder(tf.int32, shape=[None], name='neg') # Shape for the neg sparseTensor\n\n placeholders = {\n 'pl_user_list': pl_user_list,\n 'pl_prev_items': pl_prev_items,\n 'pl_list_fsub_items_id': pl_list_fsub_items_id,\n 'pl_list_fsub_items_values': pl_list_fsub_items_values,\n 'pl_pos': pl_pos,\n 'pl_neg': pl_neg,\n }\n\n return placeholders\n\n def create_feed_dict(self, placeholders, users, prev_items, list_fsub_items_id, list_fsub_items_values, list_prev_items_pos, list_prev_items_neg, pos_items, neg_items):\n feed_dict = {\n placeholders['pl_user_list']: users,\n placeholders['pl_prev_items']: prev_items,\n placeholders['pl_list_fsub_items_id']: list_fsub_items_id,\n placeholders['pl_list_fsub_items_values']: list_fsub_items_values,\n placeholders['pl_pos']: pos_items,\n placeholders['pl_neg']: neg_items,\n }\n\n return feed_dict\n\n def get_preds(self, placeholders, parameters):\n\n # Get back variables\n var_emb_items = parameters[\"var_emb_items\"]\n var_bias_items = parameters[\"var_bias_items\"]\n\n # Get item and bias for pos and neg examples\n item_pos = tf.nn.embedding_lookup(var_emb_items, placeholders[\"pl_pos\"])\n bias_pos = tf.nn.embedding_lookup(var_bias_items, placeholders[\"pl_pos\"])\n item_neg = tf.nn.embedding_lookup(var_emb_items, placeholders[\"pl_neg\"])\n bias_neg = tf.nn.embedding_lookup(var_bias_items, placeholders[\"pl_neg\"])\n\n # Lookup items for long and short term\n emb_list_fsub_items_id = tf.nn.embedding_lookup(var_emb_items, placeholders[\"pl_list_fsub_items_id\"])\n\n # Short term\n pl_list_fsub_items_values_reshape = tf.reshape(placeholders[\"pl_list_fsub_items_values\"], [tf.shape(placeholders[\"pl_list_fsub_items_values\"])[0], tf.shape(placeholders[\"pl_list_fsub_items_values\"])[1], 1])\n sum_short = tf.reduce_sum(tf.multiply(emb_list_fsub_items_id, pl_list_fsub_items_values_reshape), axis=1)\n\n # Prediction\n dist_pos = tf.subtract(sum_short, item_pos)\n dist_squared_pos = 
tf.multiply(dist_pos, dist_pos)\n preds_pos = tf.add(bias_pos, tf.reduce_sum(dist_squared_pos, axis=1, keepdims=True))\n\n dist_neg = tf.subtract(sum_short, item_neg)\n dist_squared_neg = tf.multiply(dist_neg, dist_neg)\n preds_neg = tf.add(bias_neg, tf.reduce_sum(dist_squared_neg, axis=1, keepdims=True))\n\n return -preds_pos, -preds_neg\n\n def get_preds_for_evaluate(self, placeholders, parameters):\n\n # Get back variables\n var_emb_items = parameters[\"var_emb_items\"]\n var_bias_items = parameters[\"var_bias_items\"]\n\n # Get item and bias for pos and neg examples\n item_pos = tf.nn.embedding_lookup(var_emb_items, placeholders[\"pl_pos\"])\n bias_pos = tf.nn.embedding_lookup(var_bias_items, placeholders[\"pl_pos\"])\n\n # Lookup items for long and short term\n emb_list_fsub_items_id = tf.nn.embedding_lookup(var_emb_items, placeholders[\"pl_list_fsub_items_id\"])\n\n # Short term\n pl_list_fsub_items_values_reshape = tf.reshape(placeholders[\"pl_list_fsub_items_values\"], [tf.shape(placeholders[\"pl_list_fsub_items_values\"])[0], tf.shape(placeholders[\"pl_list_fsub_items_values\"])[1], 1])\n sum_short = tf.reduce_sum(tf.multiply(emb_list_fsub_items_id, pl_list_fsub_items_values_reshape), axis=1)\n\n # Prediction\n dist_pos = tf.subtract(sum_short, item_pos)\n dist_squared_pos = tf.multiply(dist_pos, dist_pos)\n preds_pos = tf.add(bias_pos, tf.reduce_sum(dist_squared_pos, axis=1, keepdims=True))\n\n return -preds_pos\n\n def BPR_loss(self, preds_pos, preds_pneg, parameters):\n # Get back variables\n var_emb_items = parameters[\"var_emb_items\"]\n var_bias_items = parameters[\"var_bias_items\"]\n\n l2_reg_emb_items = self.args.emb_reg * tf.reduce_sum(tf.square(var_emb_items))\n l2_reg_bias_items = self.args.bias_reg * tf.reduce_sum(tf.square(var_bias_items))\n\n # BPR training op (add 1e-10 to help numerical stability)\n bprloss_op = tf.reduce_sum(tf.log(1e-10 + tf.sigmoid(preds_pos - preds_pneg))) - l2_reg_emb_items - l2_reg_bias_items\n\n return -bprloss_op # We take the opposite beacuse we minimize\n\n def train(self):\n start_train_time = time.time()\n config = tf.ConfigProto()\n\n ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables\n\n # to keep consistent results\n tf.set_random_seed(1)\n np.random.seed(3)\n\n self.placeholders = self.create_placeholder()\n self.parameters = self.initialize_parameters()\n self.preds_pos, self.preds_neg = self.get_preds(self.placeholders, self.parameters)\n self.preds_eval = self.get_preds_for_evaluate(self.placeholders, self.parameters)\n self.bprloss_op = self.BPR_loss(self.preds_pos, self.preds_neg, self.parameters)\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate).minimize(self.bprloss_op, global_step=self.global_step)\n self.init = tf.global_variables_initializer()\n self.saver = tf.train.Saver()\n with tf.Session(config=config) as sess:\n sess.run(self.init)\n\n best_epoch = 0\n best_val_auc = -1\n best_test_auc = -1\n best_save_path = self.saver.save(sess, self.path_saver_parameters)\n\n for epoch in range(self.args.max_iters):\n start_time = time.time()\n mini_batch_idx, batch_size, users, prev_items, list_fsub_items_id, list_fsub_items_values, list_prev_items_pos, list_prev_items_neg, pos_items, neg_items = self.dataset.generate_train_shuffled_batch_sp_with_prev_items_fsub()\n num_complete_minibatches = math.floor(batch_size/self.args.mini_batch_size) # number of mini batches of size 
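BPR_loss above maximizes log sigmoid(pos - neg) with the L2 penalties subtracted, then negates the result so the optimizer can minimize it. A hedged NumPy sketch of that objective (l2_terms stands in for the two regularizers):

import numpy as np

def bpr_loss(preds_pos, preds_neg, l2_terms=0.0, eps=1e-10):
    # eps mirrors the 1e-10 stabilizer in the graph version; the returned
    # value is to be minimized, hence the leading minus sign.
    sigmoid = 1.0 / (1.0 + np.exp(-(preds_pos - preds_neg)))
    return -(np.sum(np.log(eps + sigmoid)) - l2_terms)

# Well-separated positive/negative scores yield a much smaller loss.
assert bpr_loss(np.array([5.0]), np.array([-5.0])) < bpr_loss(np.array([0.0]), np.array([0.0]))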
self.args.mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_users = users[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size]\n mini_batch_prev_items = prev_items[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size, :]\n mini_batch_list_fsub_items_id = list_fsub_items_id[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size, :]\n mini_batch_list_fsub_items_values = list_fsub_items_values[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size, :]\n mini_batch_list_prev_items_pos = list_prev_items_pos[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size, :]\n mini_batch_list_prev_items_neg = list_prev_items_neg[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size, :]\n mini_batch_pos_items = pos_items[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size]\n mini_batch_neg_items = neg_items[k * self.args.mini_batch_size: k * self.args.mini_batch_size + self.args.mini_batch_size]\n\n feed_dict = self.create_feed_dict(self.placeholders, mini_batch_users, mini_batch_prev_items, mini_batch_list_fsub_items_id, mini_batch_list_fsub_items_values, mini_batch_list_prev_items_pos, mini_batch_list_prev_items_neg, mini_batch_pos_items, mini_batch_neg_items)\n _, bprloss = sess.run([self.optimizer, self.bprloss_op], feed_dict=feed_dict)\n\n if batch_size % self.args.mini_batch_size != 0:\n mini_batch_users = users[num_complete_minibatches * self.args.mini_batch_size: batch_size]\n mini_batch_prev_items = prev_items[num_complete_minibatches * self.args.mini_batch_size: batch_size, :]\n mini_batch_list_fsub_items_id = list_fsub_items_id[num_complete_minibatches * self.args.mini_batch_size: batch_size, :]\n mini_batch_list_fsub_items_values = list_fsub_items_values[num_complete_minibatches * self.args.mini_batch_size: batch_size, :]\n mini_batch_list_prev_items_pos = list_prev_items_pos[num_complete_minibatches * self.args.mini_batch_size: batch_size, :]\n mini_batch_list_prev_items_neg = list_prev_items_neg[num_complete_minibatches * self.args.mini_batch_size: batch_size, :]\n mini_batch_pos_items = pos_items[num_complete_minibatches * self.args.mini_batch_size: batch_size]\n mini_batch_neg_items = neg_items[num_complete_minibatches * self.args.mini_batch_size: batch_size]\n\n feed_dict = self.create_feed_dict(self.placeholders, mini_batch_users, mini_batch_prev_items, mini_batch_list_fsub_items_id, mini_batch_list_fsub_items_values, mini_batch_list_prev_items_pos, mini_batch_list_prev_items_neg, mini_batch_pos_items, mini_batch_neg_items)\n _, bprloss = sess.run([self.optimizer, self.bprloss_op], feed_dict=feed_dict)\n\n print('\\tEpoch: {} BPR-Loss = {} (time : {})'.format(epoch, bprloss, time.time() - start_time))\n\n if epoch % self.args.eval_freq == 0:\n start_eval_time = time.time()\n val_auc = commons.sample_evaluate_valid_faster(self, self.dataset, sess)['AUC']\n test_auc = 0.0\n\n print('\\tEpoch: {} Val AUC = {}, tTest AUC = {} (time : {})'.format(epoch, val_auc, test_auc, time.time() - start_eval_time))\n\n if val_auc > best_val_auc:\n best_epoch = epoch\n best_val_auc = val_auc\n best_test_auc = test_auc\n best_save_path = self.saver.save(sess, self.path_saver_parameters)\n else:\n if epoch >= (best_epoch + self.args.quit_delta):\n print('Overfitted, exiting...')\n break\n\n print('\\tCurrent max = {} 
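The epoch loop above writes every slice expression twice: once for the complete mini-batches and once for the trailing remainder. A small generator expressing the same partitioning, verified on a toy size:

import math

def minibatch_bounds(batch_size, mini_batch_size):
    num_complete = math.floor(batch_size / mini_batch_size)
    for k in range(num_complete):
        yield k * mini_batch_size, (k + 1) * mini_batch_size
    if batch_size % mini_batch_size != 0:
        # The partial mini-batch that the second branch above handles.
        yield num_complete * mini_batch_size, batch_size

assert list(minibatch_bounds(10, 4)) == [(0, 4), (4, 8), (8, 10)]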
at epoch {}'.format(best_val_auc, best_epoch))\n\n time_to_train = time.time() - start_train_time\n\n print(\"Restore best parameters\")\n self.saver.restore(sess, self.path_saver_parameters)\n\n start_eval_time = time.time()\n valid_metrics = commons.evaluate_valid(self, self.dataset, sess)\n test_metrics = commons.evaluate_test(self, self.dataset, sess)\n time_to_eval = time.time() - start_eval_time\n\n print('\\n\\t time_to_train = {}'.format(time_to_train))\n print('\\t time_to_eval = {}'.format(time_to_eval))\n print('\\t Best Epoch = {}'.format(best_epoch))\n print('\\t Sample Validation AUC = {} (estimation with {} random items) '.format(best_val_auc, self.dataset.args.item_per_user))\n print('\\t Validation AUC = {} '.format(valid_metrics['AUC']))\n print('\\t Sample Test AUC = {} (estimation with {} random items)'.format(best_test_auc, self.dataset.args.item_per_user))\n print('\\t Test AUC = {} '.format(test_metrics['AUC']))\n\n if self.dataset.args.cold_start_user:\n cold_metrics = commons.evaluate_cold_start(self, self.dataset, sess)\n print('\\t cold_metrics AUC = {} '.format(cold_metrics['AUC']))\n else:\n cold_metrics = {}\n\n return {\n 'time_to_train': time_to_train,\n 'time_to_eval': time_to_eval,\n 'best_epoch': best_epoch,\n 'best_val_auc': best_val_auc,\n 'best_test_auc': best_test_auc,\n 'valid_metrics': valid_metrics,\n 'test_metrics': test_metrics,\n 'cold_metrics': cold_metrics\n }\n\n def load(self):\n start_train_time = time.time()\n config = tf.ConfigProto()\n\n ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables\n\n # to keep consistent results\n tf.set_random_seed(1)\n np.random.seed(3)\n\n self.placeholders = self.create_placeholder()\n self.parameters = self.initialize_parameters()\n self.preds_pos, self.preds_neg = self.get_preds(self.placeholders, self.parameters)\n self.preds_eval = self.get_preds_for_evaluate(self.placeholders, self.parameters)\n self.bprloss_op = self.BPR_loss(self.preds_pos, self.preds_neg, self.parameters)\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate).minimize(self.bprloss_op, global_step=self.global_step)\n self.init = tf.global_variables_initializer()\n self.saver = tf.train.Saver()\n\n def predict(self, sess, users_tmp, prev_items_tmp, list_fsub_items_id_tmp, list_fsub_items_values_tmp, pos_items_tmp, list_prev_items_pos_tmp):\n feed_dict = {\n self.placeholders['pl_user_list']: users_tmp,\n self.placeholders['pl_prev_items']: prev_items_tmp,\n self.placeholders['pl_list_fsub_items_id']: list_fsub_items_id_tmp,\n self.placeholders['pl_list_fsub_items_values']: list_fsub_items_values_tmp,\n self.placeholders['pl_pos']: pos_items_tmp,\n }\n return sess.run([self.preds_eval], feed_dict=feed_dict)\n","sub_path":"REBUS_ST.py","file_name":"REBUS_ST.py","file_ext":"py","file_size_in_byte":16854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627945500","text":"# CREDIT TO ROBIN CAMILLE DAVIS\nimport tweepy, time\n\n# taken from www.apps.twitter.com\n# throwaway twitter account so I don't care if it's public\nCONSUMER_KEY = 'sFLL9lXR0ZVUU5P7F26g7tds5'\nCONSUMER_SECRET = 'oYNS95rzjWoPDXOfGAW1FzsACSaG2iT6pFBZ4rdVTYN25Rz3nl'\nACCESS_KEY = '717037162903040000-rbf0HtqbhnP8howTSXdMlkt02Vp7fk9'\nACCESS_SECRET = 'Sm5EEAuH2pMD3KxxFwfYPAgOs0AhXYloTvUR6aYwfbrmM'\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, 
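The validation logic above checkpoints whenever the sampled AUC improves and aborts once quit_delta epochs pass without improvement. The same patience rule in isolation (the AUC values are made up):

best_val, best_epoch, quit_delta = float('-inf'), 0, 2
for epoch, val_auc in enumerate([0.61, 0.65, 0.64, 0.63, 0.62]):
    if val_auc > best_val:
        best_val, best_epoch = val_auc, epoch  # the checkpoint is saved here
    elif epoch >= best_epoch + quit_delta:
        break  # overfitted: no improvement for quit_delta epochs
assert (best_epoch, epoch) == (1, 3)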
CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth)\n\n\nfilename=open('testtweets.txt','r')\nf=filename.readlines()\nfilename.close()\n\n\nfor line in f:\n api.update_status(line)\n print (line)\n time.sleep(3) \n","sub_path":"tweeter.py","file_name":"tweeter.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"294693403","text":"import time\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nfrom tqdm import trange, tqdm\nimport cv2\n\nclass DQNWorker(object):\n \"\"\"\n DQN Worker\n (assuming episode buffer here for drqn)\n \"\"\"\n def __init__(self, model, replay_buffer, optimizer_type = None):\n \"\"\"\n Args:\n model : DQNModel\n model = the model to be updated\n replay_buffer : ReplayBuffer\n replay_buffer = replay buffer\n optimizer_type : string\n optimizer_type = optimizer type (e.g. 'Adam', 'RMSProp')\n \"\"\"\n self.model = model\n self.replay_buffer = replay_buffer\n if (replay_buffer is not None):\n replay_buffer.reset()\n if (optimizer_type is not None):\n self.optimizer = self.get_optimizer(optimizer_type)\n else:\n self.optimizer = None\n self._reset()\n\n def _reset(self):\n \"\"\"\n Reset\n \"\"\"\n self.state_this_episode = []\n self.action_this_episode = []\n self.reward_this_episode = []\n self.done_this_episode = []\n self.train_lstm_state = self.model.zero_lstm_state(\n batch_size = 1)\n\n def get_optimizer(self, optimizer_type):\n \"\"\"\n Get Optimizer\n Args:\n optimizer_type : string\n optimizer_type = optimizer type (e.g. 'Adam', 'RMSProp')\n Returns:\n optimizer : torch.optim\n optimizer = optimizer\n \"\"\"\n if (optimizer_type.lower() == 'adam'):\n optimizer = optim.Adam(self.model.parameters())\n elif (optimizer_type.lower() == 'rmsprop'):\n optimizer = optim.RMSprop(self.model.parameters())\n else:\n raise NotImplementedError\n return optimizer\n\n def update_target_model(self, target, source = None, tau = 1.0):\n \"\"\"\n Update Target Model\n Args:\n target : DQNWorker\n target = target model\n source : same type as target\n source = source model\n tau : float\n tau = target <- tau * source + (1 - tau) * target\n \"\"\"\n if (source is None):\n source = self.model\n for target_param, source_param in zip(\n target.parameters(), source.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) +\n source_param.data * tau)\n\n def update_model(self, data_batch, learning_rate,\n grad_clip_norm = None):\n \"\"\"\n Update Model From One Batch\n Args:\n data_batch : dictionary\n data_batch = one batch of data\n 'state_current' : numpy.ndarray\n 'state_current' = sampled current state, shape [seq len * batch_size] + state_shape\n 'action' : numpy.ndarray\n 'action' = sampled action, shape [seq len * batch_size] + action_shape\n 'target_q' : numpy.ndarray\n 'target_q' = target q value, shape [seq len * batch size]\n 'seq_len' : numpy.ndarray\n 'seq_len' = sequence length of each sampled sequences, shape [batch_size]\n learning_rate : float\n learning_rate = value of learning rate\n grad_clip_norm : float or None\n grad_clip_norm = norm size for clipping gradients\n \"\"\"\n loss = self.model.evaluate_loss({\n 'target_q': data_batch['target_q'],\n 'action': data_batch['action'],\n 'img': data_batch['state_current'],\n 'seq_len': data_batch['seq_len'],\n 'lstm_state_input':\n 
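get_optimizer above dispatches on a lowercased string through an if/elif chain; a table-driven equivalent (the dict keys mirror the two supported names):

import torch.optim as optim

_OPTIMIZERS = {'adam': optim.Adam, 'rmsprop': optim.RMSprop}

def get_optimizer(params, optimizer_type):
    try:
        return _OPTIMIZERS[optimizer_type.lower()](params)
    except KeyError:
        # Unknown names fail loudly, as in the original.
        raise NotImplementedError(optimizer_type)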
self.model.zero_lstm_state(data_batch['seq_len'].size)},\n seq_len = int(data_batch['target_q'].shape[0] /\n data_batch['seq_len'].size))\n self.optimizer.zero_grad()\n self.optimizer.param_groups[0]['lr'] = learning_rate\n loss['loss'].backward()\n if (grad_clip_norm is not None):\n nn.utils.clip_grad_norm_(\n self.model.parameters(), grad_clip_norm)\n self.optimizer.step()\n\n def sample_trajectory(self, env, exploration_prob,\n num_step, num_repeat):\n \"\"\"\n Sample Trajectories\n Args:\n env : rl_benchmark.env.discrete_action\n env = env used for sampling\n exploration_prob : float\n exploration_prob = probability of exploration\n num_step : int\n num_step = number of sampling steps\n num_repeat : int\n num_repeat = number of repeated actions\n Returns:\n total_step : int\n total_step = total of steps sampled\n finished_episode : int\n finished_episode = number of finished episodes in this sampling\n \"\"\"\n total_step = 0\n finished_episode = 0\n for _ in range(num_step):\n if (env.episode_end()):\n env.new_episode()\n self._reset()\n state_current = env.get_state({'resolution':\n self.model.img_resolution})\n action = self.model.sample_action(state_current,\n lstm_state_input = self.train_lstm_state,\n exploration_prob = exploration_prob)\n r = env.apply_action(\n env.action_set()[action['action_index']], num_repeat)\n self.state_this_episode.append(state_current)\n self.action_this_episode.append(action['action_index'])\n self.reward_this_episode.append(r)\n if (env.episode_end()):\n finished_episode += 1\n self.done_this_episode.append(1)\n self.replay_buffer.append({\n 'state': np.stack(self.state_this_episode, axis = 0),\n 'action': np.array(self.action_this_episode),\n 'reward': np.array(self.reward_this_episode),\n 'done': np.array(self.done_this_episode)\n })\n else:\n self.done_this_episode.append(0)\n state_next = env.get_state({'resolution':\n self.model.img_resolution})\n self.train_lstm_state = action['lstm_state_output']\n total_step += 1\n return total_step, finished_episode\n\n def sample_from_replay_buffer(self, batch_size, seq_len,\n target_model, discount_factor):\n \"\"\"\n Sample One Data Batch From Replay Buffer\n Args:\n batch_size : int\n batch_size = size of a batch\n seq_len : int\n seq_len = length of sequences\n target : DQNWorker\n target = target model\n discount_factor : float\n discount_factor = discount factor\n Returns:\n data_batch : dictionary or None (when not enough data)\n data_batch = one batch of data\n 'state_current' : numpy.ndarray\n 'state_current' = sampled current state, shape [batch_size] + state_shape\n 'action' : numpy.ndarray\n 'action' = sampled action, shape [batch_size] + action_shape\n 'target_q' : numpy.ndarray\n 'target_q' = target q value, shape [batch size]\n 'seq_len' : numpy.ndarray\n 'seq_len' = sequence length of each sampled sequences, shape [batch_size]\n \"\"\"\n if (self.replay_buffer.get_size() < 1):\n return None\n data_raw = self.replay_buffer.sample_batch(batch_size, seq_len)\n state_current = data_raw['state_current'].reshape([-1] +\n list(data_raw['state_current'].shape[2:]))\n state_next = data_raw['state_next'].reshape([-1] +\n list(data_raw['state_next'].shape[2:]))\n action = data_raw['action'].reshape([-1] +\n list(data_raw['action'].shape[2:]))\n reward = data_raw['reward'].reshape([-1] +\n list(data_raw['reward'].shape[2:]))\n done = data_raw['done'].reshape([-1] +\n list(data_raw['done'].shape[2:]))\n q_value_next = target_model.sample_action(\n state_next,\n lstm_state_input =\n 
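update_model above follows the standard PyTorch update recipe: reset gradients, override the learning rate in place, backpropagate, optionally clip the global gradient norm, then step. The same skeleton with illustrative names:

import torch.nn as nn

def apply_update(model, loss, optimizer, learning_rate, grad_clip_norm=None):
    optimizer.zero_grad()
    for group in optimizer.param_groups:
        group['lr'] = learning_rate  # the original writes param_groups[0] only
    loss.backward()
    if grad_clip_norm is not None:
        nn.utils.clip_grad_norm_(model.parameters(), grad_clip_norm)
    optimizer.step()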
self.model.zero_lstm_state(batch_size))['q_value']\n q_value_next = np.max(q_value_next, axis = 1)\n target_q = reward + discount_factor * (1 - done) * q_value_next\n return {'state_current': state_current,\n 'action': action, 'target_q': target_q,\n 'seq_len': data_raw['seq_len']}\n\n def test(self, env_test, test_episode, num_repeat,\n visualize = False,\n visualize_pause = None, visualize_size = [480, 640],\n progress_bar = True, verbose = False):\n \"\"\"\n Test The Model\n Args:\n env_test : rl_benchmark.env\n env_test = environment for testing\n test_episode : int\n test_episode = number of test episodes\n num_repeat : int\n num_repeat = number of repeated actions\n visualize : bool\n visualize = visualize testing \n visualize_pause : None or float\n visualize_pause = when this is None, screen is not shown, otherwise, each frame is shown for visualize_pause ms\n visualize_size : list or tuple\n visualize_size = size of visualization windows, [h, w]\n Returns:\n report : dictionary\n report = collection of statistics\n \"\"\"\n report = {}\n score_list = []\n if (progress_bar):\n ep_range = trange(test_episode)\n else:\n ep_range = range(test_episode)\n for i_ep in ep_range:\n env_test.new_episode()\n test_lstm_state = self.model.zero_lstm_state(batch_size = 1)\n while (not env_test.episode_end()):\n action_info = self.model.sample_action(\n env_test.get_state({'resolution':\n self.model.img_resolution}),\n lstm_state_input = test_lstm_state,\n exploration_prob = 0)\n for _ in range(num_repeat):\n env_test.apply_action(\n env_test.action_set()[\n action_info['action_index']], 1)\n if (env_test.episode_end()):\n break\n if (visualize):\n img_out = cv2.resize(env_test.get_state({\n 'resolution': self.model.img_resolution[:2]}),\n tuple(visualize_size[::-1]))\n cv2.imshow('screen',\n (img_out[..., ::-1] * 255).astype(np.uint8))\n cv2.waitKey(visualize_pause)\n test_lstm_state = action_info['lstm_state_output']\n score_list.append(env_test.episode_total_score())\n if (verbose):\n print(('Episode %d: ' % i_ep) + str(score_list[-1]))\n score_list = np.array(score_list)\n report['score_mean'] = np.mean(score_list)\n report['score_std'] = np.std(score_list)\n report['score_max'] = np.max(score_list)\n report['score_min'] = np.min(score_list)\n return report\n\n","sub_path":"python3/rl_benchmark/algorithm/dqn/torch_v1/worker/lstm_conv_drqn.py","file_name":"lstm_conv_drqn.py","file_ext":"py","file_size_in_byte":11170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278108167","text":"from enum import Enum\nfrom typing import Optional, List\n\nimport logging\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn import static_rnn, static_bidirectional_rnn\nfrom tensorflow.contrib.framework import arg_scope\nimport tfsnippet as spt\nfrom tfsnippet.bayes import BayesianNet\nfrom tfsnippet.utils import (instance_reuse,\n VarScopeObject,\n reopen_variable_scope)\nfrom tfsnippet.distributions import FlowDistribution, Normal\nfrom tfsnippet.layers import l2_regularizer\n\nimport mltk\nfrom algorithm.recurrent_distribution import RecurrentDistribution\nfrom algorithm.real_nvp import dense_real_nvp\nfrom algorithm.conv1d_ import conv1d, deconv1d\n\n\nclass RNNCellType(str, Enum):\n GRU = 'GRU'\n LSTM = 'LSTM'\n Basic = 'Basic'\n\n\nclass ModelConfig(mltk.Config):\n x_dim: int = -1\n z_dim: int = 3\n u_dim: int = 1\n window_length = 100\n output_shape: List[int] = [25, 25, 50, 50, 100]\n z2_dim: int = 13\n l2_reg = 0.0001\n posterior_flow_type: 
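sample_from_replay_buffer above builds the one-step TD target r + gamma * (1 - done) * max_a Q(s', a), so terminal transitions keep only the immediate reward. A NumPy check with toy numbers:

import numpy as np

reward = np.array([1.0, 0.0])
done = np.array([0, 1])  # the second transition ends its episode
q_value_next = np.array([[0.2, 0.5], [0.9, 0.1]])
discount_factor = 0.99
target_q = reward + discount_factor * (1 - done) * np.max(q_value_next, axis=1)
assert np.allclose(target_q, [1.495, 0.0])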
Optional[str] = mltk.config_field(choices=['rnvp', 'nf'], default='rnvp')\n # can be 'rnvp' for RealNVP, 'nf' for planarNF, None for not using posterior flow.\n posterior_flow_layers = 20\n rnn_cell: RNNCellType = RNNCellType.GRU # can be 'GRU', 'LSTM' or 'Basic'\n rnn_hidden_units = 500\n use_leaky_relu = False\n use_bidirectional_rnn = False # whether to use bidirectional rnn or not\n use_self_attention = False # whether to use self-attention on hidden states before infer qz or not.\n unified_px_logstd = False\n dropout_feature = False # dropout on the features in arnn\n logstd_min = -5.\n logstd_max = 2.\n use_prior_flow = False # If True, use RealNVP prior flow to enhance the representation of p(z).\n prior_flow_layers = 20\n\n connect_qz = True\n connect_pz = True\n\n\n# The final InterFusion model.\nclass MTSAD(VarScopeObject):\n\n def __init__(self, config: ModelConfig, name=None, scope=None):\n self.config = config\n super(MTSAD, self).__init__(name=name, scope=scope)\n\n with reopen_variable_scope(self.variable_scope):\n if self.config.rnn_cell == RNNCellType.Basic:\n self.d_fw_cell = tf.nn.rnn_cell.BasicRNNCell(self.config.rnn_hidden_units, name='d_fw_cell')\n self.a_fw_cell = tf.nn.rnn_cell.BasicRNNCell(self.config.rnn_hidden_units, name='a_fw_cell')\n if self.config.use_bidirectional_rnn:\n self.d_bw_cell = tf.nn.rnn_cell.BasicRNNCell(self.config.rnn_hidden_units, name='d_bw_cell')\n self.a_bw_cell = tf.nn.rnn_cell.BasicRNNCell(self.config.rnn_hidden_units, name='a_bw_cell')\n elif self.config.rnn_cell == RNNCellType.LSTM:\n self.d_fw_cell = tf.nn.rnn_cell.LSTMCell(self.config.rnn_hidden_units, name='d_fw_cell')\n self.a_fw_cell = tf.nn.rnn_cell.LSTMCell(self.config.rnn_hidden_units, name='a_fw_cell')\n if self.config.use_bidirectional_rnn:\n self.d_bw_cell = tf.nn.rnn_cell.LSTMCell(self.config.rnn_hidden_units, name='d_bw_cell')\n self.a_bw_cell = tf.nn.rnn_cell.LSTMCell(self.config.rnn_hidden_units, name='a_bw_cell')\n elif self.config.rnn_cell == RNNCellType.GRU:\n self.d_fw_cell = tf.nn.rnn_cell.GRUCell(self.config.rnn_hidden_units, name='d_fw_cell')\n self.a_fw_cell = tf.nn.rnn_cell.GRUCell(self.config.rnn_hidden_units, name='a_fw_cell')\n if self.config.use_bidirectional_rnn:\n self.d_bw_cell = tf.nn.rnn_cell.GRUCell(self.config.rnn_hidden_units, name='d_bw_cell')\n self.a_bw_cell = tf.nn.rnn_cell.GRUCell(self.config.rnn_hidden_units, name='a_bw_cell')\n else:\n raise ValueError('rnn cell must be one of GRU, LSTM or Basic.')\n\n if self.config.posterior_flow_type == 'nf':\n self.posterior_flow = spt.layers.planar_normalizing_flows(n_layers=self.config.posterior_flow_layers,\n scope='posterior_flow')\n elif self.config.posterior_flow_type == 'rnvp':\n self.posterior_flow = dense_real_nvp(flow_depth=self.config.posterior_flow_layers,\n activation=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg),\n scope='posterior_flow')\n else:\n self.posterior_flow = None\n\n if self.config.use_prior_flow:\n self.prior_flow = dense_real_nvp(flow_depth=self.config.prior_flow_layers,\n activation=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg),\n is_prior_flow=True,\n scope='prior_flow')\n else:\n self.prior_flow = None\n\n def _my_rnn_net(self, x, window_length, fw_cell, bw_cell=None,\n time_axis=1, use_bidirectional_rnn=False):\n \"\"\"\n Get the base rnn model.\n :param x: The rnn input.\n :param window_length: The window length of input along 
time axis.\n :param fw_cell: Forward rnn cell.\n :param bw_cell: Optional. Backward rnn cell, only use when config.use_bidirectional_rnn=True.\n :param time_axis: Which is the time axis in input x, default 1.\n :param use_bidirectional_rnn: Whether or not use bidirectional rnn. Default false.\n :return: Tensor (batch_size, window_length, rnn_hidden_units). The output of rnn.\n \"\"\"\n\n x = tf.unstack(value=x, num=window_length, axis=time_axis)\n\n if use_bidirectional_rnn:\n outputs, _, _ = static_bidirectional_rnn(fw_cell, bw_cell, x, dtype=tf.float32)\n else:\n outputs, _ = static_rnn(fw_cell, x, dtype=tf.float32)\n\n outputs = tf.stack(outputs, axis=time_axis) # (batch_size, window_length, rnn_hidden_units)\n return outputs\n\n @instance_reuse\n def a_rnn_net(self, x, window_length, time_axis=1,\n use_bidirectional_rnn=False, use_self_attention=False, is_training=False):\n \"\"\"\n Reverse rnn network a, capture the future information in qnet.\n \"\"\"\n def dropout_fn(input):\n return tf.layers.dropout(input, rate=.5, training=is_training)\n\n flag = False\n if len(x.shape) == 4: # (n_samples, batch_size, window_length, x_dim)\n x, s1, s2 = spt.ops.flatten_to_ndims(x, 3)\n flag = True\n elif len(x.shape) != 3:\n logging.error('rnn input shape error.')\n\n # reverse the input sequence\n reversed_x = tf.reverse(x, axis=[time_axis])\n\n if use_bidirectional_rnn:\n reversed_outputs = self._my_rnn_net(x=reversed_x, window_length=window_length, fw_cell=self.a_fw_cell,\n bw_cell=self.a_bw_cell, time_axis=time_axis,\n use_bidirectional_rnn=use_bidirectional_rnn)\n else:\n reversed_outputs = self._my_rnn_net(x=reversed_x, window_length=window_length, fw_cell=self.a_fw_cell,\n time_axis=time_axis, use_bidirectional_rnn=use_bidirectional_rnn)\n\n outputs = tf.reverse(reversed_outputs, axis=[time_axis])\n\n # self attention\n if use_self_attention:\n outputs1 = spt.layers.dense(outputs, 500, activation_fn=tf.nn.tanh, use_bias=True, scope='arnn_attention_dense1')\n outputs1 = tf.nn.softmax(spt.layers.dense(outputs1, window_length,\n use_bias=False, scope='arnn_attention_dense2'), axis=1)\n M_t = tf.matmul(tf.transpose(outputs, perm=[0, 2, 1]), outputs1)\n outputs = tf.transpose(M_t, perm=[0, 2, 1])\n\n # feature extraction layers\n outputs = spt.layers.dense(outputs, units=500, activation_fn=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg), scope='arnn_feature_dense1')\n if self.config.dropout_feature:\n outputs = dropout_fn(outputs)\n outputs = spt.layers.dense(outputs, units=500, activation_fn=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg), scope='arnn_feature_dense2')\n if self.config.dropout_feature:\n outputs = dropout_fn(outputs)\n\n if flag:\n outputs = spt.ops.unflatten_from_ndims(outputs, s1, s2)\n\n return outputs\n\n @instance_reuse\n def qz_mean_layer(self, x):\n return spt.layers.dense(x, units=self.config.z_dim, scope='qz_mean')\n\n @instance_reuse\n def qz_logstd_layer(self, x):\n return tf.clip_by_value(spt.layers.dense(x, units=self.config.z_dim, scope='qz_logstd'),\n clip_value_min=self.config.logstd_min, clip_value_max=self.config.logstd_max)\n\n @instance_reuse\n def pz_mean_layer(self, x):\n return spt.layers.dense(x, units=self.config.z_dim, scope='pz_mean')\n\n @instance_reuse\n def pz_logstd_layer(self, x):\n return tf.clip_by_value(spt.layers.dense(x, units=self.config.z_dim, scope='pz_logstd'),\n 
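a_rnn_net above captures future context by reversing the sequence along the time axis, running a forward RNN over it, and reversing the outputs back. The index gymnastics in NumPy (an identity function stands in for the recurrent pass):

import numpy as np

x = np.arange(2 * 4 * 3, dtype=float).reshape(2, 4, 3)  # (batch, time, features)
reversed_x = x[:, ::-1, :]     # same effect as tf.reverse(x, axis=[time_axis])
rnn_out = reversed_x           # placeholder for the forward RNN pass
outputs = rnn_out[:, ::-1, :]  # re-reverse so step t aligns with input step t
assert np.array_equal(outputs, x)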
clip_value_min=self.config.logstd_min, clip_value_max=self.config.logstd_max)\n\n @instance_reuse\n def hz2_deconv(self, z2):\n with arg_scope([deconv1d],\n kernel_size=5,\n activation_fn=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg)):\n h_z = deconv1d(z2, out_channels=self.config.x_dim, output_shape=self.config.output_shape[0], strides=2)\n h_z = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[1], strides=1)\n h_z = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[2], strides=2)\n h_z = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[3], strides=1)\n h_z2 = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[4], strides=2)\n return h_z2\n\n @instance_reuse\n def q_net(self, x, observed=None, u=None, n_z=None, is_training=False):\n # vs.name = self.variable_scope.name + \"/q_net\"\n logging.info('q_net builder: %r', locals())\n\n net = BayesianNet(observed=observed)\n\n def dropout_fn(input):\n return tf.layers.dropout(input, rate=.5, training=is_training)\n\n # use the pretrained z2 which compress along the time dimension\n qz2_mean, qz2_logstd = self.h_for_qz(x, is_training=is_training)\n\n qz2_distribution = Normal(mean=qz2_mean, logstd=qz2_logstd)\n\n qz2_distribution = qz2_distribution.batch_ndims_to_value(2)\n\n z2 = net.add('z2', qz2_distribution, n_samples=n_z, is_reparameterized=True)\n\n # d_{1:t} from deconv\n h_z = self.h_for_px(z2)\n\n # a_{1:t}, (batch_size, window_length, dense_hidden_units)\n arnn_out = self.a_rnn_net(h_z, window_length=self.config.window_length,\n use_bidirectional_rnn=self.config.use_bidirectional_rnn,\n use_self_attention=self.config.use_self_attention,\n is_training=is_training)\n\n if self.config.connect_qz:\n qz_distribution = RecurrentDistribution(arnn_out,\n mean_layer=self.qz_mean_layer, logstd_layer=self.qz_logstd_layer,\n z_dim=self.config.z_dim, window_length=self.config.window_length)\n else:\n qz_mean = spt.layers.dense(arnn_out, units=self.config.z_dim, scope='qz1_mean')\n qz_logstd = tf.clip_by_value(spt.layers.dense(arnn_out, units=self.config.z_dim, scope='qz1_logstd'),\n clip_value_min=self.config.logstd_min, clip_value_max=self.config.logstd_max)\n qz_distribution = Normal(mean=qz_mean, logstd=qz_logstd)\n\n if self.posterior_flow is not None:\n qz_distribution = FlowDistribution(distribution=qz_distribution, flow=self.posterior_flow).batch_ndims_to_value(1)\n else:\n qz_distribution = qz_distribution.batch_ndims_to_value(2)\n\n z1 = net.add('z1', qz_distribution, is_reparameterized=True)\n\n return net\n\n @instance_reuse\n def p_net(self, observed=None, u=None, n_z=None, is_training=False):\n logging.info('p_net builder: %r', locals())\n\n net = BayesianNet(observed=observed)\n\n pz2_distribution = Normal(mean=tf.zeros([self.config.z2_dim, self.config.x_dim]),\n logstd=tf.zeros([self.config.z2_dim, self.config.x_dim])).batch_ndims_to_value(2)\n\n z2 = net.add('z2', pz2_distribution, n_samples=n_z, is_reparameterized=True)\n\n # e_{1:t} from deconv, shared params\n h_z2 = self.h_for_px(z2)\n\n if self.config.connect_pz:\n pz_distribution = RecurrentDistribution(h_z2,\n mean_layer=self.pz_mean_layer, logstd_layer=self.pz_logstd_layer,\n z_dim=self.config.z_dim, window_length=self.config.window_length)\n else:\n # non-recurrent pz\n pz_mean = spt.layers.dense(h_z2, units=self.config.z_dim, scope='pz_mean')\n pz_logstd = 
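The *_logstd_layer methods above clamp the predicted log-stddev to [logstd_min, logstd_max] before a Normal is built from it, which keeps exp(logstd) away from zero and from overflow. In NumPy terms:

import numpy as np

logstd_min, logstd_max = -5.0, 2.0
raw = np.array([-9.3, 0.4, 6.1])
std = np.exp(np.clip(raw, logstd_min, logstd_max))
assert std.min() >= np.exp(logstd_min) and std.max() <= np.exp(logstd_max)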
tf.clip_by_value(spt.layers.dense(h_z2,\n units=self.config.z_dim, scope='pz_logstd'),\n clip_value_min=self.config.logstd_min,\n clip_value_max=self.config.logstd_max)\n pz_distribution = Normal(mean=pz_mean, logstd=pz_logstd)\n\n if self.prior_flow is not None:\n pz_distribution = FlowDistribution(distribution=pz_distribution, flow=self.prior_flow).batch_ndims_to_value(1)\n else:\n pz_distribution = pz_distribution.batch_ndims_to_value(2)\n\n z1 = net.add('z1', pz_distribution, is_reparameterized=True)\n\n h_z1 = spt.layers.dense(z1, units=self.config.x_dim)\n\n h_z = spt.ops.broadcast_concat(h_z1, h_z2, axis=-1)\n\n h_z = spt.layers.dense(h_z, units=500, activation_fn=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg), scope='feature_dense1')\n\n h_z = spt.layers.dense(h_z, units=500, activation_fn=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg), scope='feature_dense2')\n\n x_mean = spt.layers.dense(h_z, units=self.config.x_dim, scope='x_mean')\n if self.config.unified_px_logstd:\n x_logstd = tf.clip_by_value(\n tf.get_variable(name='x_logstd', shape=(), trainable=True, dtype=tf.float32,\n initializer=tf.constant_initializer(-1., dtype=tf.float32)),\n clip_value_min=self.config.logstd_min, clip_value_max=self.config.logstd_max)\n else:\n x_logstd = tf.clip_by_value(spt.layers.dense(h_z, units=self.config.x_dim, scope='x_logstd'),\n clip_value_min=self.config.logstd_min, clip_value_max=self.config.logstd_max)\n\n x = net.add('x',\n Normal(mean=x_mean, logstd=x_logstd).batch_ndims_to_value(2),\n is_reparameterized=True)\n\n return net\n\n def reconstruct(self, x, u, mask, n_z=None):\n with tf.name_scope('model.reconstruct'):\n qnet = self.q_net(x=x, u=u, n_z=n_z)\n pnet = self.p_net(observed={'z1': qnet['z1'], 'z2': qnet['z2']}, u=u)\n return pnet['x']\n\n def get_score(self, x_embed, x_eval, u, n_z=None):\n with tf.name_scope('model.get_score'):\n qnet = self.q_net(x=x_embed, u=u, n_z=n_z)\n pnet = self.p_net(observed={'z1': qnet['z1'], 'z2': qnet['z2']}, u=u)\n score = pnet['x'].distribution.base_distribution.log_prob(x_eval)\n recons_mean = pnet['x'].distribution.base_distribution.mean\n recons_std = pnet['x'].distribution.base_distribution.std\n if n_z is not None:\n score = tf.reduce_mean(score, axis=0)\n recons_mean = tf.reduce_mean(recons_mean, axis=0)\n recons_std = tf.reduce_mean(recons_std, axis=0)\n return score, recons_mean, recons_std\n\n @instance_reuse\n def h_for_qz(self, x, is_training=False):\n with arg_scope([conv1d],\n kernel_size=5,\n activation_fn=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n kernel_regularizer=l2_regularizer(self.config.l2_reg)):\n h_x = conv1d(x, out_channels=self.config.x_dim, strides=2) # 50\n h_x = conv1d(h_x, out_channels=self.config.x_dim)\n h_x = conv1d(h_x, out_channels=self.config.x_dim, strides=2) # 25\n h_x = conv1d(h_x, out_channels=self.config.x_dim)\n h_x = conv1d(h_x, out_channels=self.config.x_dim, strides=2) # 13\n\n qz_mean = conv1d(h_x, kernel_size=1, out_channels=self.config.x_dim)\n qz_logstd = conv1d(h_x, kernel_size=1, out_channels=self.config.x_dim)\n qz_logstd = tf.clip_by_value(qz_logstd, clip_value_min=self.config.logstd_min,\n clip_value_max=self.config.logstd_max)\n return qz_mean, qz_logstd\n\n @instance_reuse\n def h_for_px(self, z):\n with arg_scope([deconv1d],\n kernel_size=5,\n activation_fn=tf.nn.leaky_relu if self.config.use_leaky_relu else tf.nn.relu,\n 
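get_score above reads the anomaly score straight off the reconstruction distribution as log p(x), the per-dimension Gaussian log-density. The closed form it evaluates, checked at the mean with unit stddev:

import numpy as np

def gaussian_log_prob(x, mean, logstd):
    var = np.exp(2.0 * logstd)
    return -0.5 * (np.log(2.0 * np.pi) + 2.0 * logstd + (x - mean) ** 2 / var)

assert np.isclose(gaussian_log_prob(0.0, 0.0, 0.0), -0.5 * np.log(2.0 * np.pi))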
kernel_regularizer=l2_regularizer(self.config.l2_reg)):\n h_z = deconv1d(z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[0], strides=2)\n h_z = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[1], strides=1)\n h_z = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[2], strides=2)\n h_z = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[3], strides=1)\n h_z = deconv1d(h_z, out_channels=self.config.x_dim, output_shape=self.config.output_shape[4], strides=2)\n return h_z\n\n @instance_reuse\n def pretrain_q_net(self, x, observed=None, n_z=None, is_training=False):\n # vs.name = self.variable_scope.name + \"/q_net\"\n logging.info('pretrain_q_net builder: %r', locals())\n\n net = BayesianNet(observed=observed)\n\n def dropout_fn(input):\n return tf.layers.dropout(input, rate=.5, training=is_training)\n\n qz_mean, qz_logstd = self.h_for_qz(x, is_training=is_training)\n\n qz_distribution = Normal(mean=qz_mean, logstd=qz_logstd)\n\n qz_distribution = qz_distribution.batch_ndims_to_value(2)\n\n z = net.add('z', qz_distribution, n_samples=n_z, is_reparameterized=True)\n\n return net\n\n @instance_reuse\n def pretrain_p_net(self, observed=None, n_z=None, is_training=False):\n logging.info('p_net builder: %r', locals())\n\n net = BayesianNet(observed=observed)\n\n pz_distribution = Normal(mean=tf.zeros([self.config.z2_dim, self.config.x_dim]),\n logstd=tf.zeros([self.config.z2_dim, self.config.x_dim]))\n\n pz_distribution = pz_distribution.batch_ndims_to_value(2)\n\n z = net.add('z',\n pz_distribution,\n n_samples=n_z, is_reparameterized=True)\n\n h_z = self.h_for_px(z)\n\n px_mean = conv1d(h_z, kernel_size=1, out_channels=self.config.x_dim, scope='pre_px_mean')\n px_logstd = conv1d(h_z, kernel_size=1, out_channels=self.config.x_dim, scope='pre_px_logstd')\n px_logstd = tf.clip_by_value(px_logstd, clip_value_min=self.config.logstd_min,\n clip_value_max=self.config.logstd_max)\n\n x = net.add('x',\n Normal(mean=px_mean, logstd=px_logstd).batch_ndims_to_value(2),\n is_reparameterized=True)\n\n return net\n","sub_path":"algorithm/InterFusion.py","file_name":"InterFusion.py","file_ext":"py","file_size_in_byte":20945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277106673","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Hello World client in Python\n# Connects REQ socket to tcp://localhost:5555\n# Sends \"Hello\" to server, expects \"World\" back\n#\n\n#RASPBERRY PI\nimport zmq\nimport threading\n\ncontext = zmq.Context()\n\n# Socket to talk to server\nprint(\"Connecting to 1...\")\nsocket1 = context.socket(zmq.REQ)\nsocket1.connect(\"tcp://192.168.1.4:5555\")\n\nposition = [\"1500\", \"2000\", \"0500\", \"1100\", \"1200\", \"1300\", \"1800\", \"0000\"]\n\n# Send positions\nfor request in position:\n command=str(request)\n print(\"Sending commands %s\" % command)\n socket1.send(\"%s\" % command)\n\n # Get the reply.\n message1 = socket1.recv()\n print(\"Received reply %s [ %s ]\" % (request, message1))\n\n\n","sub_path":"Teststand_CDH2/0MQ-helloworld/test_array.py","file_name":"test_array.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54372218","text":"import unittest\nimport sys\n\nmodule = sys.argv[-1].split(\".py\")[0]\n\nclass PublicTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n 
global ordena_tipos\n undertest = __import__(module)\n ordena_tipos = getattr(undertest, 'ordena_tipos', None)\n\n\n def test_basico(self):\n assert ordena_tipos(['1a', '2', 'e', '4', '4.4', 'e6', '8']) == ['2', '4', '8', 'e', '1a', '4.4', 'e6']\n\n\nif __name__ == '__main__':\n loader = unittest.TestLoader()\n runner = unittest.TextTestRunner()\n runner.run(loader.loadTestsFromModule(sys.modules[__name__]))\n","sub_path":"miniteste9/OrdenaTipos/public_tests.py","file_name":"public_tests.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35141486","text":"#!/usr/bin/env python3\n#UDP客户端\n\nfrom socket import *\nimport sys\nif len(sys.argv)<3:\n print(\"argv is error!\")\n#1.创建套接字\nHOST = sys.argv[1]\nPORT = int(sys.argv[2])\nADDR = (HOST,PORT)\nsockfd =socket(AF_INET,SOCK_DGRAM)\n\n#2.收发消息\nwhile True:\n data = input(\"消息:\")\n if not data:\n break\n sockfd.sendto(data.encode(),ADDR)\n data,addr=sockfd.recvfrom(1024)\n print(\"从服务器收到:\",data.decode())\n#3.退出\nsockfd.close()","sub_path":"WLBC/UDP_client.py","file_name":"UDP_client.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462227723","text":"from . import ProgressiveTest\n\nfrom progressivis import Print\nfrom progressivis.stats import Stats\nfrom progressivis.io import CSVLoader\nfrom progressivis.core.wait import Wait\nfrom progressivis.datasets import get_dataset\n\nimport numpy as np\n\nclass TestStats(ProgressiveTest):\n# def setUp(self):\n# log_level(logging.DEBUG,'progressivis.core')\n\n def test_stats(self):\n s = self.scheduler()\n csv_module = CSVLoader(get_dataset('smallfile'), index_col=False,header=None,\n scheduler=s)\n stats=Stats('_1', name='test_stats', scheduler=s)\n wait=Wait(name='wait', delay=3, scheduler=s)\n wait.input.inp = csv_module.output.table\n stats.input._params = wait.output.out\n stats.input.table = csv_module.output.table\n pr = Print(proc=self.terse, name='print', scheduler=s)\n pr.input.df = stats.output.stats\n s.start()\n s.join()\n table = csv_module.table()\n stable = stats.table()\n last = stable.last()\n tmin = table['_1'].min()\n self.assertTrue(np.isclose(tmin, last['__1_min']))\n tmax = table['_1'].max()\n self.assertTrue(np.isclose(tmax, last['__1_max']))\n\nif __name__ == '__main__':\n ProgressiveTest.main()\n","sub_path":"tests/test_03_stats.py","file_name":"test_03_stats.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54593440","text":"\"\"\"\nframe.py - Multiwindow display implemented by a list of window instances,\n with a scrolling command region at the bottom of the display.\n\"\"\"\n\nfrom enum import Enum\nimport terminal, terminal_util, display, window\nfrom updates import Op, background_task\n\nnlines, ncols = terminal_util.dimensions()\n\n# Default frame dimensions, might be updated while running, especially cmd_h:\nframe_top = 1 # line number on display of first line of frame\ncmd_h = 2 # default height (lines) of scrolling command region at the bottom\n\n# Assigned by scale()\nwindows_h = None # total number of lines of all windows, including status lines\ncmd_1 = None # line number on display of first line of scrolling command region\ncmd_n = None # \" bottom \"\n\n# First window is assigned from startup after cmd_h etc. 
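The UDP client record above follows the canonical datagram pattern: one SOCK_DGRAM socket, sendto with an explicit (host, port), and recvfrom returning both payload and sender. A condensed sketch (the address is illustrative rather than the record's argv-supplied one, and recvfrom blocks until a server answers):

from socket import socket, AF_INET, SOCK_DGRAM

ADDR = ('127.0.0.1', 8888)            # illustrative host/port
sockfd = socket(AF_INET, SOCK_DGRAM)
sockfd.sendto('ping'.encode(), ADDR)  # UDP: no connect() handshake needed
data, addr = sockfd.recvfrom(1024)    # blocks until a datagram arrives
print(data.decode(), 'from', addr)
sockfd.close()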
are assigned\nifocus = None # index of window with input focus\nwin = None # window with input focus\nwindows = list() # list of windows, windows[ifocus] has input focus\n\nclass Mode(Enum):\n command = 1 # ed command mode\n input = 2 # ed input mode for a,i,c commmands\n display = 3 # edsel display mode\n\nmode = Mode.command\n# window.command_mode() tracks mode\nwindow.command_mode = (lambda: mode == Mode.command)\n\ndef scale(nlines, cmd_h):\n 'Calculate dimensions and location of windows and scrolling command region'\n global cmd_1, cmd_n, windows_h\n cmd_1 = nlines - cmd_h + 1 # scrolling command region, index of first line\n cmd_n = nlines # scrolling command region, last line\n windows_h = nlines - cmd_h # windows with status lines fill remaining space\n\ndef init(buffer):\n 'Initialize frame with one window into buffer'\n global win, ifocus\n # must assign frame size before create first window\n scale(nlines, cmd_h) # default cmd_h, may reassign before first update\n win = window.Window(buffer, frame_top, windows_h-1, ncols) # -1 excl status\n win.focus = True\n windows.append(win)\n ifocus = 0\n\ndef update_windows():\n 'Redraw all windows, called by refresh, for example after resize.'\n for w in windows:\n w.update()\n\ndef put_command_cursor(column=1):\n 'Put cursor at command line in scroll region, at given col (default 1)'\n display.put_cursor(cmd_n, column) # last line on display\n\ndef put_display_cursor(column=1):\n 'Put cursor at dot in current window, at given column (default 1)'\n wdot = win.wline(win.buf.dot)\n display.put_cursor(wdot,column)\n\ndef refresh(column=1):\n 'Clear and update entire frame in command mode, otherwise just the windows'\n if mode == Mode.command:\n display.put_cursor(1,1) # upper left corner\n display.erase()\n update_windows()\n if mode == Mode.command:\n display.set_scroll(cmd_1, cmd_n)\n # update() sets cursor\n elif mode == Mode.display:\n put_display_cursor(column=column)\n\ndef rescale():\n 'Recalculate frame and all window dimensions, then display all.'\n # Makes all windows (almost) the same height, unlike after o2 command\n scale(nlines, cmd_h)\n nwindows = len(windows)\n win_hdiv = windows_h // nwindows\n for iwin, win in enumerate(windows):\n win_h = (win_hdiv if iwin < nwindows-1\n else windows_h - (nwindows-1)*win_hdiv) # including status\n win.resize(frame_top + iwin*win_hdiv, win_h-1, ncols) # -1 excl status\n win.locate_segment(win.buf.dot if win.focus else win.saved_dot)\n refresh()\n\ndef update(op, sourcebuf=None, buffer=None, origin=0, destination=0,\n start=0, end=0, column=1): # display column numbers are 1-based\n 'Update the display: one window, several, or the entire frame.'\n\n global mode, win, ifocus, cmd_h\n\n # Clear display, redraw all the windows and scrolling command region.\n if op == Op.refresh:\n refresh(column=column)\n\n # Restore full screen scrolling, cursor to bottom\n elif op == Op.restore:\n display.set_scroll_all()\n display.put_cursor(nlines,1)\n\n # Rescale frame and window sizes, then refresh.\n elif op == Op.rescale:\n cmd_h = start if start else cmd_h\n rescale()\n\n # Create new buffer in current window, ed B\n # Op.insert case will display its contents.\n elif op == Op.create:\n win.focus = True\n win.buf = buffer\n win.locate_segment(win.buf.dot)\n win.saved_dot = win.buf.dot\n win.reupdate()\n\n # Change buffer in current window, ed b E D\n elif op == Op.select:\n win.focus = True\n win.buf = buffer\n win.reupdate()\n\n # Delete current buffer, ed D\n elif op == Op.remove:\n for w in 
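rescale above divides windows_h by the window count and gives the last window whatever remains, so the heights always sum exactly to the frame. The arithmetic on a concrete case:

windows_h, nwindows = 22, 3
win_hdiv = windows_h // nwindows
heights = [win_hdiv] * (nwindows - 1) + [windows_h - (nwindows - 1) * win_hdiv]
# The last window absorbs the remainder, exactly as rescale() computes.
assert heights == [7, 7, 8] and sum(heights) == windows_h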
windows:\n if w.buf == sourcebuf: # deleted buffer\n w.buf = buffer # new current buffer\n w.reupdate()\n\n # Switch to ed input mode, for ed a i c commands\n elif op == Op.input:\n mode = Mode.input\n win.update_for_input()\n wdot = win.wline(win.buf.dot)\n display.put_cursor(wdot+1,1) # +1 so we can't use put_display_cursor\n\n # Switch to ed command mode, ed . while in input mode\n elif op == Op.command:\n mode = Mode.command\n # Overwrite '.' line on display, and lines below.\n win.update_from(win.buf.dot + 1)\n win.set_marker(win.buf.dot)\n\n # Switch to edsel display mode\n elif op == Op.display:\n mode = Mode.display\n win.clear_marker(win.buf.dot)\n\n # Dot moved, ed l command\n elif op == Op.locate:\n win.locate(origin, destination)\n\n # Insert text: ed a i c m r t y commands\n # start, end are after insert, start == destination, end == win.buf.dot\n elif op == Op.insert and origin != background_task:\n if mode != Mode.input: # ed commands m r t y\n win.insert(origin, start, end)\n elif mode == Mode.input: # input mode after ed commands a i c\n # Text at dot is already up-to-date on display, open next line.\n win.update_for_input()\n for w in windows:\n if w.samebuf(win):\n w.adjust_insert(start, end, destination)\n if mode == Mode.input: # can't put input cursor til other windows done\n win.put_cursor_for_input(column=1)\n\n # Background task inserts text by calling buffer write() method.\n # Search for windows (if any) which displays that buffer.\n elif op == Op.insert and origin == background_task:\n for w in windows:\n if w.buf == buffer:\n w.saved_dot = w.buf.dot\n w.insert(origin, start, end)\n if mode == Mode.input: # can't put input cursor til other windows done\n win.put_cursor_for_input(column=column)\n elif mode == Mode.display:\n put_display_cursor(column=column)\n else:\n pass # Mode.commmand handled at the end of this fcn\n\n # Delete text: ed d m command\n # start,end are line numbers before delete, destination == win.buf.dot\n elif op == Op.delete:\n win.delete(destination)\n for w in windows:\n if w.samebuf(win):\n w.adjust_delete(start, end, destination)\n\n # Change text: ed s command\n # Update all lines in start..destination, don't know which lines changed\n elif op == Op.mutate:\n win.mutate(origin, start, destination)\n for w in windows:\n if w.samebuf(win):\n if w.intersects(start, destination):\n w.mutate_lines(start, destination)\n\n # Switch to next window, edda o command\n elif op == Op.next:\n w0 = win\n w0.release_focus()\n w0.update_status()\n ifocus = ifocus+1 if ifocus+1 < len(windows) else 0\n win = windows[ifocus]\n win.set_focus()\n win.update_status()\n\n # Delete all but current window, edda o1 cmd\n elif op == Op.single:\n windows[:] = [win]\n ifocus = 0\n win.resize(frame_top, windows_h-1, ncols) # one window, -1 excl status\n win.reupdate()\n\n # Split window, new window above becomes current window, edda o2 command\n elif op == Op.hsplit:\n win_top = win.top\n win_nlines = win.nlines // 2\n w0 = win\n w0.release_focus()\n w0.resize(win_top + win_nlines, w0.nlines - win_nlines, ncols)\n w0.move_update(w0.saved_dot)\n win = window.Window(win.buf,win_top,win_nlines-1,ncols) #-1 excl status\n win.focus = True\n windows.insert(ifocus, win)\n win.reupdate()\n\n # Update status line for given buffer in all of its windows\n elif op == Op.status:\n for w in windows:\n if w.buf == buffer:\n w.update_status()\n\n # In command mode put ed command cursor back in scrolling command region.\n # Then we can call standard Python input() or Piety Console 
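Op.next above advances the focus index with a wrap-around conditional, which is the same as incrementing modulo the window count:

def next_focus(ifocus, nwindows):
    return ifocus + 1 if ifocus + 1 < nwindows else 0

assert [next_focus(i, 3) for i in range(3)] == [1, 2, 0]
assert all(next_focus(i, 3) == (i + 1) % 3 for i in range(3))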
restart().\n if mode == Mode.command:\n put_command_cursor(column=column) # background task can set column\n # Each Op... case handles other modes, see refresh() and Op.refresh\n # Is that necessary? I recall it's because some cases use default column=1\n\n return win # caller might need to know which window was selected\n","sub_path":"editors/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"50190832","text":"\"\"\" A class with methods for dimensionless coagulation\n\"\"\"\n\nfrom particula.util.input_handling import in_scalar\nfrom particula.util.diffusive_knudsen import DiffusiveKnudsen as DKn\nfrom particula.util.diffusive_knudsen import celimits\n# from particula.util.diffusive_knudsen import diff_knu as dknu\nfrom particula.util.diffusive_knudsen import red_frifac, red_mass, rxr\nfrom particula.util.approx_coagulation import approx_coag_less\n\n\nclass DimensionlessCoagulation(DKn):\n\n \"\"\" dimensionless coagulation\n \"\"\"\n\n def __init__(\n self,\n dkn_val=None,\n coag_approx=\"hardsphere\",\n **kwargs\n ):\n \"\"\" Dimensionless particle--particle coagulation kernel.\n\n Attributes:\n diff_knu (float) [dimensionless]\n\n Notes:\n The dimensionless coagulation kernel is defined as\n a function of the diffusive knudsen number; for more info,\n please see the documentation of the respective function:\n - particula.util.diffusive_knudsen.diff_knu(**kwargs)\n \"\"\"\n super().__init__(**kwargs)\n\n self.diff_knu = DKn(**kwargs).get_diff_knu() if dkn_val is None \\\n else in_scalar(dkn_val)\n\n self.coag_approx = coag_approx\n\n self.kwargs = kwargs\n\n def coag_less(self):\n \"\"\" Return the dimensionless coagulation kernel.\n \"\"\"\n\n impls = [\"hardsphere\", \"gh2012\", \"cg2019\", \"dy2007\", \"gk2008\"]\n\n if self.coag_approx not in impls:\n raise ValueError(f\"{self.coag_approx} not recognized!\")\n\n return approx_coag_less(\n diff_knu=self.diff_knu,\n cpr=self.coulomb_potential_ratio(),\n approx=self.coag_approx\n )\n\n def coag_full(self):\n \"\"\" Retrun the dimensioned coagulation kernel\n \"\"\"\n\n coag = self.coag_less()\n redff = red_frifac(**self.kwargs)\n redm = red_mass(**self.kwargs)\n cekl, cecl = celimits(**self.kwargs)\n xrxr = rxr(**self.kwargs)\n\n return (\n coag * redff * xrxr**3 * cekl**2 / (redm * cecl)\n )\n\n\ndef less_coag(**kwargs):\n \"\"\" Return the dimensionless coagulation kernel.\n\n The dimensionless coagulation kernel is defined as\n a function of the diffusive knudsen number; for more info,\n please see the documentation of the respective function:\n - particula.util.diffusive_knudsen.diff_knu(**kwargs)\n\n Examples:\n ```\n >>> from particula import u\n >>> from particula.util.dimensionless_coagulation import less_coag\n >>> # only for hardsphere coagulation for now\n >>> # with only one radius\n >>> less_coag(radius=1e-9)\n \n >>> # with two radii\n >>> less_coag(radius=1e-9, other_radius=1e-8)\n \n >>> # with two radii and charges\n >>> less_coag(\n ... radius=1e-9, other_radius=1e-8, charge=1, other_charge=-1\n ... 
)\n \n \"\"\"\n\n return DimensionlessCoagulation(**kwargs).coag_less()\n\n\ndef full_coag(**kwargs):\n \"\"\" Return the dimensioned coagulation kernel\n \"\"\"\n\n return DimensionlessCoagulation(**kwargs).coag_full()\n","sub_path":"particula/util/dimensionless_coagulation.py","file_name":"dimensionless_coagulation.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333885224","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"Project Euler Solution 021\n\nCopyright (c) 2011 by Robert Vella - robert.r.h.vella@gmail.com\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and / or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport cProfile\nfrom euler.numbers.number_theory import AmicablePairs\nfrom itertools import takewhile\n\ndef get_answer():\n \"\"\"Question:\n \n Let d(n) be defined as the sum of proper divisors of n (numbers \n less than n which divide evenly into n). If d(a) = b and d(b) = a, \n where a ≠ b, then a and b are an amicable pair and each of a and b \n are called amicable numbers.\n \n For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, \n 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 \n are 1, 2, 4, 71 and 142; so d(284) = 220.\n \n Evaluate the sum of all the amicable numbers under 10000.\n \"\"\"\n #Initilise cache of amicable pairs.\n amicable_pairs = AmicablePairs()\n \n #Return result \n return sum(\n sum(amicable_pair) for amicable_pair \n in takewhile(\n lambda amicable_pair : amicable_pair[1] <= 10000,\n amicable_pairs\n )\n )\n \n \nif __name__ == \"__main__\":\n cProfile.run(\"print(get_answer())\")\n\n","sub_path":"answers/euler021.py","file_name":"euler021.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612720686","text":"\r\nfrom flask import Flask, render_template, url_for, redirect, request, abort\r\nimport json, requests\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport joblib\r\nimport base64\r\nimport io\r\napp = Flask(__name__)\r\n# Load pokemon and combats csv\r\npokemon = pd.read_csv('pokemon.csv')\r\ncombats = pd.read_csv('combats.csv')\r\npokemon['total'] = pokemon['HP'] + pokemon['Attack'] + pokemon['Defense'] + pokemon['Sp. Atk'] + pokemon['Sp. 
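get_answer in the euler021 record sums amicable pairs from an open-ended generator, relying on takewhile to stop at the first pair whose larger member exceeds the bound. The same bounded-consumption pattern on a plain counter:

from itertools import count, takewhile

bounded = takewhile(lambda n: n <= 5, count(1))  # count(1) never terminates on its own
assert sum(bounded) == 15  # 1 + 2 + 3 + 4 + 5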
Def'] + pokemon['Speed']\r\n\r\n@app.route('/')\r\n@app.route('/home')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@app.route('/hasil', methods=['POST','GET'])\r\ndef hasil():\r\n pokemon1 = request.form['pokemon1'].lower()\r\n pokemon2 = request.form['pokemon2'].lower()\r\n\r\n url1 = 'https://pokeapi.co/api/v2/pokemon/' + pokemon1\r\n url2 = 'https://pokeapi.co/api/v2/pokemon/' + pokemon2\r\n\r\n data1 = requests.get(url1)\r\n data2 = requests.get(url2)\r\n\r\n if str(data1) == '':\r\n return render_template('error.html')\r\n elif str(data2)=='':\r\n return render_template('error.html')\r\n\r\n filedata1 = data1.json()['forms']\r\n filedata2 = data2.json()['forms']\r\n \r\n name1 = filedata1[0]['name'].capitalize()\r\n name2 = filedata2[0]['name'].capitalize()\r\n\r\n picture1 = data1.json()['sprites']['front_default']\r\n picture2 = data2.json()['sprites']['front_default']\r\n\r\n if pokemon1.capitalize() in pokemon['Name'].values and pokemon2.capitalize() in pokemon['Name'].values:\r\n firstPokemon = pokemon[pokemon['Name'] == pokemon1.capitalize()][['Name' ,'HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed', 'total']]\r\n secondPokemon = pokemon[pokemon['Name'] == pokemon2.capitalize()][['Name' ,'HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed', 'total']]\r\n battle = np.concatenate([firstPokemon.drop('Name', axis=1).values, secondPokemon.drop('Name', axis=1).values], axis=1)\r\n prediction = model.predict(battle)[0]\r\n \r\n compare = pd.concat([firstPokemon, secondPokemon])\r\n \r\n # Plot graph\r\n plt.figure(figsize=(12,6))\r\n plt.subplot(161)\r\n plt.bar([compare.iloc[0]['Name'], compare.iloc[1]['Name']], compare['HP'], color=['blue', 'green'])\r\n plt.title('HP')\r\n plt.subplot(162)\r\n plt.bar([compare.iloc[0]['Name'], compare.iloc[1]['Name']], compare['Attack'], color=['blue', 'green'])\r\n plt.title('Attack')\r\n plt.subplot(163)\r\n plt.bar([compare.iloc[0]['Name'], compare.iloc[1]['Name']], compare['Defense'], color=['blue', 'green'])\r\n plt.title('Defense')\r\n plt.subplot(164)\r\n plt.bar([compare.iloc[0]['Name'], compare.iloc[1]['Name']], compare['Sp. Atk'], color=['blue', 'green'])\r\n plt.title('Sp. Attack')\r\n plt.subplot(165)\r\n plt.bar([compare.iloc[0]['Name'], compare.iloc[1]['Name']], compare['Sp. Def'], color=['blue', 'green'])\r\n plt.title('Sp. 
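The /hasil route above builds the classifier input by placing both Pokemon's stat rows side by side with np.concatenate along axis 1, matching how the combats training pairs are laid out. With illustrative stats:

import numpy as np

first = np.array([[45, 49, 49, 65, 65, 45, 318]])   # HP..Speed plus total
second = np.array([[39, 52, 43, 60, 50, 65, 309]])
battle = np.concatenate([first, second], axis=1)
assert battle.shape == (1, 14)  # one row holding both fighters' seven features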
Defense')\r\n plt.subplot(166)\r\n plt.bar([compare.iloc[0]['Name'], compare.iloc[1]['Name']], compare['Speed'], color=['blue', 'green'])\r\n plt.title('Speed')\r\n\r\n plt.tight_layout()\r\n\r\n # Saving Plot as image\r\n img = io.BytesIO()\r\n plt.savefig(img, format='png', transparent=True)\r\n img.seek(0)\r\n graph_url = base64.b64encode(img.getvalue()).decode()\r\n graph = 'data:image/png;base64,{}'.format(graph_url)\r\n\r\n # Predicting probability\r\n if prediction == 1:\r\n prob = model.predict_proba(battle)[0][1] * 100\r\n win = name1\r\n result = {'prob':prob, 'win':win, 'graph':graph}\r\n return render_template('hasil.html', name1=name1, name2=name2, result=result, picture1=picture1, picture2 = picture2)\r\n else:\r\n prob = model.predict_proba(battle)[0][0] * 100,2\r\n win = name2\r\n result = {'prob':prob, 'win':win, 'graph':graph}\r\n return render_template('hasil.html', name1=name1, name2=name2, result=result, picture1=picture1, picture2 = picture2)\r\n else:\r\n abort(404)\r\n \r\n@app.errorhandler(404)\r\ndef page_not_found(error):\r\n\treturn render_template('Notfound.html')\r\n\r\nif __name__ == \"__main__\":\r\n model = joblib.load('pokeModelDT')\r\n app.run(debug=True)","sub_path":"soal3flask.py","file_name":"soal3flask.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314136161","text":"import os\nimport urllib\nimport webapp2\n\nfrom custom_exceptions import PageNotFoundException\nfrom constants import Constants\nfrom helper import Helper\nfrom repositories.general import SiteInformationRepository\nfrom repositories.data.repository_pickle import SharkAttackRepository, CountryRepository, AreaRepository, DataHelper\nfrom siteinformation import SiteInformation\nfrom error_handlers import ErrorHandlers\n\nclass BasePage(webapp2.RequestHandler):\n def __init__(self, request, response):\n self.initialize(request, response)\n self.helper = Helper()\n self._siteInformationRepository = SiteInformationRepository()\n self._countryRepository = CountryRepository()\n self._areaRepository = AreaRepository()\n self._sharkAttackRepository = SharkAttackRepository()\n self._dataHelper = DataHelper()\n self._pageTemplate = \"main.html\"\n self._host = os.environ.get(\"HTTP_HOST\")\n self._urlScheme = os.environ.get(\"wsgi.url_scheme\")\n self._path = os.environ.get(\"PATH_INFO\")\n self._fullUrl = \"%s://%s%s\" % (self._urlScheme, self._host, self._path)\n self._isSiteMaintenancePage = False\n\n def isGsaf(self):\n return self.__class__.__name__.startswith(\"Gsaf\")\n\n def get(self, *args, **kwargs):\n self.respond(*args, **kwargs)\n\n def head(self, *args, **kwargs):\n self.respond(*args, **kwargs)\n\n def respond(self, *args, **kwargs):\n siteInfo = self._siteInformationRepository.get()\n if siteInfo.status == SiteInformation.STATUS_OFFLINE and not self._isSiteMaintenancePage:\n self.response.status = \"307 Temporary Redirect\"\n self.response.headers[\"Location\"] = \"/site-maintenance?%s\" % urllib.urlencode({ \"referrer\": self._path })\n return\n else:\n try:\n pageDict = self.handle(*args)\n except PageNotFoundException as nfe:\n if nfe.correctPath is not None:\n self.response.status = \"301 Moved Permanently\"\n self.response.headers[\"Location\"] = nfe.correctPath\n else:\n ErrorHandlers.generate404(self.request, self.response, 404)\n return\n\n template_values = {\n \"title\": \"Shark Attack Data\",\n \"subtemplate\": self.resolveTemplatePath(\"basepage.html\"),\n 
\"show_social_media_buttons\": True,\n \"og_image\": \"%s://%s/assets/images/Sharks-1920-1200.jpg\" % (self._urlScheme, self._host),\n \"meta_description\": Constants.SiteDescription,\n \"full_url\": self._fullUrl\n }\n \n for key, value in pageDict.iteritems():\n template_values[key] = value\n\n template = Constants.JINJA_ENVIRONMENT.get_template(self.resolveTemplatePath(self._pageTemplate))\n self.response.write(template.render(template_values))\n\n def getBreadcrumbData(self, node):\n retval = []\n firstRun = True\n site = \"\"\n if self.isGsaf():\n site = \"gsaf\"\n while node is not None:\n if firstRun:\n if self._dataHelper.nodeIsSharkAttack(node):\n retval.append({ \"name\": self._dataHelper.getNodeId(node), \"url\": \"\" })\n else:\n retval.append({ \"name\": self._dataHelper.getNodeName(node), \"url\": \"\" })\n else:\n retval.append({ \"name\": node.name, \"url\": self.helper.getUrlForNode(site, node) })\n firstRun = False\n node = self._dataHelper.getNodeParent(node)\n\n retval.append({ \"name\": \"Countries\", \"url\": \"\" if firstRun else self.helper.getUrlForNode(site, None) })\n\n retval.reverse()\n return retval\n\n def resolveTemplatePath(self, relativePath):\n return self.helper.resolveTemplatePath(relativePath, self.isGsaf())\n\n","sub_path":"basepage.py","file_name":"basepage.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537620423","text":"\"\"\"\nGiven a 2d grid map of '1's (land) and '0's (water), count the number of islands. \nAn island is surrounded by water and is formed by connecting adjacent lands \nhorizontally or vertically. You may assume all four edges of the grid are all \nsurrounded by water.\n\nExample 1:\n\n11110\n11010\n11000\n00000\nAnswer: 1\n\nExample 2:\n\n11000\n11000\n00100\n00011\nAnswer: 3\n\ntraverse through the whole matrix, if we come across a '1', we do DFS starting from this point, \nand marked it to be visited, we then find another point that is not yet visited and is '1' to do another \nDFS. How many DFS we have done, how many island we have. 
\n\ntime O(m*n), space O(max(m, n)) 递归栈空间\n\n\"\"\"\n\nclass Solution(object):\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n row = len(grid)\n if row == 0:\n return 0\n col = len(grid[0])\n if col == 0:\n return 0\n res = 0\n\n for i in range(row):\n for j in range(col):\n if grid[i][j] == '1' :\n self.dfs(grid, i, j, row, col)\n res += 1\n return res\n\n def dfs(self, grid, i, j, row, col):\n \n grid[i][j] = 'visited'\n \n if i < row - 1 and grid[i+1][j] == '1' :\n self.dfs(grid, i+1, j, row, col)\n \n if j < col - 1 and grid[i][j+1] == '1' :\n self.dfs(grid, i, j+1, row, col)\n \n if i > 0 and grid[i-1][j] == '1' :\n self.dfs(grid, i-1, j, row, col)\n \n if j > 0 and grid[i][j-1] == '1' :\n self.dfs(grid, i, j-1, row, col)\n","sub_path":"Tree Recur backtracking/200-Number of Islands/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"480311999","text":"\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\n\nfrom .forms import SignUpForm, saveProfile\nfrom django.contrib.auth.models import User\nfrom .models import userProfile,Channel, Videos, Subscriber, Watch, Likes\nfrom django.utils import timezone\nfrom datetime import datetime\n\n# Create your views here.\ndef index(request):\n\tpost = userProfile.objects.filter(uname=request.user.id)\n\tform = Videos.objects.order_by('-pub_date')\n\tvid=Videos.objects.order_by('-views')\n\tlists=Channel.objects.order_by('-pub_date')\n\treturn render(request, 'index.html',{'post':post, 'form':form,'vid':vid,'lists':lists })\n\ndef getVideos(request, vid):\n\tpost=userProfile.objects.filter(uname=request.user.id)\n\tform=Videos.objects.filter(id=vid)\n\tviewsv=Videos.objects.order_by('-views')\n\n\tfor f in form:\n\t\t\tgets=Channel.objects.filter(name=f.name)\n\n\tif 'Sub'in request.POST or 'Like' in request.POST or 'dislike' in request.POST:\n\t\treturn redirect('login')\n\n\treturn render(request,'videos1.html',{'post':post, 'form':form,'gets':gets,'viewsv':viewsv})\n\ndef videos(request, vid):\n\tpost=userProfile.objects.filter(uname=request.user.id)\n\tu=get_object_or_404(User, username=request.user.username)\n\tform=Videos.objects.filter(id=vid)\n\tviewsv=Videos.objects.order_by('-views')\n\tif request.user.is_authenticated:\n\t\tfor f in form:\n\t\t\tgets=Channel.objects.filter(name=f.name)\n\t\t\tc=Videos.objects.get(id=f.id)\n\t\t\td=f.name\n\t\t\tif f.id==vid and request.user.is_authenticated:\n\t\t\t\tif Watch.objects.filter(viewsid=c):\n\t\t\t\t\tmessages.add_message(request, messages.ERROR, 'You already view')\n\t\t\t\telse:\n\t\t\t\t\tf.views=f.views+1\n\t\t\t\t\tf.save()\n\t\t\t\t\twatchobj=Watch.objects.create(user=u, viewsid=c)\n\n\t\ttitle=Channel.objects.filter(name=d)\n\t\tfor t in title:\n\t\t\tsubs=Videos.objects.filter(name=t.id)\n\t\t\tx=t.id\n\t\t\tif 'Sub' in request.POST:\n\t\t\t\tt.subscribe=t.subscribe+1\n\t\t\t\tt.save()\n\t\t\tif 'unSub' in request.POST:\n\t\t\t\tt.subscribe=t.subscribe-1\n\t\t\t\tt.save()\n\t\tfor f in form:\n\t\t\tc=Videos.objects.get(id=f.id)\n\t\t\tif 'Like' in request.POST:\n\t\t\t\tf.like=f.like+1\n\t\t\t\tf.save()\n\t\t\t\tlikesobj=Likes.objects.create(user=u, likesid=c)\n\n\t\t\tif 'dislike' in 
request.POST:\n\t\t\t\tf.dislike=f.dislike+1\n\t\t\t\tf.save()\n\n\t\tif request.POST and 'Sub' in request.POST:\n\t\t\tuser=u\n\t\t\tname=Channel.objects.get(name=d)\n\t\t\tif Subscriber.objects.filter(sub_name=x).exists():\n\t\t\t\tmessages.add_message(request, messages.ERROR, 'You have alredy subscribe this channel.')\n\t\t\telse:\n\t\t\t\tsubsobj=Subscriber.objects.create(user=user, sub_name=x)\n\n\t\tif request.POST and 'unSub' in request.POST:\n\t\t\tlists=Subscriber.objects.filter(sub_name=x)\n\t\t\tfor l in lists:\n\t\t\t\tif l.sub_name==x:\n\t\t\t\t\tl.delete()\n\t\ttests=Subscriber.objects.filter(sub_name=x)\n\n\treturn render(request,'videos.html',{'post':post, 'form':form,'gets':gets,'tests':tests,'viewsv':viewsv})\n\ndef getRegister(request):\n\tform=SignUpForm(request.POST or None)\n\tif form.is_valid():\n\t\tinstance=form.save(commit=False)\n\t\tinstance.save()\n\t\tmessages.success(request, 'Registration Successfully Completed!')\n\t\treturn redirect('login')\n\treturn render(request, 'signup.html',{'form':form})\n\ndef getLogin(request):\n\tif request.user.is_authenticated:\n\t\treturn redirect('index')\n\telse:\n\t\tif request.method=='POST':\n\t\t\tuser = request.POST.get('user')\n\t\t\tpassword=request.POST.get('pass1')\n\t\t\tauth=authenticate(request,username=user,password=password)\n\t\t\tif auth is not None:\n\t\t\t\tlogin(request, auth)\n\t\t\t\treturn redirect('index')\n\t\t\telse:\n\t\t\t\tmessages.add_message(request, messages.ERROR, 'Incorrect username or password.')\n\treturn render(request,'login.html')\n\ndef getLogout(request):\n\tlogout(request)\n\treturn render(request,'logout.html')\n\ndef getProfile(request):\n\tnew=userProfile.objects.filter(uname=request.user.id)\n\tu=get_object_or_404(User, username=request.user.username)\n\tform=saveProfile(request.POST or None, request.FILES or None)\n\tif form.is_valid():\n\t\tif userProfile.objects.filter(uname=request.user.id).exists():\n\t\t\tmessages.add_message(request, messages.ERROR, 'You already saved your information. 
If wants to modify just click update button.')\n\t\telse:\n\t\t\tinstance=form.save(commit=False)\n\t\t\tinstance.uname=u\n\t\t\tinstance.save()\n\t\t\treturn redirect('userprofile')\n\treturn render(request, 'profile.html', { 'form':form,'new':new })\n\ndef setProfile(request):\n\tpost = userProfile.objects.filter(uname=request.user.id)\n\tcontext={'post':post}\n\treturn render(request, 'showProfile.html', context)\n\ndef myChannel(request):\n\tnew=userProfile.objects.filter(uname=request.user.id)\n\tpost=Channel.objects.filter(user=request.user.id)\n\tcontext={'new':new, 'post':post}\n\treturn render(request, 'mychannel.html', context)\n\ndef createChannel(request):\n\tnew=userProfile.objects.filter(uname=request.user.id)\n\tu=get_object_or_404(User, username=request.user.username)\n\tif request.POST:\n\t\tuser=u\n\t\tname=request.POST.get('name')\n\t\ttag=request.POST.get('tags')\n\t\timg=request.FILES.get('file')\n\t\tchannel_obj=Channel.objects.create(user=user,name=name,tag_line=tag, img=img)\n\treturn render(request, 'newchannel.html',{'new':new})\n\ndef setUpload(request):\n\tnew=userProfile.objects.filter(uname=request.user.id)\n\tform=Channel.objects.filter(user=request.user)\n\tfor f in form:\n\t\tcid=f.name\n\tu=get_object_or_404(User, username=request.user.username)\n\tc=Channel.objects.get(name=cid)\n\tif request.POST:\n\t\tuser=u\n\t\tname=c\n\t\ttitle=request.POST.get('name')\n\t\tdesc=request.POST.get('tags')\n\t\tcat=request.POST.get('cat')\n\t\tvideo=request.FILES.get('file')\n\t\timg=request.FILES.get('imgfile')\n\t\tvideo_obj=Videos.objects.create(user=user,name=name,title=title, description=desc, category=cat, video=video, img=img)\n\t\treturn redirect('mychannel')\n\treturn render(request, 'upload.html',{'new':new,'form':form})\n\ndef getChannel(request, cid):\n\tpost=userProfile.objects.filter(uname=request.user.id)\n\ttitle=Channel.objects.filter(user=request.user.id)\n\tform=Videos.objects.filter(name=cid)\n\treturn render(request, 'channel.html', {'post':post, 'form':form,'title':title})\n\ndef showChannel(request, name):\n\tu=get_object_or_404(User, username=request.user.username)\n\tpost=userProfile.objects.filter(uname=request.user.id)\n\ttitle=Channel.objects.filter(name=name)\n\tfor t in title:\n\t\tform=Videos.objects.filter(name=t.id)\n\t\tc=t.id\n\t\tif 'Sub' in request.POST:\n\t\t\tt.subscribe=t.subscribe+1\n\t\t\tt.save()\n\t\tif 'unSub' in request.POST:\n\t\t\tt.subscribe=t.subscribe-1\n\t\t\tt.save()\n\n\tif request.POST and 'Sub' in request.POST:\n\t\tuser=u\n\t\tname=Channel.objects.get( name=name)\n\t\tif Subscriber.objects.filter(sub_name=name).exists():\n\t\t\tmessages.add_message(request, messages.ERROR, 'You have alredy subscribe this channel.')\n\t\telse:\n\t\t\tsubsobj=Subscriber.objects.create(user=user, sub_name=name)\n\n\n\tif request.POST and 'unSub' in request.POST:\n\t\tlists=Subscriber.objects.filter(sub_name=c)\n\t\tfor l in lists:\n\t\t\tif l.sub_name==c:\n\t\t\t\tl.delete()\n\tgets=Subscriber.objects.filter(sub_name=c)\n\tcontext={'post':post,'title':title, 'form':form,'gets':gets}\n\treturn render(request, 'showChannel.html',context)\n\ndef setSubscribe(request):\n\tpost=userProfile.objects.filter(uname=request.user.id)\n\tform=Channel.objects.filter(user=request.user.id)\n\tnew=Subscriber.objects.filter(user=request.user.id)\n\tfor n in new:\n\t\tgets=Channel.objects.filter(name=n.sub_name)\n\tcontext={'post':post,'form':form,'gets':gets}\n\treturn render(request, 'subscribe.html', 
context)","sub_path":"video/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"168925429","text":"# Generators\nclass Gen:\n    def __init__(self, n):\n        self.n = n\n        self.last = 0\n\n    def __next__(self):\n        return self.next()\n\n    def next(self):\n        if self.last == self.n:\n            raise StopIteration()\n\n        rv = self.last ** 2\n        self.last += 1\n        return rv\n\n\ng = Gen(100)\n\nwhile True:\n    try:\n        print(next(g))\n    except StopIteration:\n        break\n\n\ng = Gen(100000)\n","sub_path":"Expert/generators1.py","file_name":"generators1.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"218462261","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nUtility functions.\n\"\"\"\n\n\ndef _check_columns(df_to_check, cols) -> None:\n    \"\"\"Check that a list of required column names is in a data frame\n\n    Args:\n        df_to_check: A DataFrame to check columns on.\n        cols (Iterable[str]): Required columns.\n    Returns:\n        None\n    Raises:\n        ValueError: if required cols are not a subset of column names in\n            ``df_to_check``.\n    Examples:\n        >> df = pd.DataFrame({'col_a': [1,2], 'col_b': [2,4]})\n        >> check_columns(df, ['col_c'])\n        ValueError: Missing columns: `{col_c}`\n    \"\"\"\n    if isinstance(cols, str):\n        cols = [cols]\n\n    if not set(cols).issubset(df_to_check.columns):\n        missing_cols = set(cols).difference(df_to_check.columns)\n        raise ValueError(f\"Missing columns: `{missing_cols}`.\")\n\n    return None\n","sub_path":"dpipe/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"414354310","text":"import numpy as np\n\n# sample mean\nsamples = [9, 4, 0, 8, 1]\nprint(np.mean(samples))\n\n# sample mean, standard deviation\nw = [10.7, 11.7, 9.8, 11.4, 10.8, 9.9, 10.1, 8.8, 12.2, 11.0, 11.3,\n     11.1, 10.3, 10.0, 9.9, 11.1, 11.7, 11.5, 9.1, 10.3, 8.6, 12.1,\n     10.0, 13.0, 9.2, 9.8, 9.3, 9.4, 9.6, 9.2]\n\nxbar = np.mean(w)\nsd = np.std(w, ddof=1)\nprint('mean %.2f, standard deviation %.2f' %(xbar, sd))\n\n# confidence interval estimation\nimport scipy.stats\nalpha = 0.05\nzalpha = scipy.stats.norm.ppf(1-alpha/2)\nprint('zalpha : ', zalpha)\nprint('min : ', xbar - zalpha*sd/np.sqrt(len(w)), 'max : ', xbar + zalpha*sd/np.sqrt(len(w)))\n\nA = [31, 33, 29, 28, 25, 32, 32, 34, 26,\n     30, 29, 29, 32, 26, 27, 27, 25, 26,\n     33, 29, 25, 33, 32, 26, 28, 34, 32,\n     29, 33, 30, 30, 31, 26, 28, 28, 32]\nn = len(A)\nxbar = np.mean(A)\nstd = np.std(A, ddof=1)\nalpha = 0.05\nzalpha = scipy.stats.norm.ppf(1-alpha/2)\ndiff = zalpha * std / np.sqrt(n)\nsection = (xbar - diff, xbar + diff)\nprint('section : ', section)\n\n# population proportion estimation\nx = 48\nn = 150\nphat = x / n\nalpha = 0.05\nzalpha = scipy.stats.norm.ppf(1-alpha/2)\nsd = np.sqrt((phat * (1-phat))/n)\nci = [phat - zalpha*sd, phat + zalpha*sd]\nprint('phat %.3f, zalpha %.3f, sd %.3f' %(phat, zalpha, sd))\nprint(ci)\n\n","sub_path":"1/ai_mathmatics/Statistics/codes/ps06.py","file_name":"ps06.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"587471909","text":"\"\"\"\nCreate an iterator from a tuple that was built with a comprehension,\nthen print its values using next() inside a loop.\n\"\"\"\n\nmy_tuple = tuple(i.capitalize() for i in 'HelloWorld!PythonIsGreat!' if i.islower())\n\n\nchars = iter(my_tuple)\nwhile True:\n    try:\n        print(next(chars))\n    except StopIteration:\n        break\n","sub_path":"PythonFlow_March2018/Homework/galinakorotenko/HW-2/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"496428240","text":"import pygame\npygame.init()\nfrom Constants import *\n\nclass Wall:\n\n    COLOR = (64, 64, 64)\n\n    def __init__(self, points, innerPointCount=0):\n        if innerPointCount == 0:\n            innerPointCount = int((len(points) / 2))\n        self.points = points\n        self.innerPoints = self.points[0:innerPointCount]\n\n    def draw(self, win, x, y, fov):\n        deltaX, deltaY = WIDTH / 2 - x, HEIGHT / 2 - y\n        points = [(a + deltaX, b + deltaY) for a, b in self.points]\n        pygame.draw.polygon(win, self.COLOR, points)\n","sub_path":"Maps/Wall.py","file_name":"Wall.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"636257208","text":"import numpy as np\nfrom datasets.medical_image_utils import read_nii\nfrom datasets.Abdomen import config\nimport os\nimport cv2\nfrom tqdm import tqdm\n\n\ndef exist_required_labels(label_slice):\n    if np.sum(label_slice) != 0:\n        return True\n    return False\n\n\ndef convert_nii2png_with_label(case_name, labels_mapping={1: 1, 6: 2}, save_dir=None):\n    img_path = os.path.join(config.RAW_DATA_TRAINING_DIR, 'img', 'img' + case_name)\n    label_path = os.path.join(config.RAW_DATA_TRAINING_DIR, 'label',\n                              'label' + case_name)\n    if not os.path.exists(img_path):\n        print(img_path, ' does not exist')\n        return [], []\n    img = read_nii(img_path)\n    label = read_nii(label_path)\n    print(np.shape(img), np.shape(label))\n    # apply the CT window (window width / window level)\n    window_center = config.get_dataset_config('V1')['window_center']\n    window_width = config.get_dataset_config('V1')['window_width']\n    window_left = window_center - window_width / 2\n    window_right = window_center + window_width / 2\n    img[img < window_left] = window_left\n    img[img > window_right] = window_right\n\n    # label mapping\n    zero_matrix = np.zeros_like(label)\n    for key in labels_mapping.keys():\n        zero_matrix[label == key] = labels_mapping[key]\n    label = zero_matrix\n    return_imgs = []\n    return_labels = []\n    for idx in tqdm(range(1, np.shape(img)[-1]-1)):\n        label_slice = label[:, :, idx]\n        img_slice = img[:, :, idx]\n        if exist_required_labels(label_slice):\n            if save_dir is not None:\n                img_png_path = os.path.join(save_dir, 'PNGs/img',\n                                            case_name + '_' + str(idx) + '.png')\n                label_png_path = os.path.join(save_dir, 'PNGs/label',\n                                              case_name + '_' + str(idx) + '.png')\n                label_vis_png_path = os.path.join(save_dir, 'PNGs/label_vis',\n                                                  case_name + '_' + str(idx) + '.png')\n                # print(img_png_path)\n                cv2.imwrite(img_png_path, np.asarray(img_slice, np.int))\n                cv2.imwrite(label_png_path, np.asarray(label_slice, np.int))\n                cv2.imwrite(label_vis_png_path, np.asarray(label_slice * 100, np.int))\n            return_imgs.append(img_slice)\n            return_labels.append(label_slice)\n    return np.asarray(return_imgs, np.int), np.asarray(return_labels, np.int)\n\n\nif __name__ == '__main__':\n    img_slices, label_slices = convert_nii2png_with_label('0001.nii', save_dir=config.RAW_DATA_TRAINING_DIR)\n    print(np.shape(img_slices), np.shape(label_slices))","sub_path":"datasets/Abdomen/nii2PNG.py","file_name":"nii2PNG.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"216966044","text":"#!/usr/bin/env 
python3\n\nimport csv\n\nfrom datetime import date, timedelta\n\nSTOCKS_PATH = \"../../saves/stocks\"\nTSV_PATH = \"../../saves/tsv\"\n\nDATE = 0\nOPEN = 1\nHIGH = 2\nLOW = 3\nCLOSE = 4\nADJ_CLOSE = 5\nVOLUME = 6\n\ndef csv_to_list(csvin):\n string = csvin.read() + '\\n'\n string = list(string.split('\\n'))\n for line in range(len(string)):\n string[line] = list(string[line].split(','))\n for i in range(len(string[line])):\n if i == 5:\n string[line][i] = string[line][i + 1]\n string[line].pop(5)\n break\n if string[line][i] == '':\n string[line][i] = 'NULL'\n return(string)\n\ndef list_to_tsv(stock_symbol: str, interval: str):\n stocks_path = f'{STOCKS_PATH}/{stock_symbol}_{interval}.csv'\n tsv_path = f'{TSV_PATH}/{stock_symbol}_{interval}.tsv'\n\n with open (stocks_path, 'r') as csvin, open(tsv_path, 'w') as tsvout:\n tsv_write = csv.writer(tsvout, delimiter='\\t')\n string = csv_to_list(csvin)\n for row in string:\n if row[DATE] == 'NULL' or row[OPEN] == 'NULL':\n continue\n tsv_write.writerow(row)\n csvin.close()\n tsvout.close()\n return (0)\n","sub_path":"predictor/src/backend/list_to_tsv.py","file_name":"list_to_tsv.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"285520509","text":"import xml.etree.ElementTree as ET\r\n\r\ndef _serializeElements(parent, elements, tag, bLoad):\r\n if bLoad :\r\n elements = parent.findall(tag)\r\n else:\r\n parent.extend(elements)\r\n\r\ndef _serializeElement(parent, tag, bLoad):\r\n element = None\r\n if bLoad :\r\n element = parent.find(tag)\r\n else:\r\n element = ET.SubElement(parent, tag)\r\n return element\r\n\r\ndef _serializeAndCheckElement(parent, tag, bLoad):\r\n element = _serializeElement(parent, tag, bLoad)\r\n if element == None and bLoad:\r\n raise RuntimeError(\"Cannot find element \\\"\" + tag + \"\\\" under \\\"\" + parent.tag + \"\\\".\")\r\n return element\r\n\r\ndef __prettifyText(element, indent): \r\n if element.text:\r\n strs = element.text.splitlines()\r\n element.text = indent\r\n for line in strs:\r\n element.text += line + indent\r\n if not len(element):\r\n element.text = element.text[:len(element.text) - 1]\r\n \r\n if len(element):\r\n if not element.text:\r\n element.text = indent\r\n\r\ndef __prettifyElement(element, level=0):\r\n indent = \"\\n\" + level*\"\\t\"\r\n nextIndent = indent + \"\\t\"\r\n \r\n __prettifyText(element, nextIndent)\r\n \r\n if level > 0:\r\n if not element.tail:\r\n element.tail = indent\r\n else:\r\n element.tail += indent\r\n \r\n elementNumber = len(element)\r\n if elementNumber: \r\n index = 0\r\n for child in element:\r\n __prettifyElement(child, level + 1)\r\n index += 1\r\n if index == elementNumber:\r\n child.tail = child.tail[:len(child.tail) - 1]\r\n\r\ndef prettifyTree(tree):\r\n root = tree.getroot()\r\n __prettifyElement(root)\r\n\r\ndef __deprettifyText(element, indent):\r\n if element.text:\r\n if not len(element):\r\n element.text += '\\t'\r\n \r\n indentLength = len(indent)\r\n element.text = element.text[:len(element.text) - indentLength]\r\n if len(element.text) > 0:\r\n element.text = element.text[1:]\r\n \r\n strs = element.text.splitlines()\r\n element.text = \"\"\r\n for line in strs:\r\n element.text += line[indentLength - 1:] + '\\n'\r\n element.text = element.text[:len(element.text) - 1]\r\n \r\ndef __deprettifyElement(element, level=0):\r\n indent = \"\\n\" + level*\"\\t\"\r\n nextIndent = indent + \"\\t\"\r\n indentLength = len(indent)\r\n \r\n elementNumber = 
len(element)\r\n if elementNumber: \r\n index = 0\r\n for child in element:\r\n __deprettifyElement(child, level + 1)\r\n index += 1\r\n if index == elementNumber:\r\n child.tail += '\\t'\r\n \r\n if level > 0:\r\n indentLength = len(indent)\r\n element.tail += element.tail[:len(element.tail) - indentLength]\r\n \r\n __deprettifyText(element, nextIndent)\r\n\r\ndef deprettifyTree(tree):\r\n root = tree.getroot()\r\n __deprettifyElement(root)","sub_path":"utility/xml_helper.py","file_name":"xml_helper.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305216122","text":"import tensorflow as tf\n\nfrom .ops import fully_connected, initializers, get_variables, batch_norm\n\nHe_uniform = initializers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)\n\nclass Network(object):\n def __init__(self,\n session,\n input_shape,\n action_size,\n hidden_dims,\n use_batch_norm=True,\n action_merge_layer=-2,\n hidden_activation_fn=tf.nn.relu,\n hidden_weights_initializer=He_uniform,\n hidden_biases_initializer=tf.constant_initializer(0.0),\n output_activation_fn=None,\n output_weights_initializer=tf.random_uniform_initializer(-3e-4,3e-4),\n output_biases_initializer=tf.constant_initializer(0.0),\n name='NAF'):\n self.sess = session\n\n with tf.variable_scope(name):\n x = hidden_layer = tf.placeholder(tf.float32, [None] + list(input_shape), name='observations')\n u = tf.placeholder(tf.float32, [None, action_size], name='actions')\n is_train = tf.placeholder(tf.bool, name='is_train')\n\n n_layers = len(hidden_dims) + 1\n if n_layers > 1:\n action_merge_layer = \\\n (action_merge_layer % n_layers + n_layers) % n_layers\n else:\n action_merge_layer = 1\n\n hidden_layer = batch_norm(x, is_train)\n\n for idx, hidden_dim in enumerate(hidden_dims):\n # if batch_norm is used, apply activation_fn after batch norm,\n # and remove biases which is redundant\n hidden_layer = fully_connected(\n hidden_layer,\n num_outputs=hidden_dim,\n activation_fn=hidden_activation_fn if not use_batch_norm else None,\n weights_initializer=hidden_weights_initializer,\n biases_initializer=hidden_biases_initializer,\n scope='hid%d' % idx,\n )\n\n if use_batch_norm and idx != action_merge_layer:\n hidden_layer = hidden_activation_fn(batch_norm(hidden_layer, is_train))\n\n def make_output(layer, num_outputs, activation_fn=None, scope='out'):\n return fully_connected(\n layer,\n num_outputs=num_outputs,\n activation_fn=output_activation_fn if activation_fn == None else activation_fn,\n weights_initializer=output_weights_initializer,\n biases_initializer=output_biases_initializer,\n scope=scope,\n )\n\n with tf.variable_scope('advantage'):\n l = make_output(hidden_layer, (action_size * (action_size + 1))/2, scope='l')\n mu = make_output(hidden_layer, action_size, scope='mu')\n\n columns = []\n for idx in xrange(action_size):\n column = tf.pad(tf.slice(l, (0, 0), (-1, action_size - idx)), ((0, 0), (idx, 0)))\n columns.append(column)\n\n L = tf.pack(columns, axis=1)\n P = tf.batch_matmul(L, tf.transpose(L, (0, 2, 1)))\n\n tmp = tf.expand_dims(u - mu, 2)\n A = -tf.batch_matmul(tf.transpose(tmp, [0, 2, 1]), tf.batch_matmul(P, tmp))/2\n A = tf.reshape(A, [-1, 1])\n\n with tf.variable_scope('value'):\n V = make_output(hidden_layer, 1, scope='V')\n\n Q = A + V\n\n with tf.variable_scope('optimizer'):\n target_Q = tf.placeholder(tf.float32, [None], name='target_Q')\n loss = tf.reduce_mean(tf.square(target_Q - Q), name='loss')\n\n 
self.x = x\n self.u = u\n self.loss = loss\n self.target_Q = target_Q\n self.is_train = is_train\n\n self.V, self.L, self.P, self.mu, self.A, self.Q = V, L, P, mu, A, Q\n self.variables = get_variables(name)\n\n def predict(self, state):\n return self.sess.run(self.mu, {self.x: state, self.is_train: False})\n\n def make_copy_from(self, network):\n self.assign_op = {}\n\n for from_, to_ in zip(network.variables, self.variables):\n self.assign_op[to_.name] = to_.assign(from_)\n\n def update_from(self, network):\n for variable in self.variables:\n self.sess.run(self.assign_op[variable.name])\n","sub_path":"src/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"102457886","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Filename: mysqlhelper\n# @Date : 2017-02-23 08:46:47\n# @Author : ZhengZhe\n# @Blog : http://www.osgnu.com\n\nimport MySQLdb\nimport conf\n\nclass MysqlHelper(object):\n def __init__(self):\n self.__conn_dict = conf.conn_dict\n\n def Get_Dict(self,sql):\n conn = MySQLdb.connect(**self.__conn_dict)\n cur = conn.cursor(cursorclass=MySQLdb.cursors.DictCursor)\n\n recount = cur.execute(sql)\n data = cur.fetchall()\n\n list1 = []\n list2 = []\n for i in data:\n list1.append(i.get('words'))\n list2.append(i.get('reply'))\n d1 = dict(zip(list1,list2))\n return d1\n\n\n #cur.close()\n #conn.close()\n #return data\n#d = MysqlHelper()\n#b = d.Get_Dict('select words,reply from words')\n#print b\n\n","sub_path":"homework/robot/mysqlhelper.py","file_name":"mysqlhelper.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"143187709","text":"from unittest import TestCase\n\nfrom dask.datasets import timeseries\n\nfrom dask_sql import Context\n\n\nclass TimeSeriesTestCase(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.c = Context()\n\n df = timeseries(freq=\"1d\").persist()\n cls.c.register_dask_table(df, \"timeseries\")\n\n def test_complex_query(self):\n result = self.c.sql(\n \"\"\"\n SELECT\n lhs.name,\n lhs.id,\n lhs.x\n FROM\n timeseries AS lhs\n JOIN\n (\n SELECT\n name AS max_name,\n MAX(x) AS max_x\n FROM timeseries\n GROUP BY name\n ) AS rhs\n ON\n lhs.name = rhs.max_name AND\n lhs.x = rhs.max_x\n \"\"\"\n )\n\n # should not fail\n df = result.compute()\n\n self.assertGreater(len(df), 0)\n","sub_path":"tests/integration/test_complex.py","file_name":"test_complex.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653247692","text":"import unittest\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\nfrom binstar_client.utils.test.utils import example_path\nfrom binstar_client.utils.projects.filters import (VCSFilter,\n LargeFilesFilter,\n ProjectIgnoreFilter,\n FilesFilter,\n ignore_patterns,\n remove_comments)\n\n\nclass IgnorePatternsTestCase(unittest.TestCase):\n def test_ignore_patterns(self):\n patterns = ignore_patterns(example_path('bokeh-apps/weather'))\n self.assertListEqual(patterns, ['*.rb', '*.pyc', '__pycache__'])\n\n\nclass RemoveCommentsTestCase(unittest.TestCase):\n def test_commented_lines(self):\n c1 = \"# Ingored line\"\n c2 = \"*.pyc # python files\"\n c3 = \"__pycache__\"\n c4 = \"\"\n\n self.assertEqual(\n remove_comments(c1),\n \"\"\n )\n self.assertEqual(\n remove_comments(c2),\n \"*.pyc\"\n )\n 
self.assertEqual(\n remove_comments(c3),\n \"__pycache__\"\n )\n self.assertEqual(\n remove_comments(c4),\n \"\"\n )\n\n\nclass LargeFilesFilterTestCase(unittest.TestCase):\n def test_valid_file(self):\n pfile = mock.MagicMock(size=100)\n self.assertTrue(LargeFilesFilter([]).run(pfile))\n\n def test_invalid_file(self):\n pfile = mock.MagicMock(size=3097152)\n self.assertFalse(LargeFilesFilter([]).run(pfile))\n\n\nclass FilesFilterTestCase(unittest.TestCase):\n def test_valid_file(self):\n pfile = mock.MagicMock(relativepath='other-file')\n self.assertTrue(FilesFilter([]).run(pfile))\n\n def test_invalid_file(self):\n pfile = mock.MagicMock(relativepath='.anaconda/project-local.yml')\n self.assertFalse(FilesFilter([]).run(pfile))\n\n def test_invalid_file_yaml(self):\n pfile = mock.MagicMock(relativepath='.anaconda/project-local.yaml')\n self.assertFalse(FilesFilter([]).run(pfile))\n\n\nclass VCSFilterTestCase(unittest.TestCase):\n def test_can_test(self):\n assert VCSFilter([]).can_filter()\n\n def test_git_files(self):\n pfile = mock.MagicMock(relativepath=\".git/hooks/pre-applypatch\", fullpath=\"\")\n self.assertFalse(VCSFilter([]).run(pfile))\n\n def test_svn_files(self):\n pfile = mock.MagicMock(relativepath=\".svn/hooks/pre-applypatch\", fullpath=\"\")\n self.assertFalse(VCSFilter([]).run(pfile))\n\n def test_valid_files(self):\n pfile = mock.MagicMock(relativepath=\"folder/hooks/applypatch\", fullpath=\"\")\n self.assertTrue(VCSFilter([]).run(pfile))\n\n\nclass ProjectIgnoreFilterTestCase(unittest.TestCase):\n def test_can_filter(self):\n example_path('bokeh-apps/weather')\n self.assertTrue(\n ProjectIgnoreFilter([], basepath=example_path('bokeh-apps/weather'))\n )\n\n def test_cant_filter(self):\n example_path('bokeh-apps/timeout.py')\n self.assertTrue(\n ProjectIgnoreFilter([], basepath=example_path('bokeh-apps/weather'))\n )\n\n def test_run(self):\n pfile1 = mock.MagicMock(relativepath=\"dir/file.rb\")\n pfile2 = mock.MagicMock(relativepath=\"dir/file.py\")\n\n f = ProjectIgnoreFilter([], basepath=example_path('bokeh-apps/weather'))\n\n self.assertFalse(f.run(pfile1))\n self.assertTrue(f.run(pfile2))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"binstar_client/utils/projects/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"132008359","text":"import logging\nimport socket\n\nimport pytest\nfrom aiohttp import ClientSession\nfrom yarl import URL\n\nfrom server.__main__ import parser\nfrom server.http_server import HTTPServer\n\nlog = logging.getLogger(__name__)\n\n\n@pytest.fixture\ndef services(arguments, rest_service):\n return [\n rest_service,\n ]\n\n\n@pytest.fixture(scope=\"session\")\ndef localhost():\n params = (\n (socket.AF_INET, \"127.0.0.1\"),\n (socket.AF_INET6, \"::1\"),\n )\n for family, addr in params:\n with socket.socket(family, socket.SOCK_STREAM) as sock:\n try:\n sock.bind((addr, 0))\n except Exception:\n pass\n else:\n return addr\n raise RuntimeError(\"localhost unavailable\")\n\n\n@pytest.fixture\ndef services(rest_service):\n return [\n rest_service,\n ]\n\n\n@pytest.fixture\ndef arguments(localhost, rest_port):\n return parser.parse_args(\n [\n \"--log-level=debug\",\n f\"--api-address={localhost}\",\n f\"--api-port={rest_port}\",\n ]\n )\n\n\n@pytest.fixture\ndef rest_port(aiomisc_unused_port_factory) -> int:\n return aiomisc_unused_port_factory()\n\n\n@pytest.fixture\ndef 
rest_url(localhost, rest_port):\n return URL(f\"http://{localhost}:{rest_port}\")\n\n\n@pytest.fixture\nasync def rest_service(arguments):\n return HTTPServer(\n address=arguments.api_address,\n port=arguments.api_port,\n )\n\n\nasync def test_app(rest_url):\n async with ClientSession() as session:\n resp = await session.get(rest_url)\n assert resp.status == 200\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243218795","text":"import numpy as np\nimport datetime as dt\nimport sqlalchemy\nfrom sqlalchemy import create_engine, func\nimport pandas as pd\nfrom flask import Flask, jsonify, render_template, url_for\nimport json\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///data/ironman.sqlite\")\n\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n@app.route(\"/\")\ndef home():\n return render_template('index.html')\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html')\n\n# this is how we send data to javascript\n@app.route(\"/api/race_stats\")\ndef race_stats_api():\n # connect to our database\n conn = engine.connect()\n # return query results\n return pd.read_sql(\"select BIB, Last_Name, First_Name, Country, Gender, Division, Swim, Bike, Run,\\\n Overall, Division_Rank, Gender_Rank, Overall_Rank, Latitude_average, Longitude_average\\\n FROM race_stats Order by Division\", conn).to_json(orient='records')\n\n# display with time on y or normalize to who comleted \n# divide everything by the fastest time ever\n\n# create route for bar chart data\n@app.route(\"/api/bar_chart/\")\ndef bar_chart_api(selectedItem):\n\n # create gender variable for filtering\n genderLetter = 'M'\n if selectedItem.startswith('F'):\n genderLetter = 'F'\n \n # connect to our database\n conn = engine.connect()\n # return query results\n query = pd.read_sql(f\"select Division, avg(Swim) as Swim, avg(Bike) as Bike, avg(Run) as Run FROM race_stats WHERE Division LIKE '{genderLetter}%' GROUP BY Division ORDER BY Division\", conn).to_json(orient='records')\n\n return(query)\n\n@app.route(\"/api/bar_chart/initial\")\ndef init_bar_chart():\n\n # connect to our database\n conn = engine.connect()\n # return query results\n return pd.read_sql(\"select Division, avg(Swim) as Swim, avg(Bike) as Bike, avg(Run) as Run FROM race_stats WHERE Division LIKE 'F%' GROUP BY Division ORDER BY Division\", conn).to_json(orient='records')\n \n# create route for top ten table\n@app.route(\"/api/top_ten_table/\")\ndef table_api(selectedItem):\n # connect to our database\n conn = engine.connect()\n # return query results\n return pd.read_sql(\"SELECT Division_Rank, Division, First_Name, Last_Name, Country, Gender, Overall, Overall_Rank FROM race_stats WHERE Division = '%s' AND Division_Rank < 11 ORDER BY Division_Rank\" %selectedItem, conn).to_json(orient='records')\n \n@app.route(\"/api/world_map/\")\ndef map_api(selectedItem):\n # connect to our database\n conn = engine.connect()\n # return query results\n return pd.read_sql(\"SELECT BIB, Last_Name, First_Name, Division, Country, Latitude_average, Longitude_average FROM race_stats WHERE Division 
= '%s'\" %selectedItem, conn).to_json(orient='records')\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"628118330","text":"from pymongo import MongoClient\nfrom datetime import datetime, timedelta\nimport time\nimport re\nfrom copy import deepcopy\n\nstartTime = time.time()\n\nclient = MongoClient()\ndb = client.Vizurbi\n\n# Start by fetching all of today's data\nstops = list(db.Daily_Stops_Schedules.find())\n# Create two dictionaries: one to archive, the other to put back into the daily collection\ntoDump = deepcopy(stops)\ntoSave = deepcopy(toDump)\n# Then sort through the departure times\nfor stop in range(0, len(stops)):\n    trips = stops[stop]['departures']\n    for trip in range(0, len(trips)):\n        # Lean on Python list comprehensions to keep this fast\n        departures = [re.search('(\\d{4})-(\\d{2})-(\\d{2}) (\\d{2}):(\\d{2}):(\\d{2})', departure)\n                      for departure in trips[trip]['times']]\n        dates = [datetime(int(date.group(1)), int(date.group(2)),\n                          int(date.group(3)), int(date.group(4)),\n                          int(date.group(5)), int(date.group(6))) for date in departures]\n        # Filter the departure times against 23 hours before now\n        before = datetime.now() - timedelta(hours = 23)\n        # Insert into the matching dictionaries\n        toSave[stop]['departures'][trip]['times'] = [str(date) for date in dates if date > before]\n        toDump[stop]['departures'][trip]['times'] = [str(date) for date in dates if date < before]\n        \n# Keep the fresh data\ndss = db.Daily_Stops_Schedules\nfor stop in toSave:\n    dss.save({'_id' : stop['_id'], 'departures' : stop['departures']})\n    \n# Archive the old data\nass = db.Alltime_Stops_Schedules\ntoday = datetime.now().date()\ndays = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')\nid = str(today.year) + '/' + str('%02d'%today.month) + '/' + str('%02d'%today.day)\nfor dump in toDump:\n    ass.update({'_id' : id}, {'$set' : {str(dump['_id']) : dump}}, True)\n\nprint(str(time.time() - startTime) + \" seconds\")\n","sub_path":"Insertions/Schedules_Transfer.py","file_name":"Schedules_Transfer.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"113823198","text":"import pandas as pd\r\n#1\r\n#This file's purpose is to reclassify diseases into broader categories.\r\n#This is done by iterating over the dataframe, and checking if the 6th item in each row is within the predefined list\r\n#Each predefined list is manually created and curated by the user.\r\n#Each item in each predefined list should be spelled exactly how it is in FAERS \"pt\"\r\n#At the end of the script, a new array is created from the original, but only keeping records that list one of the new groups just created\r\n# ^ basically everything is stripped except for the adverse events of interest\r\n\r\n\r\n#To increase total data size/more rows, increase the size of groups by adding in more criteria (like expanding cardiac to be more broad by including additional cardiac AE's)\r\n\r\n\r\n\r\ndef check(los):\r\n\tif los[6] in cardiac:\r\n\t\tlos[6]='Cardiac'\r\n\treturn los\r\n\r\n'''def combo_pt(sub_df):\r\n\tif len(sub_df[(sub_df['pt']=='Seizure') | (sub_df['pt']=='Muscle rigidity')])>2:\r\n\t\treturn sub_df.replace({'Seizure':'Serotonin syndrome','Muscle rigidity':'Serotonin syndrome'})\r\n\tif len(sub_df[(sub_df['pt']=='Seizure') | (sub_df['pt']=='Muscle spasticity')])>2:\r\n\t\treturn sub_df.replace({'Seizure':'Serotonin syndrome','Muscle spasticity':'Serotonin syndrome'})\r\n\telse:\r\n\t\treturn sub_df\r\n'''\r\n\r\ndf=pd.read_csv(r'faers_base.csv',low_memory=False)\r\n#dfd=pd.DataFrame(columns='primaryid\tcaseid\tage\tsex\toccr_country\tprod_ai\tpt\tdrugname'.split('\\t'))\r\n\r\n#in order to begin grouping diseases into categories, build your lists of disease.\r\n#Note: They must be present in your raw data frame from FAERS.\r\n#Note: Functionality for disease combos is in combo_pt (there are no cases so far with both)\r\n\r\ncardiac=['Acute cardiac event',\r\n       'Cardiac arrest','Sudden death','Cardiac death',\r\n       'Arrhythmia','Cardiac arrest','Cardiac arrest neonatal','Sudden cardiac death']\r\n#ss=['Serotonin syndrome','Muscle rigidity']\r\ndeath=['Sudden death','Sudden infant death syndrome','Accidental death',\r\n       'Brain death','Premature baby death','Foetal death','Apparent death',\r\n       'Death neonatal']\r\nprint('beginning reclassification')\r\ncases=list(set(df['caseid']))\r\n\r\n'''\r\nfor i in cases:\r\n\tdfd=pd.concat([dfd,combo_pt(df[df['caseid']==i])])'''\r\n\t\r\nprint('done reclassifying SS')\r\n#df=dfd\r\n\r\n#turn into an array\r\narr=[list(i) for i in df.values]\r\nbefore=[i[6] for i in arr]\r\n\r\nfor i in arr:\r\n    if i[6] in cardiac:\r\n        i[6]='Cardiac'\r\n#    if i[6] in ss:\r\n#        i[6]='Serotonin syndrome'\r\n    if i[6] in death:\r\n        i[6]='Death'\r\n        \r\nafter=[i[6] for i in arr] \r\nprint('Cardiac: (before: %d) | (after: %d)'%(before.count('Cardiac'),after.count('Cardiac')))\r\n#print('Serotonin syndrome: (before: %d) | (after: %d)'%(before.count('Serotonin syndrome'),after.count('Serotonin syndrome')))\r\nprint('Death: (before: %d) | (after: %d)'%(before.count('Death'),after.count('Death')))\r\n\r\ndf1=pd.DataFrame(arr,columns=['primaryid', 'caseid', 'age', 'sex', 'occr_country','prod_ai', 'pt', 'drugname'])\r\n\r\ndf1.to_csv('faers_ALL_ae_grouping_altered_cardiac.csv')\r\n\r\n#this is done to trim the data down to increase speed and efficiency of entire system and SHOULD NOT BE USED\r\n#FOR ANYTHING ELSE EXCEPT FOR CREATING AE FREQUENCY MATRIX\r\nfinal_df=df1[(df1['pt']=='Cardiac') | (df1['pt']=='Death')]\r\nfinal_df.to_csv('faers_ae_grouping_altered_cardiac.csv')\r\n","sub_path":"faers_grouping_AE_1.py","file_name":"faers_grouping_AE_1.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"60651305","text":"# %load q03_stop_word_stemmer/build.py\n# Default imports\nfrom greyatomlib.nlp_day_01_project.q01_load_data.build import q01_load_data\nfrom greyatomlib.nlp_day_01_project.q02_tokenize.build import q02_tokenize\nfrom nltk.corpus import stopwords\nimport pandas as pd\nstop = set(stopwords.words('english'))\nfrom nltk.stem.porter import PorterStemmer\n\n\npath = 'data/20news-bydate-train/'\n# Your solution here:\ndef q03_stop_word_stemmer(path):\n    X_train = q02_tokenize(path)\n    stop_words=set(stopwords.words('english'))\n    porter_stemmer = PorterStemmer()\n    \n    i=0\n    for row in X_train:\n        sentence=[]\n        for word in row:\n            if word not in stop_words:\n                # append the stemmed form (previously the stem result was discarded)\n                sentence.append(porter_stemmer.stem(word))\n        X_train.iloc[i]=sentence\n        i+=1\n    return list(X_train)\n\n\n","sub_path":"q03_stop_word_stemmer/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"244728755","text":"# -*- coding: utf-8 -*-\r\n\r\nimport scrapy\r\n\r\nfrom luntan.items import LuntanItem\r\n\r\n\r\nclass TianyaSpider(scrapy.Spider):\r\n    name = \"tianya\"\r\n    allowed_domains = [\"bbs.tianya.cn\"]\r\n    start_urls = ['http://bbs.tianya.cn/list-1179-1.shtml',\r\n                  'http://bbs.tianya.cn/list.jsp?item=1179&nextid=1556938543000']\r\n\r\n    def parse(self, response):\r\n        item = LuntanItem()\r\n        ns = {\"re\": \"http://exslt.org/regular-expressions\"}\r\n        for col in response.xpath('//*[@id=\"main\"]/div[7]/table/tbody/tr'):\r\n            # values scraped by xpath contain /r/n/t whitespace; normalize-space() strips it\r\n            item['title'] = col.xpath('normalize-space(td[1]/a/text())').extract()\r\n            item['author'] = col.xpath('normalize-space(td[2]/a/text())').extract()\r\n            item['click_num'] = col.xpath('td[3]/text()').extract()\r\n            item['response_num'] = col.xpath('td[4]/text()').extract()\r\n            item['response_date'] = col.xpath('td[5]/text()').extract()\r\n            yield item\r\n        next = response.xpath('//*[@id=\"main\"]/div[8]/div/a[3]/@href').extract_first()\r\n        if next is not None:\r\n            url = response.urljoin(next)\r\n            yield scrapy.Request(url,callback = self.parse)\r\n","sub_path":"luntan/luntan/spiders/tianya.py","file_name":"tianya.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"523092343","text":"\"\"\"allocation\n\nRevision ID: 17a6c2222c43\nRevises: \nCreate Date: 2016-08-17 12:29:30.588186\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = \"17a6c2222c43\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import column, func, table\n\n\ndef upgrade():\n    op.create_table(\"allocation\",\n                    sa.Column(\"id\", sa.Integer, primary_key=True),\n                    sa.Column(\"resource_id\", sa.String(50),\n                              nullable=False, index=True),\n                    sa.Column(\"status_id\", sa.Integer,\n                              nullable=False),\n                    sa.Column(\"start_time\", sa.DateTime(),\n                              nullable=False),\n                    sa.Column(\"end_time\", sa.DateTime(),\n                              nullable=False),\n                    sa.Column(\"user_id\", sa.Integer, nullable=True,\n                              index=True),\n                    sa.Column(\"created\", sa.DateTime, nullable=True,\n                              server_default=func.current_timestamp()),\n                    sa.Column(\"updated\", sa.DateTime, nullable=True,\n                              server_default=func.current_timestamp())\n                    )\n\n    op.create_table(\"status\",\n                    sa.Column(\"id\", sa.Integer, primary_key=True),\n                    sa.Column(\"name\", sa.String(50), nullable=False),\n                    sa.Column(\"created\", sa.DateTime, nullable=True,\n                              server_default=func.current_timestamp()),\n                    sa.Column(\"updated\", sa.DateTime, nullable=True,\n                              server_default=func.current_timestamp())\n                    )\n\n    op.create_foreign_key(\"fk_allocation_status\", \"allocation\",\n                          \"status\", [\"status_id\"], [\"id\"],\n                          ondelete=\"CASCADE\")\n\n    op.create_unique_constraint(\"ux_allocation\",\n                                \"allocation\",\n                                [\"resource_id\", \"user_id\",\n                                 \"start_time\", \"end_time\",\n                                 \"status_id\"])\n\n\ndef downgrade():\n    op.drop_constraint(\"fk_allocation_status\", \"allocation\",\n                       type_=\"foreignkey\")\n    op.drop_constraint(\"ux_allocation\",\n                       \"allocation\", type_=\"unique\")\n    op.drop_table(\"allocation\")\n    
op.drop_table(\"status\")\n","sub_path":"resource/migrations/versions/17a6c2222c43_allocation.py","file_name":"17a6c2222c43_allocation.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493246073","text":"'''\nWrite fortran code from a list of expressions\n'''\n\n__all__=['code_func']\n\nfrom ..basics import *\n\nclass CodedFunction:\n def __init__(self,name,inargs,outargs):\n self.name=name\n self.inargs=inargs\n self.outargs=outargs\n\n def pretty(self):\n s='--%s--\\n'%self.name\n s+='IN: %s\\n'%(', '.join([str(x) for x in self.inargs]))\n for argname in self.inargs:\n if isinstance(self.inargs[argname],np.ndarray):\n V=sy.IndexedBase(argname)\n choices=[r_[0:x] for x in self.inargs[argname].shape]\n for argidx in cartesian_product(*choices):\n s+=sy.pretty(sy.Eq(self.inargs[argname][tuple(argidx)],V[tuple(argidx)]))\n s+=\"\\n\"\n else:\n pass\n #V=sy.Symbol(argname)\n #s+=sy.pretty(sy.Eq(V,self.inargs[argname]))\n #s+=\"\\n\"\n s+='\\n'\n\n s+='OUT: %s \\n'%(', '.join([str(x) for x in self.outargs]))\n for argname in self.outargs:\n if isinstance(self.outargs[argname],np.ndarray):\n V=sy.IndexedBase(argname)\n choices=[r_[0:x] for x in self.outargs[argname].shape]\n for argidx in cartesian_product(*choices):\n s+=sy.pretty(sy.Eq(V[tuple(argidx)],self.outargs[argname][tuple(argidx)]))\n s+=\"\\n\"\n else:\n V=sy.Symbol(argname)\n s+=sy.pretty(sy.Eq(V,self.outargs[argname]))\n s+=\"\\n\"\n\n return s\n\ndef code_func(exprs_list,args_list,fn=None,nm=None,cse=True,user_functions={}):\n '''\n args_list may be either\n - a tuple (in which case it is assumed that all variables are real(8)s treated separately)\n - an odictionary of the form {'name': ('real(8)',[variable1,variable2]), 'name2': ...}\n\n exprs may be either\n - a tuple (in which case it is assumed that all out-expressions are lumped into an array\n of type real(8)\n - a odictinoary of the form {'name': (('real(8),'inout','addin'),[expr1,expr2]), 'name2':...} as before\n\n '''\n\n #########################3\n # format exprs_list\n if isinstance(exprs_list,tuple):\n exprs_old=exprs_list\n exprs_list=collections.OrderedDict()\n exprs_list['rez']=(('real(8)','out'),list(exprs_old))\n assert isinstance(exprs_list,collections.OrderedDict),\"exprs must be either tuple or ordered dictionary\"\n\n # replace oneseys\n for x in exprs_list:\n try:\n vecsize=len(exprs_list[x][1])\n except TypeError:\n exprs_list[x]=(exprs_list[x][0],[exprs_list[x][1]])\n\n #########################3\n # format args_list\n if isinstance(args_list,tuple):\n args_list_old=args_list\n args_list=collections.OrderedDict()\n for x in args_list_old:\n args_list[str(x)]=('real(8)',[x])\n assert isinstance(args_list,collections.OrderedDict),\"args_list must be either tuple or ordered dictionary\"\n\n # replace oneseys \n for x in args_list:\n try:\n vecsize=len(args_list[x][1])\n except TypeError:\n args_list[x]=(args_list[x][0],[args_list[x][1]])\n\n for x in args_list:\n if len(args_list[x][1])==1:\n assert x==str(args_list[x][1][0]), \"You want to call %s %s?\"%(str(args_list[x][1][0]),x)\n\n ######################### \n # format args, exprs, replace pi, max\n\n args=[]\n for x in args_list:\n args.extend(args_list[x][1])\n exprs=[]\n\n backhash={}\n for x in exprs_list:\n backhash[x]=slice(len(exprs),len(exprs)+len(exprs_list[x][1]))\n exprs.extend(exprs_list[x][1])\n for i in range(len(exprs)):\n exprs[i]=exprs[i].subs(sy.pi,np.pi)\n 
exprs[i]=exprs[i].subs(sy.Integer(0),sy.Float(0))\n\n ###############################\n # count atoms\n atms=set()\n for ex in exprs:\n atms=atms.union(ex.atoms(sy.Symbol))\n assert set(args).issuperset(atms),str(str(args)+str(atms)+\"you are MISSING\"+str(set(atms).difference(set(args))))\n\n ###############################\n # optionally run CSE\n if cse:\n replacements,new_exprs=sy.cse(exprs,optimizations='basic')\n #replacements,new_exprs=sy.cse(exprs)\n helper_dict=dict(replacements)\n all_helpers=[x for (x,y) in replacements]\n exprs=new_exprs\n\n allnames=''.join([str(x) for x in args])\n for x in all_helpers:\n if str(x) in allnames:\n raise Exception(\"You named something the same as I wanted to name my helper.\")\n else:\n helper_dict={}\n all_helpers=[]\n\n ###############################\n # construct argin_string\n argin_string=''.join([' %s,&\\n'%x for x in args_list])\n argout_string=''.join([' %s,&\\n'%x for x in exprs_list])\n argout_string=argout_string[:-3]\n\n\n ###############################\n ###############################\n ###############################\n ###############################\n # writing code\n s='\\n\\n! %s {{{1\\n'%nm\n\n ###############################\n # construct defines\n for x in args_list:\n if len(args_list[x][1])==1:\n pass\n #s+='#define %s %s\\n'%(args_list[x][1][0],x)\n else:\n for i,v in enumerate(args_list[x][1]):\n s+='#define %s %s(%d)\\n'%(str(v),x,i+1)\n\n\n ###############################\n # begin outer template\n s+=r'''\n\nsubroutine %s(%s %s)\n use iso_c_binding\n implicit none'''%(nm,\"&\\n\"+argin_string,argout_string)+'\\n\\n'\n\n ###############################\n # specify output variables\n for x in exprs_list:\n #if len(exprs_list[x][1])==1:\n # s+=' %s,intent(%s) :: %s\\n'%(exprs_list[x][0][0],exprs_list[x][0][1],x)\n #else:\n s+=' %s,intent(%s) :: %s(%d)\\n'%(exprs_list[x][0][0],exprs_list[x][0][1],\n x,len(exprs_list[x][1]))\n\n s+='\\n\\n'\n\n ###############################\n # specify input variables\n for x in args_list:\n if len(args_list[x][1])==1:\n s+=' %s,intent(in) :: %s\\n'%(args_list[x][0],x)\n else:\n s+=' %s,intent(in) :: %s(%d)\\n'%(args_list[x][0],x,len(args_list[x][1]))\n\n s+='\\n\\n'\n\n ###############################\n # specify helpers\n for h in all_helpers:\n s+=' real(kind=8) :: %s\\n'%str(h)\n\n s+='\\n\\n'\n\n ###############################\n # execute helpers\n for h in all_helpers:\n s+=' %s=%s\\n'%(str(h),sy.fcode(helper_dict[h],source_format='free',user_functions=user_functions).strip())\n\n\n s+='\\n\\n'\n\n ###############################\n # execute expressions\n for outname in exprs_list:\n dct={}\n dct['outname']=outname\n descrip=exprs_list[outname][0]\n for i,expr in enumerate(exprs[backhash[outname]]):\n dct['i']=i+1\n dct['val']=sy.fcode(expr.evalf(),source_format='free',user_functions=user_functions).strip()\n #if len(exprs[backhash[outname]])==1:\n # dct['outname2']='%(outname)s'%dct\n #else:\n dct['outname2']='%(outname)s(%(i)d)'%dct\n\n if len(descrip)>2 and descrip[2]=='addin':\n if expr!=0:\n s+=' %(outname2)s=%(outname2)s+(%(val)s)\\n'%dct\n else:\n s+=' %(outname2)s=%(val)s\\n'%dct\n\n assert s.count('zoo')==0\n\n s+='end subroutine\\n\\n'\n\n ###############################\n # construct undefines\n for x in args_list:\n if len(args_list[x][1])==1:\n pass\n else:\n for i,v in enumerate(args_list[x][1]):\n s+='#undef %s\\n'%(str(v))\n\n ###############################\n # save to file\n if not (fn is None):\n assert nm is not None\n\n f=open(fn+'/%s.f95'%nm,'w')\n 
f.write(s)\n f.close()\n\n return s\n\n","sub_path":"msee_cmcv/batteries/symbolics/coding.py","file_name":"coding.py","file_ext":"py","file_size_in_byte":7897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398767656","text":"import re, time\n\nt_start = time.time()\nfile = open('plr01.txt','r')\ncontent = file.read()\nfile.close()\n\n\nresultd,result,date = {},[],[]\nb = re.compile('([^<]+v.[^<]+)[^\\[<]*\\[\\n[^<]+\\s+\\n()\\][^(]+\\([^)]+\\s+([1-2][0-9][0-9][0-9])\\)', re.MULTILINE)\n#rep = {\"condition1\": \"\", \"condition2\": \"text\"}\n#b = re.compile('([^<]* v. [^<]*)[^\\s]*\\s+(\\[\\s+[^\\]]+\\])', re.MULTILINE)\n#([^<]* v. [^<]*)[^\\s]*\\s+(\\[[^\\]]+\\])\n#(([|\\[)[^\\[]*(]|\\]))\n#a = re.compile('([^<]* v. [^<]*)')\nresult1 = b.findall(content)\n#DB = dict(result1)\nfor each in result1:\n if int(each[2])>=1995:\n date.append(each)\nfor every in date:\n new = every[:2]\n a = new[0].replace(\"\\n\",\" \").replace('','').replace('','')\n b = new[1].replace(\"\\n\",\"\").replace(\"[<\",\"[ <\").replace(' LINK-STATUS=\"ACTIVE\"',\"\").replace(' EFFECT=\"DEFAULT\"',\"\").replace(' RELATE=\"NO\"',\"\")\n #a = re.compile('\\s\\s+',\" \")\n #b = re.compile('\\s\\s+',\" \")\n result.append((a,b))\n\nresultd = dict(result)\nprint(resultd, \" \\nin \", time.time() - t_start,\"s\")\n\n'''with open(\"PLR01.txt\", \"w\") as myfile:\n for key in resultd:\n myfile.write(key + \"\\t\"+ (resultd[key]) + \"\\n\")\n myfile.close()\n \nwith open(\"test.txt\", \"w\") as myfile:\n for key in resultd:\n myfile.write(resultd[key] + \"\\n\")\n myfile.close()\n'''\n#print(resultd['Southard v. Texas Board of Criminal Justice'], \" \\nin \", time.time() - t_start,\"s\")\n","sub_path":"test/PLR01.py","file_name":"PLR01.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180177104","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n@AUTHOR:Joselyn\n@CONTACT:zhaojing17@foxmail.com\n@SOFTWERE:PyCharm\n@FILE:main2.py\n@TIME:2019/4/27 22:06\n@DES:\n'''\n\nimport cv2\nfrom matplotlib import pyplot as plt\nimg_name = 'fff.png'\nimg_name2 = 'fff1.png'\n\nimport numpy as np\nif __name__ == '__main__':\n img = cv2.imread(img_name)\n cv2.imshow('RGB', img)\n k = cv2.waitKey(0) # waitkey代表读取键盘的输入,括号里的数字代表等待多长时间,单位ms。 0代表一直等待\n if k ==27: # 键盘上Esc键的键值\n cv2.destroyAllWindows()\n\n b = img[:, :, 0]\n g = img[:, :, 1]\n r = img[:, :, 2]\n # r, g, b = cv2.split(img)\n img = cv2.merge([g, r, b])\n cv2.imshow('BRG', img)\n k = cv2.waitKey(0) # waitkey代表读取键盘的输入,括号里的数字代表等待多长时间,单位ms。 0代表一直等待\n if k == 27: # 键盘上Esc键的键值\n cv2.destroyAllWindows()\n\n school_number = 18023032\n x1 = 18\n y1 = 2\n x2 = 30 + 18\n y2 = 32 + 2\n img[x1:x2 + 1, y1,0] = 0\n img[x1:x2 + 1, y1,1] = 0\n img[x1:x2 + 1, y1,2] = 255\n img[x1:x2 + 1, y2,0] = 0\n img[x1:x2 + 1, y2,1] = 0\n img[x1:x2 + 1, y2,2] = 255\n img[x1, y1:y2 + 1,0] = 0\n img[x1, y1:y2 + 1,1] = 0\n img[x1, y1:y2 + 1,2] = 255\n img[x2, y1:y2 + 1,0] = 0\n img[x2, y1:y2 + 1,1] = 0\n img[x2, y1:y2 + 1,2] = 255\n\n # img[x1:x2 + 1, y1] = (0, 0, 255)\n # img[x1:x2 + 1, y2] = (0, 0, 255)\n # img[x1, y1:y2 + 1] = (0, 0, 255)\n # img[x2, y1:y2 + 1] = (0, 0, 255)\n cv2.imwrite(img_name2, img)\n img2 = cv2.imread(img_name2)\n cv2.imshow('BRG-1', img2)\n k = cv2.waitKey(0) # waitkey代表读取键盘的输入,括号里的数字代表等待多长时间,单位ms。 0代表一直等待\n if k == 27: # 键盘上Esc键的键值\n 
cv2.destroyAllWindows()\n\n","sub_path":"DL_1/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17189783","text":"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for official.nlp.tasks.masked_lm.\"\"\"\n\nimport tensorflow as tf\n\nfrom official.nlp.configs import bert\nfrom official.nlp.configs import encoders\nfrom official.nlp.data import pretrain_dataloader\nfrom official.nlp.tasks import masked_lm\n\n\nclass MLMTaskTest(tf.test.TestCase):\n\n def test_task(self):\n config = masked_lm.MaskedLMConfig(\n init_checkpoint=self.get_temp_dir(),\n scale_loss=True,\n model=bert.PretrainerConfig(\n encoder=encoders.EncoderConfig(\n bert=encoders.BertEncoderConfig(vocab_size=30522,\n num_layers=1)),\n cls_heads=[\n bert.ClsHeadConfig(\n inner_dim=10, num_classes=2, name=\"next_sentence\")\n ]),\n train_data=pretrain_dataloader.BertPretrainDataConfig(\n input_path=\"dummy\",\n max_predictions_per_seq=20,\n seq_length=128,\n global_batch_size=1))\n task = masked_lm.MaskedLMTask(config)\n model = task.build_model()\n metrics = task.build_metrics()\n dataset = task.build_inputs(config.train_data)\n\n iterator = iter(dataset)\n optimizer = tf.keras.optimizers.SGD(lr=0.1)\n task.train_step(next(iterator), model, optimizer, metrics=metrics)\n task.validation_step(next(iterator), model, metrics=metrics)\n\n # Saves a checkpoint.\n ckpt = tf.train.Checkpoint(model=model, **model.checkpoint_items)\n ckpt.save(config.init_checkpoint)\n task.initialize(model)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","sub_path":"models/official/nlp/tasks/masked_lm_test.py","file_name":"masked_lm_test.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"325849200","text":"# -*- coding:utf-8 -*-\n\nimport math\n\nimport tornado\nfrom .base import BaseHandler\n\nimport utils\nimport pages\n\n\nclass AdminUserHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n user_id = utils.get_current_user_id(self.current_user)\n page = self.get_argument(\"page\", \"\") or \"1\"\n\n page = int(page)\n row = 10\n\n conn = utils.create_connection()\n try:\n current_user = pages.get_user_info(conn, user_id)\n\n if not utils.check_is_admin(current_user.get(\"email\")):\n self.redirect(\"/500?error=没有权限\")\n return\n\n users = pages.get_mysite_users(conn, page, row)\n total = pages.get_mysite_users_total(conn).get(\"total\")\n\n max_page = math.ceil(total / row)\n page_info = {\"page\": page, \"max_page\": max_page}\n\n self.render(\n \"admin/user.html\",\n current_user=current_user,\n poster=None,\n users=users,\n page_info=page_info)\n except Exception as e:\n errcode, errmsg = utils.get_err_info(e.args)\n 
self.redirect(\"/500?error=%s\" % errmsg)\n finally:\n conn.close()\n\n\ndefault_handlers = [\n (r\"/admin/user\", AdminUserHandler),\n]\n","sub_path":"site/handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633585350","text":"# 几个人\ncandidate_count = 6\nimport random\n# [start, end]\ndef lucky(start, end, count, seed):\n rank = []\n for i in range(start, end+1):\n rank.append(i)\n random.seed(seed)\n random.shuffle(rank)\n return random.sample(rank, count)\n\n# 记录中奖的\nlucky_rank = []\n# 随机种子每个人告诉我一个数字 我累加起来然后随机\n# \nseed = 12+520+97+5+121+17\n\n# 只抽3个\nlucky_rank = lucky(1, candidate_count, 3, seed)\n\nlucky_rank.sort()\nprint (lucky_rank)","sub_path":"python/work/rand.py","file_name":"rand.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247575433","text":"from pydantic import BaseModel, Field\n\nfrom zmanim_bot.config import GEO_API_URL\nfrom zmanim_bot.misc import bot\n\n\nclass ReverseGeocodingApiResponse(BaseModel):\n latitude: float\n longitude: float\n lookup_source: str = Field(alias='lookupSource')\n plus_code: str = Field(alias='pluseCode')\n\n\nasync def get_location_name(lat: float, lng: float, locality: str) -> str:\n params = {\n 'latitude': lat,\n 'longitude': lng,\n 'localityLanguage': locality\n }\n async with bot.session.get(GEO_API_URL, params=params) as resp:\n raw_resp: dict = await resp.json()\n city = raw_resp.get('city')\n locality = raw_resp.get('locality')\n\n if city and locality and city != locality:\n name = f'{city}, {locality}'[:30]\n elif city or locality:\n name = (city or locality)[:30]\n else:\n name = f'{lat:.3f}, {lng:.3f}'\n return name\n\n\n","sub_path":"zmanim_bot/integrations/geo_client.py","file_name":"geo_client.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309580704","text":"#!/usr/bin/python3\n\"\"\" Script that creates the State “California” with the City\n“San Francisco” from the database hbtn_0e_100_usa\n\"\"\"\n\nfrom sys import argv\nfrom relationship_state import State, Base\nfrom relationship_city import City\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, relationship, state\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(\n argv[1], argv[2], argv[3]), pool_pre_ping=True)\n\n Base.metadata.create_all(engine)\n Session = sessionmaker(engine)\n session = Session()\n\n new_s = State(name='California')\n new_c = City(name='San Francisco', state=new_s)\n session.add(new_s)\n session.add(new_c)\n session.commit()\n session.close()\n","sub_path":"0x0F-python-object_relational_mapping/100-relationship_states_cities.py","file_name":"100-relationship_states_cities.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375404163","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nfrom src import settings\n\nengine = create_engine(settings.DB_URL, convert_unicode=True)\ndb = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\n\nBase = declarative_base()\nBase.query = db.query_property()\n\n\ndef 
init_db():\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. Otherwise\n # you will have to import them first before calling init_db()\n from src.models import API_ACCOUNT\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n\n # Create the fixtures\n mup_group = API_ACCOUNT(name='COINBASE', api_key=settings.COINBASE_API_KEY)\n db.add(mup_group)\n\n db.commit()\n","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"468468960","text":"# import enfloat\n# from entierFloatException import EntierFloatException as enReelException\n\n#Exception lancée lors d'une tentative d'affectation de valeur float a une variable int\nclass EntierFloat(Exception):\n def __init__(self, message, value):\n self.message = message\n self.value = value\n\n def __str__(self):\n return (repr(self.value))\n\nclass AST:\n identifiant = 0\n def __init__(self, type, value):\n self.type = type\n self.value = value\n self.sons = []\n\n def __str__(self):\n return '%s%s' % (self.value, self.sons)\n\n def __repr__(self):\n return '%s%s' % (self.value, self.sons)\n\n def e_toAsm(self):\n if self.type == 'NUMBER':\n return \"mov eax, %s\\n\" % self.value\n elif self.type == 'ID':\n return \"mov eax, [%s]\\n\" % self.value\n elif self.type == 'OPBIN':\n op1 = self.sons[0].e_toAsm()\n op2 = self.sons[1].e_toAsm()\n if self.value == '+':\n res = \"%s\\npush eax\\n%s\\npop ebx\\nadd eax, ebx\\n\" % (op1, op2)\n else: # suppose que l'op est un -\n res = \"%s\\npush eax\\n%s\\npop ebx\\nsub eax, ebx\\n\" % (op2, op1)\n return res\n\n def c_toAsm(self):\n if self.value == 'asgnt':\n return '%s\\nmov [%s], eax\\n' % (self.sons[1].e_toAsm(), self.sons[0])\n elif self.value == 'seq':\n return '%s%s' % (self.sons[0].c_toAsm(), self.sons[1].c_toAsm())\n else:\n AST.identifiant = AST.identifiant + 1\n id = AST.identifiant\n return '''debutboucle%s:\n%s\ncmp eax, 0\njz finboucle%s\n%s\njmp debutboucle%s\nfinboucle%s:\n''' % (id, self.sons[0].e_toAsm(), id, self.sons[1].c_toAsm(), id, id)\n\n def pvars(self):\n vars = set()\n if self.type == 'prog':\n vars.update(self.sons[3].pvars())\n vars.update(self.sons[4].pvars())\n return vars\n elif self.type == 'commande':\n if self.value == 'asgnt':\n vars.add(self.sons[0]+';'+str(self.sons[2]))\n vars.update(self.sons[1].pvars())\n return vars\n else:\n vars.update(self.sons[0].pvars())\n vars.update(self.sons[1].pvars())\n return vars\n else:\n if self.type == 'OPBIN':\n vars.update(self.sons[0].pvars())\n vars.update(self.sons[1].pvars())\n return vars\n elif self.type == 'NUMBER':\n return vars\n else:\n vars.add(self.value+';'+str(self.sons[0]))\n return vars\n\n def init_var_int(self, var, i):\n return '''mov ebx, [eax + %s]\npush eax\npush ebx\ncall atoi\nadd esp, 4\nmov [%s], eax\npop eax\n''' % (str(4*(i+1)), var)\n\n def init_var_float(self, var, i):\n return '''mov ebx, [eax + %s]\npush eax\npush ebx\ncall atof\nfadd esp, 4\nmov [%s], eax\npop eax\n''' % (str(4*(i+1)), var)\n\n def com2List(self):\n vars = []\n if self.type == 'commande':\n if self.value == 'asgnt':\n vars.append(list(self.sons[0]))\n vars.append(self.sons[1].exp2List())\n return vars\n elif self.value == 'while':\n vars.append(self.sons[0].exp2List())\n vars.append(self.sons[1].com2List())\n return vars\n elif self.value == 'seq':\n vars.append(self.sons[0].com2List())\n 
vars.append(self.sons[1].com2List())\n return vars\n\n\n def exp2List(self):\n var_exp = set()\n if self.type == 'ID':\n var_exp.update([self.value, self.sons[0]])\n elif self.type == 'OPBIN':\n var_exp.update(self.sons[0].exp2List())\n var_exp.update(self.sons[1].exp2List())\n return var_exp\n\n def est_division_par_zero_exp(self):\n if self.type == 'ID':\n return False\n elif self.type == 'NUMBER':\n return False\n elif self.type == 'OPBIN':\n print(\"Opérateur = %s - Type = %s\" % (self.value, type(self.value)))\n if self.value == '/' :\n print(\"Division présente\")\n if (isinstance(self.sons[1], float) and float(self.sons[1]) == float(0)) or \\\n (isinstance(self.sons[1], int) and int(self.sons[1]) == int(0)):\n print('Erreur : tentative de division par \\'zéro\\': ligne %s' %\n (self.sons[2]))\n return True\n else:\n return self.sons[1].est_division_par_zero_exp()\n\n def est_division_par_zero_com(self):\n if self.type == 'commande':\n if self.value == 'asgnt':\n return False\n elif self.value == 'seq':\n return self.sons[0].est_division_par_zero_com() or self.sons[1].est_division_par_zero_com()\n elif self.value == 'while':\n return self.sons[0].est_division_par_zero_exp() or self.sons[1].est_division_par_zero_com()\n\n # Fonction transformant une declaration en liste\n # : typeVariable, nomVariable, ligneTypeVariable, ligneNomVariable\n def dec2List(self):\n if self.type == 'declaration':\n return [self.value, self.sons[0], self.sons[1], self.sons[2]]\n\n # Variables passees en parametre a la fonction main\n def vars_main(self):\n var_params_main = []\n if self.type == 'prog':\n if len(self.sons) != 0:\n for paramItem in self.sons[1]:\n var_params_main.append(paramItem.dec2List())\n return var_params_main\n\n #Variables declarees dans le programme main\n def vars_decl(self):\n var_declarees = []\n if self.type == 'prog':\n if len(self.sons) != 0:\n for declItem in self.sons[2]:\n var_declarees.append(declItem.dec2List())\n return var_declarees + self.vars_main()\n\n\n def epurerListeVarsDecl (self, monSet):\n monSetPerfect = []\n corr = []\n monSet = list(monSet)\n for i in range(len(monSet)):\n mesTokens1 = str.split(monSet[i], ';')\n lines = set()\n trouve = 0\n for j in range(i + 1, len(monSet)):\n mesTokens2 = str.split(monSet[j], ';')\n if mesTokens1[0] == mesTokens2[0]:\n trouve = 1\n lines.update(mesTokens1[1])\n lines.update(mesTokens2[1])\n corr.append(j)\n if trouve == 1:\n lines = list(lines)\n lines.sort()\n lignes = lines[0]\n for item in lines:\n if item != lines[0]:\n lignes = lignes + ', ' + item\n monSet[i] = mesTokens1[0] + ';' + lignes\n\n for i in range(len(monSet)):\n if not corr.__contains__(i):\n monSetPerfect.append(monSet[i])\n return monSetPerfect\n\n def verifier_variables(self):\n erreur_trouvee = False\n var_util = self.epurerListeVarsDecl(self.pvars())\n var_params = self.vars_main()\n var_decl = self.vars_decl()\n\n #Verification de variables utilisees dans le corps du programme declarees au prealable\n for var_utilisee in var_util:\n declaree = 0\n infos_var_utilisee = str.split(var_utilisee,';')\n for var_declaree in var_decl:\n if infos_var_utilisee[0] == var_declaree[1]:\n declaree = 1\n if declaree == 0:\n print('Erreur : variable \\'%s\\' non déclarée : ligne(s) %s' %\n (infos_var_utilisee[0], infos_var_utilisee[1]))\n erreur_trouvee = True\n\n #Verification de la declaration des variables passees en paramètre dans le main\n # var_params = self.vars_main()\n # for param in var_params:\n # declaree = 0\n # for var_declaree in var_decl:\n # if 
param[1] == var_declaree[1]:\n # declaree = 1\n # if param[0] == var_declaree[0]:\n # declaree = 2\n # if declaree == 1:\n # print('Erreur : variable \\'%s\\' déclarée (ligne %s) et utilisée en paramètre (ligne %s) '\n # 'dans le main mais avec des types différents'%\n # (param[1], var_declaree[3], param[3]))\n # erreur_trouvee = True\n # if declaree == 0:\n # print('Erreur : variable \\'%s\\' non déclarée et utilisée en paramètre dans le main'\n # ' : ligne(s) %s' %\n # (param[1], param[3]))\n # erreur_trouvee = True\n\n #Verification de non redondance des variables passees en paramètre dans le main\n for i in range(len(var_params)):\n for j in range(i + 1, len(var_params)):\n if var_params[i][1] == var_params[j][1]:\n print('Erreur : repassage en paramètre au main de la variable \\'%s\\' '\n '(ligne %s) déjà utilisée (ligne %s)' %\n ((var_params[j][1]), var_params[j][3], var_params[i][3]))\n erreur_trouvee = True\n\n # Verification de non redondante dans les declarations des variables\n for i in range(len(var_decl)):\n for j in range(i + 1, len(var_decl)):\n if var_decl[i][1] == var_decl[j][1]:\n print('Erreur : redéclaration de la variable \\'%s\\' (ligne %s) déjà déclarée (ligne %s)' %\n ((var_decl[j][1]), var_decl[j][3], var_decl[i][3]))\n erreur_trouvee = True\n return erreur_trouvee\n\n def verifier_valeur_retour(self):\n erreur_trouvee = False\n if self.type == 'prog':\n if self.sons[0][0] == 'int' and self.sons[5].value == 'float':\n print('Erreur : valeur de attendue attendue de type \\'%s\\' (ligne %s) et '\n 'valeur de retour trouvée de type \\'%s\\' (ligne %s)' %\n (self.sons[0][0], self.sons[0][1], self.sons[5].value, self.sons[5].sons[1]))\n erreur_trouvee = True\n return erreur_trouvee\n\n def verifier_division(self):\n if self.type == 'prog':\n un = self.sons[4].est_division_par_zero_exp()\n deux = self.sons[3].est_division_par_zero_com()\n return (un or deux)\n\n #Fonction pour retrouver le type d'une variable a partir de son nom\n def trouverType(self, maVar, var_declarees):\n trouve = 0\n for item in var_declarees:\n if item[1] == maVar:\n return item[0]\n return []\n\n def type_operandes_expression(self, var_declarees):\n if self.type == 'ID':\n return [self.trouverType(self.value, var_declarees)]\n elif self.type == 'NUMBER':\n if isinstance(self.value, int):\n return ['int']\n else:\n return ['float']\n else:\n lop = self.sons[0].type_operandes_expression(var_declarees)\n rop = self.sons[1].type_operandes_expression(var_declarees)\n if lop[0] != rop[0]:\n return ['float']\n else:\n return lop\n\n def type_operandes_commande(self, var_declarees):\n if self.type == 'commande':\n if self.value == 'asgnt':\n lop = self.trouverType(self.sons[0], var_declarees)\n rop = self.sons[1].type_operandes_expression(var_declarees)\n if lop == 'int' and rop[0] == 'float':\n print('Erreur : tentative d\\'affectation de valeur '\n 'de type \\'float\\' à '\n 'une variable (\\'%s\\') de type \\'int\\' : ligne %s' %\n (self.sons[0], self.sons[2]))\n raise EntierFloat('DIVISION PAR ZERO','Erreur : tentative d\\'affectation de valeur '\n 'de type \\'float\\' à '\n 'une variable (\\'%s\\') de type \\'int\\' : ligne %s' %\n (self.sons[0], self.sons[2]))\n else:\n return [lop, rop[0]]\n elif self.value == 'seq':\n return [self.sons[0].type_operandes_commande(var_declarees),\n self.sons[1].type_operandes_commande(var_declarees)]\n elif self.value == 'while':\n return [self.sons[0].type_operandes_expression(var_declarees),\n self.sons[1].type_operandes_commande(var_declarees)]\n\n\n def 
verifier_typage_commandes(self, com, var_declarees):\n typesOperandes = []\n typesOperandes = com.type_operandes_commande(var_declarees)\n\n def verifier_typage_operations(self, expr, var_declarees):\n typesOperandes = []\n typesOperandes = expr.type_operandes_expression(var_declarees)\n\n\n def verifier_operations_main(self):\n if self.type == 'prog':\n var_declarees = []\n var_declarees = self.vars_decl()\n self.verifier_typage_operations(self.sons[4], var_declarees)\n self.verifier_typage_commandes(self.sons[3], var_declarees)\n\n def init_vars(self, moule, vars_decl_int, vars_decl_float, vars_params):\n moule = moule.replace('LEN_INPUT_INT',str(1+len(vars_decl_int)))\n init_var_int = []\n for i in range(len(vars_decl_int)):\n for j in range(len(vars_params)):\n if vars_decl_int[i] != vars_params[i][1]:\n init_var_int.append(self.init_var_int(vars_decl_int[i],i))\n else:\n init_var_int.append(self.init_var_int_main(vars_decl_int[i], i, valeur))\n\n moule = moule.replace('VAR_INIT_INT', '\\n'.join(init_var_int))\n\n moule = moule.replace('LEN_INPUT_FLOAT', str(1 + len(vars_decl_float)))\n init_var_float = [self.init_var(vars_decl_float[i], i) for i in range(len(vars_decl_float))]\n moule = moule.replace('VAR_INIT_FLOAT', '\\n'.join(init_var_float))\n return moule\n\n def p_toAsm(self):\n f = open('moule.asm')\n moule = f.read()\n vars_declarees_int = []\n vars_declarees_float = []\n vars_declarees_intASM = set()\n vars_declarees_floatASM = set()\n vars_declarees = self.vars_decl()\n for item in vars_declarees:\n if item[0] == 'int':\n vars_declarees_int.append(item[1])\n elif item[0] == 'float':\n vars_declarees_float.append(item[1])\n vars_declarees_intASM = {'%s: dd 0' % v for v in vars_declarees_int}\n vars_declarees_floatASM = {'%s: dd 0' % v for v in vars_declarees_float}\n var_decl_int = '\\n'.join(vars_declarees_intASM)\n var_decl_float = '\\n'.join(vars_declarees_floatASM)\n moule = moule.replace('VAR_DECL_INT', var_decl_int)\n moule = moule.replace('VAR_DECL_FLOAT', var_decl_float)\n moule = self.init_vars(moule, vars_declarees_int, vars_declarees_float, self.vars_main())\n moule = moule.replace('COMMAND_EXEC', self.sons[3].c_toAsm())\n moule = moule.replace('EVAL_OUTPUT', self.sons[4].e_toAsm())\n return moule\n\n def analyses (self):\n if self.type == 'prog':\n un = self.verifier_variables()\n # deux = self.verifier_division()\n self.verifier_operations_main()\n trois = self.verifier_valeur_retour()\n if un is False and trois is False:\n print(self.p_toAsm())","sub_path":"ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":15711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"639965362","text":"import advent\n\n\nclass ContainerSolution(advent.Solution):\n def __init__(self):\n super(ContainerSolution, self).__init__(year=2019, day=4)\n\n @staticmethod\n def meets_criteria(value, limit=True):\n # It is a six-digit number.\n if not (10**5 <= value < 10**6):\n return False\n\n # The value is within the range given in the puzzle input.\n # (already satisified in this call)\n\n # Two adjacent digits are the same.\n # Going from left to right the digits never decrease.\n digits = list(map(int, str(value)))\n\n # Keep track of our adjacency values, the first index is the last digit\n # seen, the second is the current run length, and the last is if we've\n # already seen a valid adjacency group.\n adjacency = [digits[0], 1, False]\n for digit in digits[1:]:\n # First check to ensure our digit increases.\n if digit 
< adjacency[0]:\n return False\n\n # Second, check for adjacency.\n if adjacency[0] == digit:\n adjacency[1] += 1\n if not limit:\n adjacency[2] = True\n\n elif limit:\n if adjacency[1] == 2:\n adjacency[2] = True\n adjacency[1] = 1\n\n # Save the last digit.\n adjacency[0] = digit\n\n # If we made it to the end without failing, check our adjacency value.\n return (adjacency[2] or adjacency[1] == 2)\n\n def run(self, name=\"input\"):\n print(self.meets_criteria(112233, limit=True))\n print(self.meets_criteria(123444, limit=True))\n print(self.meets_criteria(111122, limit=True))\n\n # Read in our limits from our input.\n limits = tuple(map(int, self.get_input_lines(name=name)))\n\n # Create a generator for the values we need to check.\n values = range(limits[0], limits[1] + 1)\n\n # Calculate the number of values that meet our criteria.\n count = len(list(\n filter(lambda x: self.meets_criteria(x, limit=False), values)\n ))\n self.report_answer(\"A\", count)\n\n # Calculate the number of values that meet the criteria for the second\n # part.\n count = len(list(\n filter(lambda x: self.meets_criteria(x, limit=True), values)\n ))\n self.report_answer(\"B\", count)\n\n\nif __name__ == \"__main__\":\n ContainerSolution().run()\n","sub_path":"2019/04/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"393568915","text":"# coding: UTF-8\nimport re\nimport time\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ndef load(url):\n res = requests.get(url)\n #HTTPリクエストが失敗したステータスコードを返した場合、HTTPErrorを送出\n res.raise_for_status()\n #レスポンスボディをテキスト形式で入手\n return res.text\n\n#htmlタグの取得\ndef get_tag(html, find_tag):\n soup = BeautifulSoup(str(html), 'html.parser')\n tag = soup.find(find_tag)\n return tag\n\n#htmlタグの取得\ndef get_tags(html, find_tag):\n soup = BeautifulSoup(str(html), 'html.parser')\n tag = soup.find_all(find_tag)\n return tag\n\n#htmlのid取得\ndef get_id(html, find_id):\n soup = BeautifulSoup(str(html), 'html.parser')\n html_id = soup.select(find_id)\n return html_id\n\n#プログラムで扱えるデータ構造に変換\ndef parse(html):\n soup = BeautifulSoup(html, 'html.parser')\n #htmlタグの削除\n simple_row = soup.getText()\n simple_row = simple_row.replace(' ', '') \n return simple_row\n\ndef parse_lyric(html):\n soup = BeautifulSoup(html, 'html.parser')\n #htmlタグの削除\n simple_row = soup.get_text(separator=\" \").strip()\n simple_row = simple_row.replace(' ', ' ')\n\n return simple_row\n\n#それぞれ歌の情報の取得\ndef get_info(url):\n base_url = 'https://www.uta-net.com/'\n html = load(url)\n #曲ごとのurlを格納\n song_url = []\n #歌を格納\n song_info = []\n songs_info=[]\n\n #曲のurlを取得\n #tdのurlを格納\n for td in get_tags(html, 'td'):\n #a要素の取得\n for a in get_tags(td, 'a'):\n #href属性にsongを含むか否か\n if 'song' in a.get ('href'):\n #urlを配列に追加\n song_url.append(base_url + a.get('href'))\n\n #曲の情報の取得\n for i, page in enumerate(song_url):\n print('{}曲目:{}'.format(i + 1, page))\n html = load(page)\n song_info = []\n\n #Song\n for tag in get_tag(html, 'h2'):\n #id検索を行うため、一度strにキャスト\n tag = str(tag)\n simple_row = parse(tag)\n song_info.append(simple_row) \n\n #Artist\n for tag in get_tags(html, 'h3'):\n tag = str(tag)\n if r'itemprop=\"byArtist name\"' in tag:\n simple_row = parse(tag)\n song_info.append(simple_row)\n\n #Lyricist\n for tag in get_tags(html, 'a'):\n tag = str(tag)\n if r'itemprop=\"lyricist\"' in tag:\n simple_row = parse(tag)\n song_info.append(simple_row)\n\n #Composer\n for tag in get_tags(html, 
'a'):\n tag = str(tag)\n if r'itemprop=\"composer\"' in tag:\n simple_row = parse(tag)\n song_info.append(simple_row)\n\n #Lyric\n for id_ in get_id(html, '#kashi_area'):\n id_ = str(id_)\n if r'id=\"kashi_area\"' in id_:\n simple_row = parse_lyric(id_)\n song_info.append(simple_row)\n songs_info.append(song_info)\n\n #1秒待機(サーバの負荷を軽減)\n time.sleep(1)\n break\n\n return songs_info\n\ndef create_df(file_name, url):\n # データフレームを作成\n #df = pd.DataFrame('Song_Title', 'Artist', 'Lyricist', 'Composer', 'Lyric')\n df = pd.DataFrame(get_info(url))\n df = df.rename(columns={0:'Song_Title', 1:'Artist', 2:'Lyricist', 3:'Composer', 4:'Lyric'})\n # CSVファイル出力\n csv = df.to_csv(\"csv/{}.csv\".format(file_name))\n return csv\n\nfile_name = 'sample'\nurl = 'https://www.uta-net.com/artist/684/'\ncreate_df(file_name, url)","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45109090","text":"#!/usr/bin/env python3\nfrom multiprocessing import Process, Queue\nimport sys\nMAX = 5000\ndelta = [(-1, 0), (0, -1), (1, 0), (0, 1)]\n\ndef check (c, x, y):\n cnt = 0\n for dx, dy in delta:\n cx, cy = x + dx, y + dy\n if (cx >= 1 and cx <= MAX) and (cy >= 1 and cy <= MAX):\n if (cx, cy) in c:\n cnt = cnt + 1\n return True if cnt > 1 else False\n\ndef advance (c, a):\n n = set()\n for x, y in a:\n for dx, dy in delta:\n cx, cy = x + dx, y + dy\n if (cx >= 1 and cx <= MAX) and (cy >= 1 and cy <= MAX):\n if (cx, cy) not in c:\n if check (c, cx, cy):\n n.add((cx, cy))\n return n\n\ndef runOneIteration(coord, idx, q):\n tick = 0\n adv = set(coord)\n while 1:\n adv = advance (coord, adv)\n if len(adv) == 0: break;\n tick = tick + 1\n coord.update(adv)\n q.put({idx:tick})\n\ndef main (fn):\n ifile = open(fn, \"r\")\n ofile = open(fn.replace(\"in\", \"out\"), \"w\")\n\n T = int(ifile.readline())\n\n procs = []\n outputs = [Queue()] * T\n coords = []\n for t in range(T):\n coord = set()\n for i in range(int(ifile.readline())):\n x, y = map(int, ifile.readline().split())\n if x <= MAX and y <= MAX:\n coord.add((x, y))\n coords.append(coord)\n\n idx = 0\n for c in coords:\n proc = Process(target=runOneIteration, args=(c, idx, outputs[idx]))\n procs.append(proc)\n proc.start()\n idx = idx + 1\n\n for p in procs:\n p.join()\n\n out = {}\n for o in outputs:\n out.update(o.get());\n\n for k in out:\n ofile.write (str(out[k]) + \"\\n\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print((\"Usage : %s input.in\")%(sys.argv[0]))\n else:\n main (sys.argv[1])\n","sub_path":"CodeJam/2017/Problem_1/solve_set2.py","file_name":"solve_set2.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351525994","text":"import json\nimport math\nimport os\nimport pprint\nfrom time import sleep\n\nimport matplotlib.pyplot as plt\nimport requests\nfrom dotenv import load_dotenv\n\nfrom constants import INPUT_IMAGE_FOLDER\n\nload_dotenv(verbose=True)\nAPI_KEY = os.environ.get(\"API_KEY\")\nLOGIN_URL = \"https://api.wrnch.ai/v1/login\"\nJOBS_URL = \"https://api.wrnch.ai/v1/jobs\"\n\n\ndef get_job_response(get_job_url, fetch_try=1, wait_for=0.50):\n sleep(wait_for)\n resp_get_job = requests.get(\n get_job_url, headers={\"Authorization\": f\"Bearer {JWT_TOKEN}\"}\n )\n # print('Status code:', resp_get_job.status_code)\n # print('\\nResponse:')\n pprint.pprint(resp_get_job.text)\n fetch_try = fetch_try + 1\n if 
resp_get_job.status_code != 200:\n print(f\"{fetch_try} wait for fetching the result\")\n return get_job_response(get_job_url, fetch_try=fetch_try, wait_for=0.10)\n cloud_pose_estimation = json.loads(resp_get_job.text)\n return cloud_pose_estimation\n\n\ndef get_joint_coordinates(body_joints_coordinates, body_joint_position):\n coordinates = [\n body_joints_coordinates[2 * body_joint_position],\n body_joints_coordinates[2 * body_joint_position + 1],\n ]\n # Converting the sign of y coordinate as positive y coordinate from wrnch means y coordinate going down\n return coordinates\n\n\ndef get_distance(coordinates_1, coordinates_2):\n distance = math.sqrt(\n (coordinates_2[0] - coordinates_1[0]) ** 2\n + (coordinates_2[1] - coordinates_1[1]) ** 2\n )\n return distance\n\n\ndef plot_lines(right_wrist_co, left_wrist_co, right_shoulder_co, left_shoulder_co):\n plt.plot(\n [right_wrist_co[0], left_wrist_co[0]],\n [right_wrist_co[1], left_wrist_co[1]],\n \"r-\",\n )\n # plt.annotate('local max', xy=(1, 0), xytext=(1, 0.5),\n # arrowprops=dict(facecolor='black', shrink=0.05),\n # )\n plt.plot(\n [right_shoulder_co[0], left_shoulder_co[0]],\n [right_shoulder_co[1], left_shoulder_co[1]],\n \"b-\",\n )\n plt.axis([-1, 1, -1, 1])\n\n plt.show()\n\n\nresp_auth = requests.post(LOGIN_URL, data={\"api_key\": API_KEY})\n# # print(resp_auth.text)\n# the jwt token is valid for an hour\nJWT_TOKEN = json.loads(resp_auth.text)[\"access_token\"]\n\n\ndef get_wrnch_data(local_image_path=None, image_byte_stream=None):\n # Open the file as a byte stream\n # Send a post request with authentification and the file\n if local_image_path:\n if os.path.isfile(local_image_path):\n with open(local_image_path, \"rb\") as f:\n resp_sub_job = requests.post(\n JOBS_URL,\n headers={\"Authorization\": f\"Bearer {JWT_TOKEN}\"},\n files={\"media\": f},\n data={\"work_type\": \"json\"},\n )\n else:\n raise Exception(f\"Path={local_image_path} doesnot exist\")\n elif image_byte_stream:\n resp_sub_job = requests.post(\n JOBS_URL,\n headers={\"Authorization\": f\"Bearer {JWT_TOKEN}\"},\n files={\"media\": image_byte_stream},\n data={\"work_type\": \"json\"},\n )\n else:\n raise Exception(\"No Input Image Data!!\")\n\n job_id = json.loads(resp_sub_job.text)[\"job_id\"]\n # print('Status code:', resp_sub_job.status_code)\n # print('Response:', resp_sub_job.text)\n # The status code should be 202 and return a job_id.\n\n job_fetcher_url = JOBS_URL + \"/\" + job_id\n # print(job_fetcher_url)\n pose_estimation = get_job_response(get_job_url=job_fetcher_url)\n # print(f\"pose_estimation Keys={pose_estimation.keys()}\")\n man_pose = pose_estimation[\"frames\"][0][\"persons\"][0]\n\n # There are 25 joints and the joints json hold 50 values (25 pairs of coordinates).\n # Joint 0 coordinates, (x,y) = (man_pose['pose2d']['joints'][0],man_pose['pose2d']['joints'][1])\n # Joint n coordinates, (x,y) = (man_pose['pose2d']['joints'][2n],man_pose['pose2d']['joints'][2n+1])\n man_pose_joints = man_pose[\"pose2d\"][\"joints\"]\n # print(len(man_pose_joints))\n\n left_wrist = get_joint_coordinates(man_pose_joints, 15)\n # print(f\"left_wrist={left_wrist}\")\n\n right_wrist = get_joint_coordinates(man_pose_joints, 10)\n # print(f\"right_wrist={right_wrist}\")\n\n left_shoulder = get_joint_coordinates(man_pose_joints, 13)\n # print(f\"left_shoulder={left_shoulder}\")\n\n right_shoulder = get_joint_coordinates(man_pose_joints, 12)\n # print(f\"right_shoulder={right_shoulder}\")\n\n wrist_distance = get_distance(right_wrist, left_shoulder)\n # 
print(f\"wrist_distance={wrist_distance}\")\n\n shoulder_distance = get_distance(right_shoulder, left_shoulder)\n # print(f\"shoulder_distance={shoulder_distance}\")\n\n length_resize_factor = wrist_distance / shoulder_distance\n # print(f\"length_resize_factor={length_resize_factor}\")\n\n left_wrist_increase_factor = (\n (left_wrist[0] - left_shoulder[0]) / (left_shoulder[0] - right_shoulder[0])\n ) + 1\n # print(f\"left_wrist_increase_factor={left_wrist_increase_factor}\")\n\n left_wrist_displacement = left_wrist[0] - left_shoulder[0]\n # print(f\"left_wrist_displacement={left_wrist_displacement}\")\n\n right_wrist_increase_factor = (\n (right_shoulder[0] - right_wrist[0]) / (left_shoulder[0] - right_shoulder[0])\n ) + 1\n # print(f\"right_wrist_increase_factor={right_wrist_increase_factor}\")\n\n right_wrist_displacement = right_shoulder[0] - right_wrist[0]\n # print(f\"right_wrist_displacement={right_wrist_displacement}\")\n\n rotation_angle_radians = math.atan2(\n (left_wrist[1] - right_wrist[1]), (left_wrist[0] - right_wrist[0])\n )\n # print(f\"rotation_angle_radians={rotation_angle_radians}\")\n\n rotation_angle_degrees = math.degrees(rotation_angle_radians)\n # print(f\"rotation_angle_degrees={rotation_angle_degrees}\")\n\n # plot_lines(right_wrist_co=right_wrist, left_wrist_co=left_wrist, right_shoulder_co=right_shoulder,\n # left_shoulder_co=left_shoulder)\n\n return (\n wrist_distance,\n shoulder_distance,\n left_wrist_displacement,\n right_wrist_displacement,\n rotation_angle_degrees,\n )\n\n\nif __name__ == \"__main__\":\n IMAGE_NAME = \"WhatsApp Image 2020-01-19 at 1.56.26 AM (4).jpeg\"\n IMAGE_FILE_PATH = f\"{INPUT_IMAGE_FOLDER}/{IMAGE_NAME}\"\n (\n wrist_dist,\n shoulder_dist,\n left_wrist_displ,\n right_wrist_displ,\n rotation_degrees,\n ) = get_wrnch_data(local_image_path=IMAGE_FILE_PATH)\n print(right_wrist_displ)\n print(left_wrist_displ)\n","sub_path":"job_submit.py","file_name":"job_submit.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301477748","text":"import matplotlib.pyplot as plt\nimport random\nimport math\nimport timeit\n\ndef impact_craters():\n x = [] # initialize x list\n y = [] # initialize y list\n time = [] # keep track of time the crater was made\n hold = 0.90 # baseline impact ratio so while will run\n last = 1 # number of impacts on last run\n area = 25 * 25 # area of data points on graph\n plt.title('Impact Craters')\n plt.ylabel('Kilometers')\n plt.xlabel('Kilometers')\n plt.axis([0, 500, 0, 500]) # sets graph to be 500km x 500km\n dub = timeit.default_timer() # baseline time to check for double runtime\n while(hold < 0.95): # while the ratio of impacts from the last ratio check is less than 95%\n clear = [] # empty list to use for crater removal due to obliteration\n inX = random.randint(0,500) # random X coordinate between 0 and 500\n inY = random.randint(0,500) # random Y coordinate between 0 and 500\n x.append(inX) # adds new value to end of x list\n y.append(inY) # adds new value to end of y list\n time.append(timeit.default_timer()) # adds time crater was made to time list\n for i in range(len(x) - 1): # iterated through x, excludes last x which is the new crater\n if x[i] > (inX-30) and x[i] < (inX+30): # if the x value is within range to be obliterated\n check = math.sqrt((inX - x[i])**2 + (inY - y[i])**2) # use pythag to determine distance from center of craters\n if check <= 30: # if crater is closer than 30km or half the diameter of 
obliteration\n clear.append(i) # add location to clear list\n for i in sorted(clear, reverse=True): # pop the marked indices from highest to lowest so earlier positions stay valid (range(len(clear),0,-1) popped the wrong elements)\n x.pop(i) # deletes i crater from x list\n y.pop(i) # deletes i crater from y list\n time.pop(i) # deletes i crater from time list\n if (timeit.default_timer() / dub) >= 2: # if current time is double start time\n hold = last / len(x) # calc ratio for amount of craters\n last = len(x) # update last for next calc\n dub = timeit.default_timer() # update for next time comparison\n final = timeit.default_timer() # will execute after while loop breaks, this is actual end of simulation\n print (\"Number of Craters is:\") \n print (len(x))\n for i in range(len(x)):\n print('Crater', i + 1 , ': x -' , x[i] , ' y -', y[i], '@ -', time[i])\n plt.scatter(x , y , s = area, c = 'b', alpha = 1)\n plt.show()\n return (final)\n \n \ndef main():\n print(impact_craters())\n return (0)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"impact_craters.py","file_name":"impact_craters.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319481006","text":"lista = [ ]\n\nfor i in range(20): \n num = int(input('Insira um número inteiro positivo ou negativo: '))\n lista.append(num)\n\n# position counter belongs before the printing loop, not inside the reading loop\nposicao = 0\nfor y in lista[::-1]:\n posicao += 1\n print(f'N[{posicao}] = {y}')\n ","sub_path":"Semanas 8 e 9/uri_1175_troca_vetor_1.py","file_name":"uri_1175_troca_vetor_1.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91862860","text":"# Copyright (c) 2016 Uber Technologies, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals\n)\n\nfrom doubles import allow\nimport pytest\nfrom tchannel import TChannel\nfrom tchannel.errors import (\n BadRequestError as TChBadRequestError,\n UnexpectedError as TChUnexpectedError,\n)\nfrom tchannel.sync import TChannel as TChannelSync\n\nfrom yarpc.errors import ProcedureFailedError\nfrom yarpc.transport import Request\nfrom yarpc.transport.tchannel import TChannelInbound\n\n\ndef test_using_tchannel_sync_raises_error():\n sync = TChannelSync('holler')\n with pytest.raises(ValueError):\n TChannelInbound(sync)\n\n\ndef test_inbound_will_call_listen_on_tchannel():\n tchannel = TChannel('yaboi')\n assert not tchannel.is_listening()\n\n inbound = TChannelInbound(tchannel)\n inbound.start(None)\n assert tchannel.is_listening()\n\n\n@pytest.mark.gen_test\n@pytest.mark.parametrize('req,msg', [\n (\n Request(\n service='service',\n encoding='json',\n procedure='procedure',\n ttl=10000,\n ),\n 'BadRequest: missing caller name\\n',\n ),\n (\n Request(\n caller='caller',\n encoding='json',\n procedure='procedure',\n ttl=10000,\n ),\n 'BadRequest: missing service name\\n',\n ),\n (\n Request(\n caller='caller',\n encoding='json',\n service='service',\n ttl=10000,\n ),\n 'BadRequest: missing procedure\\n',\n ),\n (\n Request(),\n 'BadRequest: missing service name, procedure, caller name, TTL, '\n 'and encoding\\n',\n ),\n (\n Request(\n caller='caller',\n service='service',\n encoding='json',\n procedure='procedure',\n ttl=-10000,\n ),\n 'BadRequest: invalid TTL \"-10000\" for procedure \"procedure\" '\n 'of service \"service\": must be positive integer\\n',\n ),\n\n])\ndef test_bad_request_error(req, msg):\n # we need to mock yarpc.transport.tchannel._to_request to produce\n # faulty yarpc.transport.Request objects because its not possible to\n # send bad requests with the tchannel client library.\n from yarpc.transport.tchannel import inbound as tch_inbound_module\n allow(tch_inbound_module)._to_request.and_return(req)\n\n inbound = TChannelInbound(TChannel('%s-server' % __name__))\n inbound.start(None)\n\n client = TChannel('%s-client' % __name__)\n\n with pytest.raises(TChBadRequestError) as e:\n yield client.raw(\n service='service',\n endpoint='procedure',\n hostport=inbound.hostport,\n timeout=10, # seconds\n )\n\n e = e.value\n assert str(e) == msg\n\n\n@pytest.mark.gen_test\n@pytest.mark.parametrize('error,msg', [\n (\n Exception('great sadness'),\n 'UnexpectedError: error for procedure \"procedure\" '\n 'of service \"service\": great sadness\\n'\n ),\n (\n ProcedureFailedError(\n exception=AttributeError('great sadness does not exist'),\n service='back',\n procedure='holler',\n ),\n 'UnexpectedError: error for procedure \"holler\" '\n 'of service \"back\": great sadness does not exist\\n'\n ),\n\n])\ndef test_unexpected_error(error, msg):\n class Handler(object):\n def handle(self, request):\n raise error\n\n inbound = TChannelInbound(TChannel('%s-server' % __name__))\n inbound.start(Handler())\n\n client = TChannel('%s-client' % __name__)\n\n with pytest.raises(TChUnexpectedError) as e:\n yield client.raw(\n service='service',\n endpoint='procedure',\n hostport=inbound.hostport,\n timeout=10, # seconds\n )\n\n e = e.value\n 
assert str(e) == msg\n","sub_path":"tests/yarpc/transport/tchannel/test_inbound.py","file_name":"test_inbound.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"394903723","text":"# -*- coding:utf-8 -*-\n# Date: 20 Apr 2021\n# Author:Pingyi Hu a1805597\n# Description:the service of label\n\nimport sqlite3\nimport json\nimport datetime\nimport os\n\nimport sys\nsys.path.append('..')\nfrom model.LabelDB import LabelDB\ndatabase = LabelDB(\"../EZlabel.db\")\n\n# choose image\ndef choose_image(json_image):\n image_info = json.loads(json_image)\n image_id = image_info[\"image_id\"] # subscript the parsed dict (calling it, image_info(\"image_id\"), was a bug)\n project_id = image_info[\"project_id\"]\n re = {\n 'code' : 0,\n 'message' : 'choose image'\n }\n return json.dumps(re)\n\n# delete image from dataset\ndef delete_image(image_url):\n database.delete_image_byurl(image_url)\n\n# delete the image from the image folder\ndef delete_image_alias(alias):\n path = './image/' + alias\n print(path)\n os.remove(path)\n re = {\n 'code': 0,\n 'message': 'successfully delete',\n }\n return json.dumps(re)\n\n\n\n########################### TEST with JSON #################################################\n\n# imagetest = {\n# \"filename\":\"Tom\",\n# \"url\":\"123456\",\n# }\n\n# imagejson = json.dumps(imagetest)\n# # insert_image(imagejson, \"3\", \"2\")\n# re = get_all(\"3\", \"2\")\n# print(re)","sub_path":"EZlabelTool/back-end/controller/Label.py","file_name":"Label.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"50690888","text":"import sys\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QTableWidgetItem\nimport sqlite3\nfrom PyQt5.QtGui import *\nfrom random import randint\nimport random\n\n\nclass MyWidget(QMainWindow):\n def __init__(self):\n super().__init__()\n uic.loadUi('ggwp.ui', self)\n self.con = sqlite3.connect(\"GGWP.db\")\n self.pushButton.clicked.connect(self.update_result)\n self.pushButton_2.clicked.connect(self.otv)\n self.pushButton_3.clicked.connect(self.Ref)\n self.modified = {}\n self.a = 0\n self.titles = None\n self.score = 0\n\n def update_result(self):\n self.pushButton_2.setEnabled(True)\n self.pushButton_3.setEnabled(True)\n self.pushButton.setEnabled(False)\n\n cur = self.con.cursor()\n if self.a == 0:\n n1 = cur.execute(\"SELECT COUNT(1) FROM Task\").fetchall()\n n = list(n1[0])[0]\n self.sp = list(range(1, n + 1))\n random.shuffle(self.sp)\n if self.a > len(self.sp) - 1:\n self.label_4.setText('Игра окончена')\n self.pushButton_2.setEnabled(False)\n self.pushButton_3.setEnabled(False)\n self.pushButton.setEnabled(False)\n else:\n\n result = cur.execute(\"Select * from Task WHERE id=?\",\n str(self.sp[self.a])).fetchall()\n self.a += 1\n print(self.sp)\n\n self.m = list(result[0])\n print(self.m)\n self.label_2.setText(self.m[3])\n self.tableWidget.setRowCount(len(result))\n\n self.tableWidget.setColumnCount(len(result[0]))\n self.label_3.setPixmap(QPixmap(self.m[1]))\n self.titles = [description[0] for description in cur.description]\n for i, elem in enumerate(result):\n for j, val in enumerate(elem):\n self.tableWidget.setItem(i, j, QTableWidgetItem(str(val)))\n self.modified = {}\n\n def otv(self):\n self.pushButton_2.setEnabled(False)\n a = self.lineEdit.text()\n f = open('баллы.txt', 'w')\n if a.lower() == self.m[2]:\n self.label_4.setText('Правильно!')\n self.score += 50\n 
self.lbScore.setText(str(self.score))\n\n else:\n self.label_4.setText('Неравильно!')\n self.score -= 50\n self.lbScore.setText(str(self.score))\n f.write('Итог - ' + ' ' + str(self.score))\n f.close()\n\n def Ref(self):\n self.update_result()\n\n\napp = QApplication(sys.argv)\nex = MyWidget()\nex.show()\nsys.exit(app.exec_())\n","sub_path":"Фото/ProjectX.py","file_name":"ProjectX.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"453877209","text":"\"\"\" Utility functions related to directories. \"\"\"\nimport os\n\n\ndef maybe_make_directory(path):\n \"\"\" Makes a directory if doesn't exist.\n\n :param path: Path to directory to maybe make.\n :return: T/F if the directory already existed.\n \"\"\"\n if os.path.isdir(path):\n return True\n os.makedirs(path)\n return False\n","sub_path":"science_utils/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"599664373","text":"from django.contrib import admin\nfrom django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin\nfrom django.contrib.flatpages.models import FlatPage\n\nfrom modeltranslation.admin import TranslationAdmin\nfrom sorl.thumbnail.admin import AdminImageMixin\nfrom mce_filebrowser.admin import MCEFilebrowserAdmin\n\nfrom main.models import Slider, News, Category, Product, ProductImages, CustomFlatPage, Expertise\n\nclass ExpertiseAdmin(AdminImageMixin, TranslationAdmin, MCEFilebrowserAdmin):\n\tclass Media:\n\t\t\tjs = (\n\t\t\t\t'/static/modeltranslation/js/force_jquery.js',\n\t\t\t\t'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',\n\t\t\t\t'/static/modeltranslation/js/tabbed_translation_fields.js',\n\t\t\t)\n\t\t\tcss = {\n\t\t\t\t'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),\n\t\t\t}\n\tfieldsets = [\n\t\t(None, {'fields': ['title','slug', 'content', 'img']}),\n\t]\n\tprepopulated_fields = {'slug': ('title',)}\n\tlist_display = ('title', 'slug')\n\nclass CustomFlatPageForm(FlatpageForm):\n\tclass Meta:\n\t\tmodel = CustomFlatPage\n\nclass CustomFlatPageAdmin(FlatPageAdmin, AdminImageMixin, TranslationAdmin, MCEFilebrowserAdmin):\n\tclass Media:\n\t\t\tjs = (\n\t\t\t\t'/static/modeltranslation/js/force_jquery.js',\n\t\t\t\t'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',\n\t\t\t\t'/static/modeltranslation/js/tabbed_translation_fields.js',\n\t\t\t)\n\t\t\tcss = {\n\t\t\t\t'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),\n\t\t\t}\n\tform = CustomFlatPageForm\n\tfieldsets = [\n\t\t(None, {'fields': ['title','url', 'custom_content', 'img', 'sites', 'template_name']}),\n\t]\n\tprepopulated_fields = {'url': ('title',)}\n\tlist_display = ('title', 'url')\n\nclass SliderAdmin(AdminImageMixin, TranslationAdmin, MCEFilebrowserAdmin):\n\tclass Media:\n\t\t\tjs = (\n\t\t\t\t'/static/modeltranslation/js/force_jquery.js',\n\t\t\t\t'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',\n\t\t\t\t'/static/modeltranslation/js/tabbed_translation_fields.js',\n\t\t\t)\n\t\t\tcss = {\n\t\t\t\t'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),\n\t\t\t}\n\tfieldsets = [\n\t\t(None, {'fields': ['title','link', 'content', 'image']}),\n\t]\n\nclass NewsAdmin(AdminImageMixin, TranslationAdmin, MCEFilebrowserAdmin):\n\tclass Media:\n\t\t\tjs = 
(\n\t\t\t\t'/static/modeltranslation/js/force_jquery.js',\n\t\t\t\t'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',\n\t\t\t\t'/static/modeltranslation/js/tabbed_translation_fields.js',\n\t\t\t)\n\t\t\tcss = {\n\t\t\t\t'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),\n\t\t\t}\n\tfieldsets = [\n\t\t(None, {'fields': ['title', 'content', 'img', 'slug']}),\n\t]\n\tprepopulated_fields = {'slug': ('title',)}\n\nclass CategoryAdmin(AdminImageMixin, TranslationAdmin, MCEFilebrowserAdmin):\n\tclass Media:\n\t\t\tjs = (\n\t\t\t\t'/static/modeltranslation/js/force_jquery.js',\n\t\t\t\t'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',\n\t\t\t\t'/static/modeltranslation/js/tabbed_translation_fields.js',\n\t\t\t)\n\t\t\tcss = {\n\t\t\t\t'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),\n\t\t\t}\n\tfieldsets = [\n\t\t(None, {'fields': ['title','img', 'description', 'slug']}),\n\t]\n\nclass ImagesInline(AdminImageMixin, admin.TabularInline):\n\t\"\"\"\n\tGallery Image inline\n\t\"\"\"\n\tfieldsets = (\n\t\t(\n\t\t\tNone, \n\t\t\t{\n\t\t\t\t'fields': ('title', 'img',)\n\t\t\t}\n\t\t),\n\t)\n\n\tmodel = ProductImages\n\textra = 0\n\n\n\nclass ProductAdmin(AdminImageMixin, TranslationAdmin, MCEFilebrowserAdmin):\n\tclass Media:\n\t\t\tjs = (\n\t\t\t\t'/static/modeltranslation/js/force_jquery.js',\n\t\t\t\t'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',\n\t\t\t\t'/static/modeltranslation/js/tabbed_translation_fields.js',\n\t\t\t)\n\t\t\tcss = {\n\t\t\t\t'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),\n\t\t\t}\n\tfieldsets = [\n\t\t(None, {'fields': ['title','img', 'description', 'category', 'slug']}),\n\t]\n\tprepopulated_fields = {'slug': ('title',)}\n\tinlines = (ImagesInline, )\n\tlist_display = ('title', 'category')\n\n\n\nclass ProductImagesAdmin(AdminImageMixin, TranslationAdmin, MCEFilebrowserAdmin):\n\tclass Media:\n\t\t\tjs = (\n\t\t\t\t'/static/modeltranslation/js/force_jquery.js',\n\t\t\t\t'http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js',\n\t\t\t\t'/static/modeltranslation/js/tabbed_translation_fields.js',\n\t\t\t)\n\t\t\tcss = {\n\t\t\t\t'screen': ('/static/modeltranslation/css/tabbed_translation_fields.css',),\n\t\t\t}\n\tfieldsets = [\n\t\t(None, {'fields': ['title','img', 'product']}),\n\t]\n\tlist_display = ('title', 'product')\n\n\n\nadmin.site.unregister(FlatPage)\nadmin.site.register(CustomFlatPage, CustomFlatPageAdmin)\n\nadmin.site.register(Expertise, ExpertiseAdmin)\nadmin.site.register(ProductImages, ProductImagesAdmin)\nadmin.site.register(Product, ProductAdmin)\nadmin.site.register(Slider, SliderAdmin)\nadmin.site.register(News, NewsAdmin)\nadmin.site.register(Category, CategoryAdmin)","sub_path":"main/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503764120","text":"import pytest\nfrom greent.ontologies.hpo import HPO\nfrom greent.servicecontext import ServiceContext\n\n@pytest.fixture(scope='module')\ndef hpo():\n hpo = HPO(ServiceContext.create_context())\n return hpo\n\ndef test_lookup(hpo):\n terms1=hpo.search('Arrhythmias, Cardiac')\n terms2=hpo.search('CARDIAC ARRHYTHMIAS')\n assert len(terms1) == len(terms2) == 1\n assert terms1[0] == terms2[0] == 
'HP:0011675'\n\n","sub_path":"greent/test/test_hpo.py","file_name":"test_hpo.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"589157147","text":"'''\r\nDeveloper: Alper Kaan\r\nDate: 14.02.2021\r\nPurpose of Software: Reinforcement of learned Python Code and Self-improvement\r\n'''\r\n'''\r\nWrite a function that controls the given inputs whether they are equal to their reversed order or not.\r\n\r\nExample:\r\n\r\nInput >>> madam, tacocat, utrecht\r\n\r\nOutput >>> True, True, False\r\n'''\r\ndef equal_reverse():\r\n liste = input(\"listeyi aralarına virgül koyarak yazın: \\n\").split(\",\")\r\n print(liste)\r\n \r\n for i in liste:\r\n word = i.strip()\r\n if i != liste[-1]:\r\n if word == word[::-1]:\r\n print(True,end=\",\")\r\n\r\n elif word != word[::-1]:\r\n print(False,end=\",\")\r\n else:\r\n if word == word[::-1]:\r\n print(True)\r\n\r\n elif word != word[::-1]:\r\n print(False)\r\n\r\nequal_reverse()\r\n\r\n","sub_path":"alper_kaan/equal_reverse.py","file_name":"equal_reverse.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580536346","text":"# coding=utf-8\nimport tornado.web\nimport peewee\nfrom setting import db\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n def raise_404(self, msg=None):\n raise tornado.web.HTTPError(400, msg or 'Invalid request')\n\n\nclass BaseModel(peewee.Model):\n class Meta:\n database = db\n\n @classmethod\n def getOne(cls, *query, **kwargs):\n # convenience wrapper: return None when no record matches instead of raising an exception\n try:\n return cls.get(*query, **kwargs)\n except peewee.DoesNotExist:\n return None\n\n\ndef format_return(data, error):\n e = 'success'\n if error:\n d = {\n 'returnCode': error['returnCode'],\n 'error': error['error'],\n 'data': data,\n }\n else:\n d = {\n 'returnCode': 1,\n 'error': e,\n 'data': data,\n }\n return d\n\n\ndef model2json(obj, un_json=None, filter=None):\n if not un_json: un_json = []\n if not filter: filter = []\n if not obj:\n return {}\n fields = obj._meta.fields # renamed from `list` to avoid shadowing the built-in\n data = {}\n for op in fields:\n value = getattr(obj, op.attname)\n if op.attname in filter:\n continue\n # if op.__class__.__name__ == 'ForeignKey':\n # if str(op.related.parent_model) == \"\":\n # if value:\n # company = Company.objects.get(id=int(value))\n # data[op.attname[:-3]] = {\n # 'id':company.id,\n # 'name': company.name,\n # 'share_count': company.share_count,\n # 'vitality': company.vitality,\n # 'staff_count': company.staff_count\n # }\n # else:\n # data[op.attname] = value is not None and value or 0\n elif op.__class__.__name__ in ('DateField',): # trailing comma needed: a bare ('X') is a string, so `in` did substring matching\n data[op.attname] = value is not None and str(value) or ''\n elif op.__class__.__name__ in ('FileField', 'ImageField'):\n data[op.attname] = '/%s' % value if value else ''\n elif op.__class__.__name__ in ('DateTimeField',):\n data[op.attname] = (value and value.strftime('%Y-%m-%d %H:%M:%S') or '')\n elif op.__class__.__name__ in ('BooleanField',):\n data[op.attname] = value and 1 or 0\n elif op.__class__.__name__ in ('DecimalField',):\n data[op.attname] = value and float(value) or 0.0\n elif op.__class__.__name__ in ('FloatField',):\n data[op.attname] = '%.1f' % value if value is not None else '-1.0'\n elif op.__class__.__name__ in ('IntegerField',):\n data[op.attname] = value if value else 0\n else:\n if value is not None:\n data[op.attname] = value\n else:\n data[op.attname] = ''\n return data\n\n\ndef login_required(function=None, 
login_url=None):\n def call_(*args, **kwargs):\n if function:\n if not args[0].get_secure_cookie('username'):\n args[0].redirect('/login')\n else:\n return function(*args, **kwargs)\n\n return call_","sub_path":"app/home/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"581489848","text":"import os, re\nimport util\n\n\n# Aquesta funció calcula el top de paraules i la informació auxiliar necessària per als propers passos. Li entra el directori del dataset i el nombre de paraules\n# més comunes a retornar. Utilitzarem tres diccionaris: globalResults, on emmagatzemarem el recompte del nombre d'aparicions de totes les paraules\n# de tots els textos, individualResults, diccionari que conté, per cada text, un nou diccionari amb les paraules que apareixen i la seva freqüència i,\n# finalment, gender, que conté per cada text (key) el sexe que li correspon (value).\ndef calculateTopWordsTextFrequencies(path, N):\n\tglobalResults = {}\n\tindividualResults = {}\n\tgender = {}\n\n\t# Iterem per cada fitxer que es trobi en el nostre directori de datasets\n\tfor filename in os.listdir(path):\n\n\t\t# Fem cast a integer del nombre que identifica el text, obtenint aquesta informació del nom de l'arxiu\n\t\ttry:\n\t\t\tfileNumber = int(filename.split(\"_\")[0])\n\t\texcept Exception as e:\n\t\t\tprint(\"[ERROR] File name format unexpected for\", \"'{}'\".format(filename), \"- skipped\")\n\t\t\tcontinue\n\n\t\twith open(path+\"/\"+filename) as f:\n\t\t\twordDict = {}\n\t\t\ttext = []\n\t\t\t# Guardem a gender, per al text sobre el que iterem, el sexe de l'escriptor, dada obtinguda del nom del fitxer\n\t\t\tgender[fileNumber] = filename.split(\"_\")[1]\n\n\t\t\t# Llegim línia per línia el text, eliminant caràcters no desitjats, com signes de puntuació i exclamació, obtenint la paraula pura\n\t\t\tfor line in f:\n\t\t\t\tline = line.replace(\"\\r\\n\", \"\").lower()\n\t\t\t\tline = re.sub(\"[^A-Za-z0-9\\s']+\", '', line)\n\t\t\t\tvalues = line.split()\n\t\t\t\ttext.extend(values)\n\n\t\t\t# Incrementem una unitat, en els diccionaris corresponents, el nombre d'aparicions de la paraula que tractem,\n\t\t\t# o bé inicialitzem el nombre d'aparicions d'aquesta a 1 si encara no estava registrada\n\t\t\tfor value in text:\n\t\t\t\tif value in globalResults:\n\t\t\t\t\tglobalResults[value] += 1\n\t\t\t\telse:\n\t\t\t\t\tglobalResults[value] = 1\n\n\t\t\t\tif value in wordDict:\n\t\t\t\t\twordDict[value] += 1\n\t\t\t\telse:\n\t\t\t\t\twordDict[value] = 1\n\n\t\t# Creem una nova entrada en el diccionari que correspon al text que analitzem en cada iteració, que serà un nou diccionari\n\t\t# on guardarem la parella paraula (key) i el percentatge d'aparició en el text (value) per a totes les paraules que formen el text\n\t\tindividualResults[fileNumber] = {}\n\t\tfor member in wordDict.items():\n\t\t\tpercentage = util.calculatePercentage(member[1], len(text))\n\t\t\tindividualResults[fileNumber][member[0]] = percentage\n\n\t# Un cop iterats tots els fitxers i obtingut els resultats finals de les freqüències de les paraules a nivell global,\n\t# ordenem el diccionari pel nombre d'aparicions de les paraules de manera decreixent\n\tsortedWords = sorted([(value,key) for (key,value) in globalResults.items()], reverse=True)\n\n\t# Retornem les N paraules més comunes, els resultats individuals de cada text i el diccionari que conté per cada text el sexe corresponent\n\treturn sortedWords[:N], 
individualResults, gender\n\n\n# El mètode generateARFF genera el fitxer output.arff amb el format adequat per a executar-ho a weka. \n# Rep com a paràmetres topWords, textResults i gender. topWords és una llista de tuples amb el següent format: [(x,y),...]\n# on \"x\" és un nombre i \"y\" és la paraula. textResults conté un diccionari de diccionaris,\n# el primer diccionari conté els textos, i el segon conté les paraules com a key, i els percentatges com a values.\ndef generateARFF(topWords, textResults, gender):\n\tarffText = \t\"% 1. Title: Gender identification\\n\" \\\n\t\t\t\t\"%\\n\" \\\n\t\t\t\t\"% 2. Sources:\\n\"\t\\\n\t\t\t\t\"% (a) Creator: Bernabé Gonzalez García, Aitor Cubeles Torres\\n\" \\\n\t\t\t\t\"% (b) Date: March, 2018\\n\" \\\n\t\t\t\t\"@RELATION gender\\n\\n\"\n\n\tATTRIBUTE = \"@ATTRIBUTE\"\n\tATTR_NUMERIC = \"NUMERIC\"\n\tDATA = \"@DATA\"\n\tnew_content = [[] for k in range(len(textResults))]\n\n\t# Recorrem totes les tuples de topWords per obtenir la paraula i formar la línia\n\t# concatenant els paràmetres necessaris per al format adequat de weka.\n\tfor value_top in topWords:\n\t\tarffText += ATTRIBUTE + ' \"' + value_top[1] + '\" ' + ATTR_NUMERIC + \"\\n\"\n\t\t# Amb new_content preparem les dades que necessitem per a l'apartat @DATA.\n\t\t# Afegim per a cada text les paraules i les freqüències que formen part del top N.\n\t\tfor keyvalue in textResults.items():\n\t\t\tnewValue = keyvalue[1].get(value_top[1], 0.0)\n\t\t\tnew_content[keyvalue[0]-1].extend([newValue])\n\n\tarffText += ATTRIBUTE + \" gender {male, female}\\n\\n\" + DATA + \"\\n\"\n\n\ti = 0\n\tfor value_new_content in new_content:\n\t\t# Afegim tota la informació que hem recolectat a new_content, però amb el format\n\t\t# adequat per a weka.\n\t\tarffText += (\", \").join([str(x) for x in value_new_content]) + \", \" + gender.get(i+1, str(None)) + \"\\n\"\n\t\ti += 1\n\n\twith open('output.arff', 'w', encoding='UTF-8') as file:\n\t\tfile.write(arffText)\n","sub_path":"P2/featureExtraction.py","file_name":"featureExtraction.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"135570762","text":"\n\noskar = lambda x: x if x <34 else 0\n\nprint(oskar(1500))\n\nn = [2,3,3]\nw = (2,3,3)\n\nprint(iter(n))\n\ndef square(*rozbierajsie):\n for arg in rozbierajsie:\n for arg2 in arg:\n print(arg2)\n return sum(*rozbierajsie)\n\nprint(square((1,2,3,4)))\n\n\n# wygeneruj liczby parzyste od 2 do 10\nlista = [2, 11, 2]\n\nprint(range(*lista))\n\ndef rodzina(*args, **kwargs):\n for arg in args:\n print(\"naziwska to:\" + arg)\n for args, item in kwargs.items():\n print(args ,item)\n\n\nrodzina(\"kowalscy\", a=2, b=2 )\n\nitems = [{'product': {'id': 3, 'name': 'Karton szary 200x30x20 cm', 'price': '43.00', 'imageURL': '/images/Screenshot_2021-04-24_Rower_Magnetyczny_Stacjonarny_z_pulsem_fitness.png'}, 'quantity': 4, 'get_total': '172.00'}]\n\nfor item in items:\n print(item.product.name)\n\n","sub_path":"algorithms/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231210196","text":"from flask import Flask, request, Response, jsonify\nfrom json import dumps\nfrom flask_restful import Api\nfrom Blockchain import BlockChain\nimport schedule\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport json\nfrom uuid import uuid4\n\napp = Flask(__name__)\napi = Api(app)\nblockchain = 
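The last loop of the `algorithms/main.py` record above iterates a list of plain dicts but uses attribute access (`item.product.name`), which raises `AttributeError` at runtime. The fix is ordinary key indexing:

```python
# The elements are dicts, so attribute access must become key lookups.
items = [{'product': {'name': 'Karton szary 200x30x20 cm', 'price': '43.00'},
          'quantity': 4}]

for item in items:
    print(item['product']['name'])  # not item.product.name
    # .get() variants avoid KeyError when a field may be missing:
    print(item.get('product', {}).get('price', 'n/a'))
```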
BlockChain()\nnode_identifier = str(uuid4()).replace('-', '')\n\n@app.route('/import', methods=['POST'])\ndef insertTransaction():\n try:\n data = request.json.get('data')\n for i in data:\n sender = i.get('sender')\n recipient = i.get('recipient')\n amount = i.get('amount')\n blockchain.new_transaction(sender, recipient, amount)\n return json.dumps(data) + ' added to Block with index ' + str(len(blockchain.chain) + 1), 200\n except:\n return 400\n\n@app.route('/getBlock/', methods=['GET'])\ndef getBlock(index):\n if int(index) > len(blockchain.chain):\n return 'Index out of range', 404\n elif int(index) <= 0:\n return 'Wrong index', 404\n else:\n return json.dumps(blockchain.chain[int(index) - 1].block_info()), 200\n\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n response = []\n length = len(blockchain.chain)\n data = [i.block_info() for i in blockchain.chain]\n response.append({'length':length, 'data': data})\n return json.dumps(response), 200\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n # We run the proof of work algorithm to get the next proof...\n last_block = blockchain.chain[-1]\n last_proof = last_block.block_info().get('proof')\n proof = blockchain.proof_of_work(last_proof)\n\n # We must receive a reward for finding the proof.\n # The sender is \"0\" to signify that this node has mined a new coin.\n blockchain.new_transaction(\n sender=\"0\",\n recipient=node_identifier,\n amount=1,\n )\n\n # Forge the new Block by adding it to the chain\n previous_hash = blockchain.chain[-1].compute_hash\n block = blockchain.new_block(proof, previous_hash)\n\n response = {\n 'message': \"New Block Forged\",\n 'index': block.block_info().get('index'),\n 'data': block.block_info().get('data'),\n 'proof': block.block_info().get('proof'),\n 'prevHash': block.block_info().get('prevHash'),\n }\n return jsonify(response), 200\n\n@app.route('/node/register', methods=['POST'])\ndef register():\n nodes = request.json.get('node')\n \n if nodes is None:\n return \"Error: Please supply a valid list of nodes\", 400\n\n for node in nodes:\n blockchain.register_node(node)\n\n response = {\n 'message': 'New nodes have been added',\n 'total_nodes': list(blockchain.nodes),\n }\n return jsonify(response), 201\n\ndef run_tasks():\n scheduler = BackgroundScheduler()\n scheduler.add_job(func=blockchain.new_block, trigger=\"interval\", seconds=3)\n scheduler.start()\n return 'Scheduled several long running tasks.', 200\n\nrun_tasks()\napp.run()","sub_path":"Blockchain-system/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489153065","text":"from collections import defaultdict\n\nd1 = defaultdict(str)\nd1['greet1'] = 'hello'\nprint(d1['greet1']) # works as expected\nprint(d1['greet2']) # not valid, invokes the str() constructor as a result\n\n\nstring_val = 'she sells seashells by the seashore'\nd2 = defaultdict(int)\n\nfor ch in string_val:\n d2[ch] += 1 # initially d[ch] will be an error, default value from int() which is zero will be used\n\nfor k,v in d2.items():\n print('{k}:{v}'.format(k=k, v=v))\n","sub_path":"Optum Tech/IN1468 available until 12-31-20/IN1468_student_files/student_files/ch06_std_lib/02_defaultdict.py","file_name":"02_defaultdict.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209107244","text":"import glfw\nfrom gl import gl\nimport sys\nfrom 
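The Flask node above delegates mining to `blockchain.proof_of_work(last_proof)`, but the `Blockchain` class itself is not part of this record. A common textbook shape for that method, an assumption rather than the repository's implementation, searches for a number whose hash together with the previous proof carries a fixed number of leading zeros:

```python
import hashlib

# Assumed textbook proof-of-work, not the actual Blockchain class internals.
def valid_proof(last_proof, proof, difficulty=4):
    guess = f"{last_proof}{proof}".encode()
    return hashlib.sha256(guess).hexdigest()[:difficulty] == "0" * difficulty

def proof_of_work(last_proof):
    proof = 0
    while not valid_proof(last_proof, proof):
        proof += 1
    return proof

print(proof_of_work(100))  # deterministic for a given last_proof
```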
nodetree.vmath import *\nimport nodetree.node as node\nfrom nodetree.node import Node\nimport nodetree.sprite as spritenode\nimport nodetree.debugdraw as debugdraw\nimport nodetree.scenenode as scenenode\nimport nodetree.transformnode as transformnode\nfrom nodetree.nodebuilder import NodeBuilder\nimport nodetree.physics as physicsnode\nfrom nodetree.windownode import WindowNode\nfrom nodetree.controllernode import ControllerNode\nfrom ctypes import *\nimport math\nfrom nodetree.util import *\nfrom nodetree.inputmap import *\nfrom nodetree.fpscamera import *\n\n\nclass Controller(Node):\n def __init__(self, input_map, name=None, parent=None):\n super(Controller, self).__init__(name=name, parent=parent)\n self.input_map = input_map\n\n def on_mount(self):\n scene = self.get_ancestor('is_scene')\n if scene:\n scene.on('prestep', self.step)\n\n window = self.get_node('/window')\n if window:\n window.on('key', self.key_callback)\n window.on('cursor_pos', self.mouse_cursor_callback)\n\n def on_unmount(self):\n scene = self.get_ancestor('is_scene')\n if scene:\n scene.off('prestep', self.step)\n\n window = self.get_node('/window')\n if window:\n window.off('key', self.key_callback)\n window.off('mouse', self.mouse_cursor_callback)\n\n def key_callback(self, window, key, scancode, action, mode):\n self.input_map.key_callback(window, key, scancode, action, mode)\n\n def mouse_cursor_callback(self, window, xoffset, yoffset):\n self.input_map.mouse_callback(window, xoffset, yoffset)\n\n def on_step(self):\n pass\n\n\nclass TurretController(Controller):\n MODE_ABSOLUTE = 0\n MODE_RELATIVE = 1\n\n def __init__(self, name=None, parent=None):\n super(TurretController, self).__init__(InputMap(), name=name, parent=parent)\n self.mode = self.MODE_RELATIVE\n\n def step(self):\n body = self.get_node('body')\n\n if body is None:\n print('could not find body.', self)\n return\n\n body.reset_forces()\n turn = 0\n if input.left:\n turn += 1\n \n if input.right:\n turn -= 1\n\n torque = 0.0\n target_angular_velocity = turn * TARGET_ANGULAR_VELOCITY;\n\n # TODO: aiming modes.\n # TURRET_TORQUE = \n\n if self.mode == self.MODE_RELATIVE:\n # attempt to cancel angular velocity.\n inertia = body.angular_velocity * body.moment\n angular_velocity =body.angular_velocity\n torque = clamp((target_angular_velocity - angular_velocity) * body.moment * invTimeStep, -SHIP_TURN_TORQUE, SHIP_TURN_TORQUE)\n else:\n torque = turn * SHIP_TURN_TORQUE\n\n body.torque = torque\n\n\nclass ShipController(Controller):\n def __init__(self, name=None, parent=None):\n super(ShipController, self).__init__(InputMap(), name=name, parent=parent)\n self.sas_enabled = True\n self.inertial_damping_enabled = True\n\n def step(self):\n input = self.input_map.get_input()\n invTimeStep = 30.0\n\n body = self.get_node('body')\n\n if body is None:\n print('could not find body.', self)\n return\n\n SHIP_SCALE = 0.01\n SHIP_FORCE_SCALE = SHIP_SCALE * SHIP_SCALE;\n SHIP_TORQUE_SCALE = SHIP_FORCE_SCALE * 1.0\n \n SHIP_TURN_TORQUE = 12.0 * SHIP_SCALE * SHIP_SCALE\n SHIP_FORWARD_FORCE = 15.0 * SHIP_FORCE_SCALE\n SHIP_REVERSE_FORCE = 7.5 * SHIP_FORCE_SCALE\n SHIP_LATERAL_FORCE = 5.0 * SHIP_FORCE_SCALE\n SHIP_TARGET_ANGULAR_VELOCITY = math.pi\n \n body.reset_forces()\n\n turn = 0\n if input.left:\n turn += 1\n \n if input.right:\n turn -= 1\n\n torque = 0.0\n # print(turn)\n target_angular_velocity = turn * SHIP_TARGET_ANGULAR_VELOCITY;\n if self.sas_enabled:\n # attempt to cancel angular velocity.\n angular_velocity = body.angular_velocity\n inertia = 
angular_velocity * body.moment\n # print(angular_velocity, body.moment, target_angular_velocity, inertia)\n torque = clamp((target_angular_velocity - angular_velocity) * body.moment * invTimeStep, -SHIP_TURN_TORQUE, SHIP_TURN_TORQUE)\n else:\n torque = turn * SHIP_TURN_TORQUE\n\n print(torque)\n body.torque = torque\n \n # calculate and apply force\n centralInput = 0\n lateralInput = 0\n \n if input.forward:\n centralInput += 1\n \n if input.back:\n centralInput -= 1\n \n if input.strafe_left:\n lateralInput += 1\n \n if input.strafe_right:\n lateralInput -= 1\n\n forwardVector = body.local_to_world((1.0,0.0))\n leftVector = Vec2(-forwardVector.y, forwardVector.x)\n \n velocity = body.velocity\n mass = body.mass\n\n if self.inertial_damping_enabled and centralInput == 0:\n # figure out how much force we need to apply to kill our central velocity\n d = forwardVector.dot(-velocity)\n forwardForce = clamp(d * invTimeStep * mass, -SHIP_REVERSE_FORCE, SHIP_FORWARD_FORCE)\n centralForce = forwardForce * forwardVector\n else:\n f = -SHIP_REVERSE_FORCE if centralInput == -1 else (SHIP_FORWARD_FORCE if centralInput == 1 else 0.0)\n centralForce = forwardVector * f\n \n if self.inertial_damping_enabled and lateralInput == 0:\n # figure out how much force we need to apply to kill our central velocity\n d = leftVector.dot(-velocity)\n lateralForce = clamp(d * invTimeStep * mass, -SHIP_LATERAL_FORCE, SHIP_LATERAL_FORCE)\n lateralForce = leftVector * lateralForce\n else:\n lateralForce = leftVector * (float(lateralInput) * SHIP_LATERAL_FORCE)\n\n force = centralForce + lateralForce\n \n body.apply_force(force)\n\n\nclass GameControllerNode(ControllerNode):\n def key_callback(self, window, key, scancode, action, mode):\n if key == glfw.KEY_ESCAPE and action == glfw.PRESS:\n window.setShouldClose(True)\n\n def setup(self):\n super(GameControllerNode, self).setup()\n # set up\n window = WindowNode(name='window', parent=self)\n window.open()\n window.make_context_current()\n\n gl.init()\n err = gl.getError()\n if err:\n print(\"WINDOW OPEN ERROR:\", err)\n\n scene = physicsnode.SceneNode(name='scene', parent=window) # scenenode.SceneNode(name='scene')\n\n debugdraw_renderer = debugdraw.RendererNode(name='debugdraw_renderer', parent=scene)\n sprite_renderer = spritenode.RendererNode(name=\"sprite_renderer\", parent=scene)\n\n camera_transform = transformnode.TransformNode3D(name='camera_transform', parent=scene, m=Mat4.Translation(Vec3(0.0, 0.0, 3.0)))\n camera = debugdraw.CameraNode(name='camera', parent=camera_transform)\n camera.make_current()\n # camera_controller = CameraController(input_map=InputMap(), parent=camera_transform)\n\n # create_wheelchair_guy(scene)\n create_ship(scene)\n\n window.on('key', self.key_callback)\n\n gl.enable(gl.DEPTH_TEST)\n gl.enable(gl.BLEND)\n gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)\n\n def step(self):\n window = self.get_node('/window')\n scene = self.get_node('/window/scene')\n sprite_renderer = self.get_node('/window/scene/sprite_renderer')\n debugdraw_renderer = self.get_node('/window/scene/debugdraw_renderer')\n\n if window.should_close():\n sys.exit()\n\n # trigger the callbacks\n glfw.pollEvents()\n\n window.make_context_current()\n\n framebuffer_width, framebuffer_height = window.get_framebuffer_size()\n\n gl.viewport(0, 0, framebuffer_width, framebuffer_height)\n gl.clearColor(0.2, 0.3, 0.3, 1.0)\n gl.clear(gl.COLOR_BUFFER_BIT|gl.DEPTH_BUFFER_BIT)\n\n gl.enable(gl.BLEND)\n gl.enable(gl.DEPTH_TEST)\n # sprite_renderer.draw()\n\n gl.disable(gl.BLEND)\n 
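`clamp` is imported from `nodetree.util`, which is not included in this record. The conventional definition, plus the velocity-damping pattern `ShipController.step` applies to torque, looks like the sketch below (the constants are illustrative, not taken from the game):

```python
# Assumed definition of nodetree.util.clamp and the damping pattern above.
def clamp(value, lo, hi):
    return max(lo, min(hi, value))

# Torque needed to reach a target angular velocity in one step, capped at
# what the ship can actually produce.
def damping_torque(target_w, current_w, moment, inv_dt, max_torque):
    return clamp((target_w - current_w) * moment * inv_dt,
                 -max_torque, max_torque)

print(damping_torque(0.0, 3.14, 1.0, 30.0, 0.0012))  # saturates at -0.0012
```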
gl.disable(gl.DEPTH_TEST)\n debugdraw_renderer.draw() # draw the debug draw scene on top of the other stuff.\n scene.step(1.0/30.0, iterations=10)\n\n #print(scene.get_node('transform').position, scene.get_node('transform').angle)\n\n window.swap_buffers()\n\n\ndef create_ship(scene):\n ship = ShipController(parent=scene, name='ship')\n\n body = physicsnode.BodyNode(\n name='body',\n mass=1.0,\n moment=1.0,\n parent=ship,\n angular_velocity=0.0)\n \n\n shape = physicsnode.CircleShapeNode(\n name='shape',\n parent=body,\n radius=0.25\n )\n\n shape_debug = debugdraw.ShapeNode.Circle(\n radius=0.25,\n parent=shape,\n nverts=5\n )\n\n print('created ship:', shape_debug)\n\n # turret = TurretController(parent=ship, name='turret')\n\n\ndef main():\n game_controller = GameControllerNode(name='root')\n game_controller.setup()\n while 1:\n game_controller.step()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"game2.py","file_name":"game2.py","file_ext":"py","file_size_in_byte":9280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428656194","text":"\"\"\"A script that render requests to html file\"\"\"\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport datetime\nimport json\nimport requests\nimport random\nimport os\nfrom dotenv import load_dotenv\nfrom .forms import PostForm\nfrom django.http import HttpResponse\n\n\nload_dotenv()\n\ndef about(request):\n \"\"\"render landing page\"\"\"\n return render(request, 'weats_template/about.html')\n\ndef index(request):\n \"\"\"render main page\"\"\"\n return render(request, 'weats_template/main_page.html')\n\ndef register(request):\n \"\"\"render register page\"\"\"\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return HttpResponse(\"Register success\")\n else:\n form = PostForm()\n return render(request, 'weats_template/register.html', {'form': form})\n\ndef search(request):\n \"\"\"render restaurant database\"\"\"\n today = datetime.datetime.today()\n weekday = today.strftime('%A')\n month = today.strftime('%b %d')\n city = request.GET.get('q')\n city_list = [\"san francisco\", \"San Francisco\", \"Oakland\", \"oakland\", \"Berkeley\", \"berkeley\"]\n if city in city_list:\n url = 'http://api.openweathermap.org/data/2.5/weather'\n api_key = os.getenv('WEATHER_API')\n params = {'appid': api_key, 'q': city, 'units': 'imperial'}\n r = requests.get(url, params=params)\n temp = r.json().get('main').get('temp')\n wind = r.json().get('wind').get('speed')\n str = \"The weather today in {} is {}\".format(city, temp)\n restaurant_list = random.sample(analyze(temp, city), k=8)\n return render(request, 'weats_template/search.html', {'str': str, 'temp': temp, 'wind': wind, 'restaurant_list': restaurant_list, 'city': city, \"weekday\": weekday, \"month\": month})\n else:\n return render(request, 'weats_template/main_page.html')\n\ndef restaurant(term, city):\n \"\"\"retrieve restaurant database\"\"\"\n api_key = os.getenv('YELP_API')\n headers = {'Authorization': 'Bearer %s' % api_key}\n url='https://api.yelp.com/v3/businesses/search'\n params = {'term': term,'location': city, 'limit': 8}\n req=requests.get(url, params=params, headers=headers)\n return (req.json())\n\ndef analyze(temp, city):\n \"\"\"analyze weather and give the recommend retaurant\"\"\"\n restaurant_list = []\n if temp >= 80:\n food_list = ['salad', 'ice cream', 'fruit', 'smothie', 'boba tea', 'acai']\n for food in 
food_list:\n result = restaurant(food, city)\n for business in result.get('businesses'):\n detail_list = []\n detail_list.append(business.get('name'))\n detail_list.append(business.get('url'))\n detail_list.append(business.get('price'))\n detail_list.append(business.get('rating'))\n detail_list.append(business.get('image_url'))\n restaurant_list.append(detail_list)\n return restaurant_list\n elif temp > 60 and temp < 80:\n food_list = ['BBQ', 'sea food', 'steak', 'burger', 'banh mi', 'burrito',\n 'ice cream', 'fruit', 'smothie', 'boba tea', 'acai', 'spagetti', 'hot dogs']\n for food in food_list:\n result = restaurant(food, city)\n for business in result.get('businesses'):\n detail_list = []\n detail_list.append(business.get('name'))\n detail_list.append(business.get('url'))\n detail_list.append(business.get('price'))\n detail_list.append(business.get('rating'))\n detail_list.append(business.get('image_url'))\n restaurant_list.append(detail_list)\n return restaurant_list\n elif temp <= 60:\n food_list = ['soup', 'ramen', 'pho', 'stew', 'spagetti', 'grill']\n for food in food_list:\n result = restaurant(food, city)\n for business in result.get('businesses'):\n detail_list = []\n detail_list.append(business.get('name'))\n detail_list.append(business.get('url'))\n detail_list.append(business.get('price'))\n detail_list.append(business.get('rating'))\n detail_list.append(business.get('image_url'))\n restaurant_list.append(detail_list)\n return restaurant_list\n","sub_path":"weats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454066713","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Topmenu and the submenus are based of the example found at this location http://blog.skeltonnetworks.com/2010/03/python-curses-custom-menu/\n# The rest of the work was done by Matthew Bennett and he requests you keep these two mentions when you reuse the code :-)\n# Basic code refactoring by Andrew Scheller\n\nimport secondary_client\nimport socket\nimport json\nimport sys\n\nhost = \"localhost\"\nport = 4454\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host, port))\n\nfrom time import sleep\nimport curses, os #curses is the interface for capturing key presses on the menu, os launches the files\nfrom curses.textpad import Textbox, rectangle\nscreen = curses.initscr() #initializes a new window for capturing key presses\ncurses.noecho() # Disables automatic echoing of key presses (prevents program from input each key twice)\ncurses.cbreak() # Disables line buffering (runs each key as it is pressed rather than waiting for the return key to pressed)\ncurses.start_color() # Lets you use colors when highlighting selected menu option\ncurses.use_default_colors()\nscreen.keypad(1) # Capture input from keypad\n\n# Change this to use different colors when highlighting\ncurses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_WHITE) # Sets up color pair #1, it does black text with white background\ncurses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLUE)\n\nh = curses.color_pair(2) #h is the coloring for a highlighted menu option\nn = curses.A_NORMAL #n is the coloring for a non highlighted menu option\n\nprimary_menu = {\n 'title': \"Tertiary Messaging\", 'subtitle': \"Please select an option...\",\n 'options':[\n { 'title': \"Login\", 'command': 'login' },\n { 'title': \"Create Account\", 'command': 'create_user' },\n { 'title': \"Exit\", 'command': 'exit' },\n 
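`search()` above calls `random.sample(analyze(temp, city), k=8)`, which raises `ValueError` whenever the Yelp queries yield fewer than eight restaurants. Capping `k` is a cheap guard (a suggested fix, not code from the app):

```python
import random

# Guard against populations smaller than the requested sample size.
def pick_restaurants(candidates, k=8):
    return random.sample(candidates, k=min(k, len(candidates)))

print(pick_restaurants(['a', 'b', 'c']))  # returns all 3 instead of raising
```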
]\n}\n\nsession_menu = {\n 'title': \"Tertiary Messaging\", 'subtitle': \"Please select an option...\",\n 'options': [\n { 'title': \"Send Message\", 'command': 'send_message' },\n { 'title': \"Get New Messages\", 'command': 'get_new' },\n { 'title': \"Get All Messages\", 'command': 'get_all' },\n { 'title': \"Exit\", 'command': 'exit' },\n ]\n}\n\n# This function displays the appropriate menu and returns the option selected\ndef runmenu(menu):\n curses.cbreak()\n screen.keypad(1)\n\n optioncount = len(menu['options']) # how many options in this menu\n\n pos=0 #pos is the zero-based index of the hightlighted menu option. Every time runmenu is called, position returns to 0, when runmenu ends the position is returned and tells the program what opt$\n oldpos=None # used to prevent the screen being redrawn every time\n x = None #control for while loop, let's you scroll through options until return key is pressed then returns pos to program\n\n # Loop until return key is pressed\n while x !=ord('\\n'):\n if pos != oldpos:\n oldpos = pos\n screen.border(0)\n screen.addstr(2,2, menu['title'], curses.A_BOLD) # Title for this menu\n screen.addstr(4,2, menu['subtitle'], curses.A_BOLD) #Subtitle for this menu\n\n # Display all the menu items, showing the 'pos' item highlighted\n for index in range(optioncount):\n textstyle = n\n if pos==index:\n textstyle = h\n screen.addstr(5+index,4, \"%d - %s\" % (index+1, menu['options'][index]['title']), textstyle)\n textstyle = n\n if pos==optioncount:\n textstyle = h\n #screen.addstr(5+optioncount,4, \"%d - %s\" % (optioncount+1, lastoption), textstyle)\n screen.refresh()\n # finished updating screen\n\n x = screen.getch() # Gets user input\n\n # What is user input?\n if x >= ord('1') and x <= ord(str(optioncount+1)):\n pos = x - ord('0') - 1 # convert keypress back to a number, then subtract 1 to get index\n elif x == curses.KEY_DOWN: # down arrow\n if pos < optioncount - 1:\n pos += 1\n else: pos = 0\n elif x == curses.KEY_UP: # up arrow\n if pos > 0:\n pos += -1\n else: pos = optioncount - 1\n\n # return index of the selected item\n return pos\n\ndef get_credentials():\n screen.addstr(\"Username: \")\n screen.refresh()\n curses.echo()\n username = screen.getstr()\n curses.noecho()\n screen.addstr(\"Password: \")\n password = screen.getstr()\n\n return username, password\n\nclass InputHandler():\n def dispatch(self, command):\n return getattr(self, command)()\n\n def create_user(self):\n screen.clear()\n curses.echo()\n\n screen.addstr(\"Username: \")\n screen.refresh()\n username = screen.getstr()\n curses.noecho()\n\n screen.addstr(\"Password: \")\n screen.refresh()\n password = screen.getstr()\n\n private_key, public_key = secondary_client.generate_RSA()\n #secondary_client.create_user(username, password, public_key)\n\n if not os.path.isdir(\"{0}/.tertiary/{1}\".format(home_dir, username)):\n os.mkdir(\"{0}/.tertiary/{1}\".format(home_dir, username))\n id_rsa = open(\"{0}/.tertiary/{1}/id_rsa\".format(home_dir, username), \"w\")\n id_rsa.write(private_key)\n\n secondary_client.create_user(username, password, public_key)\n\n new_menu_options = {'menu': \"primary_menu\", 'clear': True}\n return new_menu_options\n\n def login(self):\n global current_user\n username, password = get_credentials()\n current_user = username\n\n screen.clear()\n\n auth_token = secondary_client.attempt_login(username, password)\n if auth_token:\n new_menu_options = {'menu': \"session_menu\", 'clear': True}\n else:\n screen.addstr(5, 5, \"Bad credentials...\")\n screen.refresh()\n 
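The menu script above initialises curses by hand and only restores the terminal via `curses.endwin()` on the normal exit path, so any uncaught exception leaves the shell in raw mode. `curses.wrapper` performs the same setup (cbreak, noecho, keypad, colors) and guarantees teardown; a minimal sketch:

```python
import curses

def main(stdscr):
    curses.curs_set(0)
    stdscr.addstr(0, 0, "Press any key to exit")
    stdscr.getch()

if __name__ == "__main__":
    curses.wrapper(main)  # restores echo/cbreak/keypad even on exceptions
```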
sleep(2)\n screen.clear()\n new_menu_options = {'menu': \"primary_menu\", 'clear': True}\n\n return new_menu_options\n\n def send_message(self):\n screen.clear()\n\n curses.echo()\n screen.addstr(\"To: \")\n dest = screen.getstr()\n\n screen.addstr(\"Message: \")\n msg = screen.getstr()\n\n curses.noecho()\n\n public_key = secondary_client.get_public_key(dest)\n\n if public_key == None:\n screen.clear()\n screen.addstr(\"No account by that name\")\n screen.refresh()\n else:\n encrypted_msg = secondary_client.encrypt_RSA(public_key, msg)\n result = secondary_client.send_message(dest, encrypted_msg)\n if result:\n screen.clear()\n screen.addstr(1, 1, \"Message Sent\")\n screen.refresh()\n sleep(2)\n else:\n screen.clear()\n screen.addstr(1, 1, \"Message failed to send\")\n screen.refresh()\n sleep(2)\n\n screen.clear()\n\n new_menu_options = {'menu': \"session_menu\", 'clear': True}\n return new_menu_options\n\n def get_new(self):\n #message_packet = secondary_client.get_new()\n #messages_json = json.loads(message_packet.replace('\\n', \"\\\\n\"))\n\n messages_array = secondary_client.get_new()\n\n if not len(messages_array):\n screen.clear()\n screen.addstr(12, 4, \"No New Messages\")\n screen.refresh()\n else:\n #plaintext_messages_json = secondary_client.decrypt_messages(messages_json, \"{0}/.tertiary/{1}/id_rsa\".format(home_dir, current_user))\n screen.clear()\n\n screen.addstr(12, 4, \"Messages-----------------\\\\\\>\\n\")\n screen.addstr(\"\")\n screen.addstr(\"\")\n screen.refresh()\n\n\n for msg in messages_array:\n header = msg[0]\n body = secondary_client.decrypt_message(msg[1], \"{0}/.tertiary/{1}/id_rsa\".format(home_dir, current_user))\n\n curr_y, curr_x = screen.getyx()\n screen.addstr(\"\")\n screen.addstr(\"\")\n screen.addstr(curr_y, 6, \"From: \" + header)\n screen.addstr(curr_y+1, 6, \"Message: \" + body)\n screen.addstr(\"\\n\")\n screen.addstr(\"\\n\")\n screen.refresh()\n\n new_menu_options = { 'menu': \"session_menu\", 'clear': False}\n return new_menu_options\n\n def get_all(self):\n messages_string = secondary_client.get_all_messages()\n messages_json = json.loads(messages_string.replace('\\n', \"\\\\n\"))\n if not len(messages_json):\n screen.clear()\n screen.addstr(12, 4, \"No Messages\")\n screen.refresh()\n else:\n plaintext_messages_json = secondary_client.decrypt_messages(messages_json, \"{0}/.tertiary/{1}/id_rsa\".format(home_dir, current_user))\n screen.clear()\n\n screen.addstr(12, 4, \"Messages-----------------\\\\\\>\\n\")\n screen.addstr(\"\\n\")\n screen.addstr(\"\\n\")\n for val in messages_json.values():\n curr_y, curr_x = screen.getyx()\n screen.addstr(\"\")\n screen.addstr(curr_y, 6, \"From: \" + val[0])\n screen.addstr(curr_y+1, 6, \"Msg: \" + val[1])\n screen.move(curr_y+3, curr_x)\n screen.refresh()\n new_menu_options = { 'menu': \"session_menu\", 'clear': False}\n return new_menu_options\n\n def exit(self):\n return \"exit\"\n\n# This function calls showmenu and then acts on the selected item\ndef processmenu(input_handler, **kwargs): ## Valid args: parent=None, clear=True\n menu = eval(kwargs['menu']) ## menu name is passed as string. 
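`secondary_client.generate_RSA` and `encrypt_RSA` are called above, but that module is not part of this record. One plausible implementation, sketched here with PyCryptodome and OAEP padding as an assumption about the missing module rather than its actual code:

```python
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP

# Assumed shape of secondary_client's RSA helpers (PyCryptodome, OAEP).
def generate_RSA(bits=2048):
    key = RSA.generate(bits)
    return key.export_key(), key.publickey().export_key()

def encrypt_RSA(public_key_pem, message):
    cipher = PKCS1_OAEP.new(RSA.import_key(public_key_pem))
    return cipher.encrypt(message.encode())

priv, pub = generate_RSA()
print(len(encrypt_RSA(pub, "hello")))  # 256 bytes for a 2048-bit key
```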
get the real thing\n optioncount = len(menu['options'])\n getin = runmenu(menu)\n new_menu_options = {'menu': menu, 'clear': True}\n\n if kwargs['clear']:\n screen.clear() #clears previous screen\n else:\n pass\n\n command = menu['options'][getin]['command']\n new_menu_options = input_handler.dispatch(command)\n\n curses.reset_prog_mode() # reset to 'current' curses environment\n curses.curs_set(1) # reset doesn't do this right\n curses.curs_set(0)\n\n\n return new_menu_options\n\n\"\"\"\nNew Flow - Important\n1) processmenu is first called with initial value. displays primary_menu\n2) processmenu returns the value selected by the user\n3) processmenu is called in a loop until it returns an exit value\n4) processmenu returns chosen option\n5) processmenu is called with chosen option\n\"\"\"\n\n\"\"\"\nHow the fuck do you save a user session...\nSo effing close...\nYou put it in the server\n\"\"\"\n\n\"\"\"\nPossible user flow:\n\n Client sends username and password to server\n If the login is a success:\n Server sends back authenticated token\n How does the client store an authenticated token?\n It doesn't, you put it in the fucking server\n\"\"\"\n\nhome_dir = os.path.expanduser(\"~\")\ncurrent_user = None\n\ndef main():\n handler = InputHandler()\n initial_menu_options = {'menu': \"primary_menu\", 'clear': True}\n menu_options = processmenu(handler, **initial_menu_options)\n\n while not menu_options == \"exit\":\n menu_options = processmenu(handler, **menu_options)\n\n\n screen.clear()\n curses.endwin() #VITAL! This closes out the menu system and returns you to the bash prompt.\n os.system('clear')\n\nmain()\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":11522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498857319","text":"import unittest\nimport requests\nimport re\n\nfrom MyTest.common.Cookie import GetCookie\n\nurl = \"http://localhost:8070/\"\n\n\nclass MyTestCase4(unittest.TestCase):\n\n def test_get_session(self):\n t = requests.get(url + \"login\")\n p = ''\n matchObj = re.search(p, t.text)\n if matchObj:\n csrf = matchObj.group(1)\n payload = {'username': '1', 'password': '1', '_csrf': csrf}\n resp = requests.post(url + \"login\", data=payload, cookies=t.cookies,allow_redirects=False)\n # print(resp.status_code)\n self.assertEqual(resp.status_code,302)\n GetCookie.COOKIE=resp.cookies\n\n def test_something(self):\n cookie=GetCookie.COOKIE\n if cookie:\n rp = requests.get(url + 'test/1111', cookies=cookie)\n print(rp.content)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/MyTest/test_4.py","file_name":"test_4.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"133725163","text":"#!/usr/bin/env python\n\n\"\"\"\nCopyright (c) 2011, Willow Garage, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of the Willow Garage, Inc. 
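In `test_4.py` above the CSRF pattern survives only as `p = ''`; the HTML tag it matched (presumably a hidden `_csrf` input) was evidently stripped, and `matchObj.group(1)` cannot work against an empty pattern. A sketch using a reconstructed pattern and a `requests.Session`, which persists cookies instead of threading them by hand (the regex is a guess at the stripped original):

```python
import re
import requests

# Reconstructed pattern; adjust to the login form's actual markup.
CSRF_RE = r'name="_csrf"\s+value="([^"]+)"'

def login(base_url, username, password):
    s = requests.Session()            # carries cookies across requests
    page = s.get(base_url + "login")
    token = re.search(CSRF_RE, page.text).group(1)
    payload = {"username": username, "password": password, "_csrf": token}
    return s, s.post(base_url + "login", data=payload, allow_redirects=False)
```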
nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES LOSS OF USE, DATA, OR PROFITS OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport roslib; roslib.load_manifest(\"interactive_markers\")\nimport rospy\nimport copy\n\nfrom interactive_markers.interactive_marker_server import *\nfrom interactive_markers.menu_handler import *\nfrom tf.broadcaster import TransformBroadcaster\nfrom visualization_msgs.msg import *\nfrom math import sin\n\nserver = None\nmarker_pos = 0\nmenu_handler = MenuHandler()\nbr = None\ncounter = 0\n\ndef frameCallback( msg ):\n global counter, br\n time = rospy.Time.now()\n br.sendTransform( (0, 0, sin(counter/140.0)*2.0), (0, 0, 0, 1.0), time, \"base_link\", \"moving_frame\" )\n counter += 1\n\ndef processFeedback( feedback ):\n s = \"Feedback from marker '\" + feedback.marker_name\n s += \"' / control '\" + feedback.control_name + \"'\"\n\n mp = \"\"\n if feedback.mouse_point_valid:\n mp = \" at \" + str(feedback.mouse_point.x)\n mp += \", \" + str(feedback.mouse_point.y)\n mp += \", \" + str(feedback.mouse_point.z)\n mp += \" in frame \" + feedback.header.frame_id\n\n if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK:\n rospy.loginfo( s + \": button click\" + mp + \".\" )\n elif feedback.event_type == InteractiveMarkerFeedback.MENU_SELECT:\n rospy.loginfo( s + \": menu item \" + str(feedback.menu_entry_id) + \" clicked\" + mp + \".\" )\n elif feedback.event_type == InteractiveMarkerFeedback.POSE_UPDATE:\n rospy.loginfo( s + \": pose changed\")\n# TODO\n# << \"\\nposition = \"\n# << feedback.pose.position.x\n# << \", \" << feedback.pose.position.y\n# << \", \" << feedback.pose.position.z\n# << \"\\norientation = \"\n# << feedback.pose.orientation.w\n# << \", \" << feedback.pose.orientation.x\n# << \", \" << feedback.pose.orientation.y\n# << \", \" << feedback.pose.orientation.z\n# << \"\\nframe: \" << feedback.header.frame_id\n# << \" time: \" << feedback.header.stamp.sec << \"sec, \"\n# << feedback.header.stamp.nsec << \" nsec\" )\n elif feedback.event_type == InteractiveMarkerFeedback.MOUSE_DOWN:\n rospy.loginfo( s + \": mouse down\" + mp + \".\" )\n elif feedback.event_type == InteractiveMarkerFeedback.MOUSE_UP:\n rospy.loginfo( s + \": mouse up\" + mp + \".\" )\n server.applyChanges()\n\n\ndef makeBox( msg ):\n marker = Marker()\n\n marker.type = Marker.CUBE\n marker.scale.x = msg.scale * 0.45\n marker.scale.y = msg.scale * 0.45\n marker.scale.z = msg.scale * 0.45\n marker.color.r = 0.5\n marker.color.g = 0.5\n marker.color.b = 0.5\n marker.color.a = 1.0\n\n return marker\n\ndef makeBoxControl( msg ):\n control = InteractiveMarkerControl()\n control.always_visible = True\n control.markers.append( makeBox(msg) )\n msg.controls.append( control )\n return 
control\n\n\n#####################################################################\n# Marker Creation\n\n\ndef makeRotateMarker():\n global marker_pos\n int_marker = InteractiveMarker()\n int_marker.header.frame_id = \"/base_link\"\n int_marker.pose.position.y = -3.0 * marker_pos\n marker_pos += 1\n int_marker.scale = 1\n\n int_marker.name = \"rotate\"\n int_marker.description = \"Rotate\"\n\n makeBoxControl(int_marker)\n\n control = InteractiveMarkerControl()\n control.orientation.w = 1\n control.orientation.x = 0\n control.orientation.y = 1\n control.orientation.z = 0\n control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS\n control.orientation_mode = InteractiveMarkerControl.FIXED\n int_marker.controls.append(control)\n\n server.insert(int_marker, processFeedback)\n\n\nif __name__==\"__main__\":\n rospy.init_node(\"basic_controls\")\n br = TransformBroadcaster()\n \n # create a timer to update the published transforms\n rospy.Timer(rospy.Duration(0.01), frameCallback)\n\n server = InteractiveMarkerServer(\"basic_controls\")\n\n makeRotateMarker( )\n\n server.applyChanges()\n\n rospy.spin()\n\n","sub_path":"scripts/basic_controls.py","file_name":"basic_controls.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467415702","text":"#!/usr/bin/env python3\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\n\n@app.route(\"/webhooks/answer\")\ndef answer_call():\n ncco = [\n {\n \"action\": \"connect\",\n \"from\": \"VONAGE_NUMBER\",\n \"endpoint\": [{\n \"type\": 'phone',\n \"number\": \"YOUR_SECOND_NUMBER\"\n }]\n }\n ]\n return jsonify(ncco)\n\n\nif __name__ == '__main__':\n app.run(port=3000)\n","sub_path":"voice/connect-an-inbound-call.py","file_name":"connect-an-inbound-call.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"270632547","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015 Red Hat\n# Licensed under The MIT License (MIT)\n# http://opensource.org/licenses/MIT\n#\n\nfrom __future__ import print_function\n\nimport sys\nimport argparse\nimport beanbag\nimport requests\nimport os\nimport os.path\nimport logging\nimport imp\nimport json\n\n# The client supports Bash completion if argcomplete Python package is\n# installed. To enable it, run this in your terminal (assuming pdc is somewhere\n# on path).\n#\n# eval \"$(register-python-argcomplete pdc)\"\n#\n# This is only a temporary solution, when the client is packaged, a completion\n# file should be shipped with it and installed to /etc/bash_completion.d/.\ntry:\n import argcomplete\nexcept ImportError:\n class argcomplete(object):\n @classmethod\n def autocomplete(*args):\n pass\n\nimport pdc_client\n\n\n# Default path to plugins. 
This line will be replaced when installing with real\n# install plugin path (usually '/usr/share/pdc-client/plugins').\nDEFAULT_PLUGIN_DIR = os.path.join(os.path.dirname(__file__), 'plugins')\n\n# A list of paths to directories where plugins should be loaded from.\n# The purpose of the plugins is to extend the default behaviour.\nPLUGIN_DIRS = [path for path in\n os.getenv('PDC_CLIENT_PLUGIN_PATH', '').split(':')\n if path]\nif not PLUGIN_DIRS:\n PLUGIN_DIRS = [DEFAULT_PLUGIN_DIR]\n\nDEFAULT_PLUGINS = [\n 'base_product.py',\n 'build_image_rtt_tests.py',\n 'build_images.py',\n 'component.py',\n 'compose_image_rtt_tests.py',\n 'compose.py',\n 'compose_full_import.py',\n 'compose_tree_locations.py',\n 'contact.py',\n 'group_resource_permissions.py',\n 'image.py',\n 'permission.py',\n 'product.py',\n 'product_version.py',\n 'release.py',\n 'release_variant.py',\n 'repo.py',\n 'rpm.py'\n]\n\nCONFIG_PLUGINS_KEY_NAME = 'plugins'\n\n\nclass Runner(object):\n def __init__(self):\n self.raw_plugins = []\n self.plugins = []\n self.logger = logging.getLogger('pdc')\n\n def load_plugins(self):\n config = None\n server = None\n idx_s, idx_server = (None, None)\n plugins_set = set(DEFAULT_PLUGINS)\n args = sys.argv[1:]\n try:\n idx_s = args.index('-s')\n except (ValueError, IndexError):\n pass\n try:\n idx_server = args.index('--server')\n except (ValueError, IndexError):\n pass\n try:\n server = args[max(idx_s, idx_server) + 1]\n except TypeError:\n pass\n\n if server is not None:\n try:\n config = pdc_client.server_configuration(server)\n except pdc_client.config.ServerConfigError as e:\n self.logger.error(e)\n sys.exit(1)\n\n if config:\n plugins = config.get(CONFIG_PLUGINS_KEY_NAME, [])\n if not isinstance(plugins, list):\n raise TypeError('Plugins must be a list')\n plugins_set.update(set(plugins))\n\n for dir in PLUGIN_DIRS:\n self.logger.debug('Loading plugins from {0}'.format(dir))\n for name in os.listdir(dir):\n if not name.endswith('.py') or name not in plugins_set:\n continue\n try:\n module_name = name[:-3]\n file, pathname, description = imp.find_module(module_name, [dir])\n plugin = imp.load_module(module_name, file, pathname, description)\n self.logger.debug('Loaded plugin {0}'.format(module_name))\n self.raw_plugins.append(plugin)\n if hasattr(plugin, 'PLUGIN_CLASSES'):\n for p in plugin.PLUGIN_CLASSES:\n self.logger.debug('Instantiating {0}'.format(p.__name__))\n self.plugins.append(p(self))\n except Exception as e:\n self.logger.error('Failed to load plugin \"{0}\": {1}'.format(module_name, e))\n finally:\n if file:\n file.close()\n\n def run_hook(self, hook, *args, **kwargs):\n \"\"\"\n Loop over all plugins and invoke function `hook` with `args` and\n `kwargs` in each of them. 
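`load_plugins` above relies on the `imp` module, deprecated since Python 3.4 and removed in 3.12 (the client is Python 2 era, note the `ConfigParser` import). If it were ported, the `find_module`/`load_module` pair maps onto `importlib` like this:

```python
import importlib.util
import os

# importlib equivalent of imp.find_module + imp.load_module.
def load_plugin(module_name, directory):
    path = os.path.join(directory, module_name + ".py")
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
```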
If the plugin does not have the function, it\n is skipped.\n \"\"\"\n for plugin in self.raw_plugins:\n if hasattr(plugin, hook):\n self.logger.debug('Calling hook {0} in plugin {1}'.format(hook, plugin.__name__))\n getattr(plugin, hook)(*args, **kwargs)\n\n def setup(self):\n self.load_plugins()\n\n self.parser = argparse.ArgumentParser(description='PDC Client')\n self.parser.add_argument('-s', '--server', default='stage',\n help='API URL or shortcut from config file')\n\n ssl_group = self.parser.add_mutually_exclusive_group()\n ssl_group.add_argument('-k', '--insecure', action='store_true',\n help='Disable SSL certificate verification')\n # ca-cert corresponds to requests session verify attribute:\n # http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification\n ssl_group.add_argument(\"--ca-cert\", help=\"Path to CA certificate file or directory\")\n\n self.parser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)\n self.parser.add_argument('--json', action='store_true',\n help='display output as JSON')\n self.parser.add_argument('--page-size', dest='page_size', type=int,\n help='change page size in response, -1 means that get all pages of data in one request')\n self.parser.add_argument('--page', dest='page', type=int,\n help='change page in response')\n self.parser.add_argument('--version', action='version',\n version='%(prog)s ' + pdc_client.__version__)\n\n subparsers = self.parser.add_subparsers(metavar='COMMAND')\n subparsers.required = True\n\n for plugin in sorted(self.plugins):\n plugin._before_register(subparsers)\n plugin.register()\n\n argcomplete.autocomplete(self.parser)\n\n def run(self, args=None):\n self.args = self.parser.parse_args(args=args)\n if self.args.insecure:\n requests.packages.urllib3.disable_warnings(\n requests.packages.urllib3.exceptions.InsecureRequestWarning)\n ssl_verify = False\n elif self.args.ca_cert:\n ssl_verify = self.args.ca_cert\n else:\n ssl_verify = None\n\n try:\n self.client = pdc_client.PDCClientWithPage(self.args.server, page_size=self.args.page_size, ssl_verify=ssl_verify, page=self.args.page)\n except pdc_client.config.ServerConfigError as e:\n self.logger.error(e)\n sys.exit(1)\n\n try:\n self.args.func(self.args)\n except beanbag.BeanBagException as ex:\n print(\"Server returned following error: [{0}] {1}\".format(ex.response.status_code, ex.response.reason), file=sys.stderr)\n print(\"Details: \", end='', file=sys.stderr)\n try:\n data = ex.response.json()\n if len(data) == 1 and 'detail' in data:\n print(data['detail'], file=sys.stderr)\n else:\n print(\"\", file=sys.stderr)\n json.dump(data, sys.stderr, indent=2,\n sort_keys=True, separators=(\",\", \": \"))\n except Exception:\n # response was not JSON\n print('Failed to parse error response.', file=sys.stderr)\n sys.exit(1)\n","sub_path":"pdc_client/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"101446657","text":"import json\n\nfrom service.tests.functional import MonitoringTestCase\n\n\nclass TestAlarm(MonitoringTestCase):\n ENDPOINT = '/nbi/monitoring/api/alarms/'\n\n def test_get_alarms(self):\n \"\"\"\n Test that validates getting all alarms\n It asserts the response code 200, the default alarm limit 5 and the fields in an alarm:\n metrics\n state\n alarm_definition\n updated_timestamp\n created_timestamp\n state_updated_timestamp\n id\n :return:\n \"\"\"\n result = self.app.get(TestAlarm.ENDPOINT, 
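`run_hook` above is plain `getattr` dispatch over the loaded plugin modules. A stripped-down, self-contained model of the same mechanism, using an object in place of a module:

```python
# Minimal model of the hook mechanism: call `hook` on anything that has it.
class Plugin:
    def on_load(self, name):
        print("loaded", name)

def run_hook(plugins, hook, *args):
    for p in plugins:
        if hasattr(p, hook):
            getattr(p, hook)(*args)

run_hook([Plugin()], "on_load", "demo")  # prints: loaded demo
```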
headers={'X-Auth-Token': self.cloud_admin})\n self.assertEqual(result.status, 200)\n data = json.loads(result.body.decode('utf-8'))\n self.assertTrue('alarms' in data)\n for alarm in data.get('alarms'):\n for field in ['metrics', 'state', 'alarm_definition', 'updated_timestamp', 'created_timestamp',\n 'state_updated_timestamp', 'id']:\n self.assertTrue(field in alarm)\n self.assertTrue(len(data.get('alarms')) <= 5)\n\n def test_get_alarms_filter(self):\n \"\"\"\n Test that validates getting an alarm using a filter\n It asserts the response code 200, the filtered information and the limit\n :return:\n \"\"\"\n result = self.app.get(TestAlarm.ENDPOINT, headers={'X-Auth-Token': self.cloud_admin},\n params={'limit': 2, 'state': 'OK'})\n\n self.assertTrue(result.status, 200)\n data = json.loads(result.body.decode('utf-8'))\n self.assertTrue('alarms' in data)\n for alarm in data.get('alarms'):\n self.assertTrue(alarm.get('state'), 'OK')\n self.assertTrue(len(data.get('alarms')) <= 2)\n\n def test_get_alarms_unauth(self):\n \"\"\"\n Test that validates an unauthenticated user can't access the alarms\n It asserts the response code 401\n :return:\n \"\"\"\n result = self.app.get(TestAlarm.ENDPOINT, params={'limit': 2, 'state': 'OK'}, status=401)\n self.assertTrue(result.status, 401)\n\n def test_specific_alarm(self):\n \"\"\"\n Test that validates getting a specific alarm\n It asserts the response code 200 and the fields in the alarm:\n metrics\n state\n alarm_definition\n updated_timestamp\n created_timestamp\n state_updated_timestamp\n id\n :return:\n \"\"\"\n result = self.app.get(TestAlarm.ENDPOINT + '000072da-53f7-434e-8d89-77c4b77f1636',\n headers={'X-Auth-Token': self.cloud_admin})\n self.assertTrue(result.status, 200)\n data = json.loads(result.body.decode('utf-8'))\n for field in ['metrics', 'state', 'alarm_definition', 'updated_timestamp', 'created_timestamp',\n 'state_updated_timestamp', 'id']:\n self.assertTrue(field in data)\n\n def test_get_invalid_alarm(self):\n \"\"\"\n Test that validates a user can't collect an invalid alarm id\n It asserts the response code 404\n :return:\n \"\"\"\n result = self.app.get(TestAlarm.ENDPOINT + '000210ee', status=404,\n headers={'X-Auth-Token': self.cloud_admin})\n self.assertEqual(result.status, 404)\n","sub_path":"MonitoringService/service/tests/functional/alarm_test.py","file_name":"alarm_test.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66493588","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/qtalchemy/widgets/street_address_widget.py\n# Compiled at: 2013-09-07 09:08:02\nfrom PySide import QtGui, QtCore\nimport re\nfrom qtalchemy import *\nfrom .button_edit import *\n\ndef is_country(s):\n \"\"\"\n Return true if the string s looks like a country name.\n\n We only check that the string doesn't have numbers in it (very lame for the moment).\n \"\"\"\n return re.search('[^ a-zA-Z]', s) is None\n\n\ndef city_state(s):\n \"\"\"\n >>> city_state('bethlehem pa')\n ('Bethlehem', 'PA')\n >>> city_state('seattle, washington')\n ('Seattle', 'Washington')\n >>> city_state('New york, new york')\n ('New York', 'New York')\n \"\"\"\n s = s.strip()\n if s.find(',') > -1:\n city, state = s.rsplit(',', 2)\n elif s.find(' ') > -1:\n city, state = s.rsplit(' ', 2)\n else:\n city, state = s, None\n city = 
city.strip()\n state = state.strip()\n if state is not None and len(state) == 2:\n state = state.upper()\n else:\n state = state.title()\n city = city.title()\n return (city, state)\n\n\ndef parse_address(addr):\n \"\"\"\n Parses a single string into street, city, state, zip and country. \n Semi-colons, new lines, and carriage returns are all recognized as logical \n line breaks. Parsing proceeds roughly from right to left recognizing the \n country first (if there), zip/postal code, state/province, city, and finally \n street address components. Two street addresses are recognized for PO Box \n lines or other street details.\n\n >>> parse_address('5522 Penelope Parkway; dayton, me 12345')\n ('5522 Penelope Parkway', None, 'Dayton', 'ME', '12345', 'USA')\n >>> parse_address('PO box 14; 103 Kaiser Lane; Marksville, AK 98765')\n ('PO Box 14', '103 Kaiser Lane', 'Marksville', 'AK', '98765', 'USA')\n >>> parse_address('531 fiddley blvd; Marksville, AB K1A 0B1')\n ('531 Fiddley Blvd', None, 'Marksville', 'AB', 'K1A 0B1', 'Canada')\n \"\"\"\n lines = [ l for l in re.split('(;|\\n|\\r)', addr) if l not in list(';\\n\\r') ]\n address1 = address2 = city = state = zip = country = None\n items = ('street', 'zip', 'country')\n while len(lines) > 0 and len(items) > 0:\n if items[(-1)] == 'country':\n if is_country(lines[(-1)]):\n country = lines[(-1)]\n lines = lines[:-1]\n items = items[:-1]\n continue\n elif items[(-1)] == 'zip':\n if re.search('[0-9]{5}$', lines[(-1)].strip()):\n if country is None:\n country = 'USA'\n zip = lines[(-1)].strip()[-5:]\n city, state = city_state(lines[(-1)].strip()[:-5])\n lines = lines[:-1]\n items = items[:-1]\n elif re.search('[0-9]{5}-[0-9]{4}$', lines[(-1)].strip()):\n if country is None:\n country = 'USA'\n zip = lines[(-1)].strip()[-10:]\n city, state = city_state(lines[(-1)].strip()[:-10])\n lines = lines[:-1]\n items = items[:-1]\n elif re.search('[0-9a-zA-Z]{3} [0-9a-zA-Z]{3}$', lines[(-1)].strip()):\n if country is None:\n country = 'Canada'\n zip = lines[(-1)].strip()[-7:]\n city, state = city_state(lines[(-1)].strip()[:-7])\n lines = lines[:-1]\n items = items[:-1]\n else:\n items = items[:-1]\n continue\n else:\n if len(lines) == 1:\n address1 = lines[0].strip().title()\n elif len(lines) == 2:\n address1, address2 = lines[0].strip().title(), lines[1].strip().title()\n else:\n raise Exception(\"address can't be parsed\")\n lines = []\n\n if address1 is not None and address1.startswith('Po Box'):\n address1 = 'PO Box' + address1[6:]\n if address2 is not None and address2.startswith('Po Box'):\n address2 = 'PO Box' + address2[6:]\n return (\n address1, address2, city, state, zip, country)\n\n\ndef concat_address(addr1, addr2, city, state, zip, country, linebreak='\\n'):\n \"\"\"\n This reverses the parsing action of :func:parse_address joining the \n components of an address into a single string.\n \n >>> concat_address('230 E. Main Street', None, 'Bethlehem', 'OH', '18030', '', linebreak='; ')\n '230 E. 
Main Street; Bethlehem OH 18030'\n \"\"\"\n lines = []\n if addr1 not in (None, ''):\n lines.append(addr1)\n if addr2 not in (None, ''):\n lines.append(addr2)\n lines.append((' ').join([ v for v in (city, state, zip) if v not in (None, '') ]))\n if country not in (None, ''):\n lines.append(country)\n return linebreak.join(lines)\n\n\nclass StreetAddressEdit(QtGui.QStackedWidget):\n addressParsed = Signal(name='addressParsed')\n\n def __init__(self, parent=None):\n QtGui.QStackedWidget.__init__(self, parent)\n self.concat_page = QtGui.QWidget()\n self.concat_page.setObjectName('concat_page')\n vbox = QtGui.QVBoxLayout(self.concat_page)\n vbox.setContentsMargins(1, 1, 1, 1)\n self.unparsedAddress_edit = LayoutWidget(vbox, QtGui.QTextEdit(self.concat_page))\n self.unparsedAddress_edit.setObjectName('unparsedAddress_edit')\n hbox = LayoutLayout(vbox, QtGui.QHBoxLayout())\n self.btnParse = LayoutWidget(hbox, QtGui.QPushButton('Parse (F4)', self.concat_page))\n size = self.btnParse.minimumSize()\n size.setWidth(140)\n self.btnParse.setMinimumSize(size)\n hbox.addItem(QtGui.QSpacerItem(375, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum))\n self.addWidget(self.concat_page)\n self.parsed_page = QtGui.QWidget()\n grid = QtGui.QGridLayout(self.parsed_page)\n grid.setContentsMargins(0, 0, 0, 0)\n grid.setObjectName('gridLayout')\n self.address1_edit = QtGui.QLineEdit(self.parsed_page)\n self.address1_edit.setObjectName('address1_edit')\n grid.addWidget(self.address1_edit, 0, 0, 1, 3)\n self.address2_edit = QtGui.QLineEdit(self.parsed_page)\n self.address2_edit.setObjectName('address2_edit')\n grid.addWidget(self.address2_edit, 1, 0, 1, 3)\n self.city_edit = QtGui.QLineEdit(self.parsed_page)\n self.city_edit.setObjectName('city_edit')\n grid.addWidget(self.city_edit, 3, 0, 1, 1)\n self.state_edit = QtGui.QLineEdit(self.parsed_page)\n self.state_edit.setMaximumSize(QtCore.QSize(40, 16777215))\n self.state_edit.setObjectName('state_edit')\n grid.addWidget(self.state_edit, 3, 1, 1, 1)\n self.zip_edit = QtGui.QLineEdit(self.parsed_page)\n self.zip_edit.setMaximumSize(QtCore.QSize(100, 16777215))\n self.zip_edit.setObjectName('zip_edit')\n grid.addWidget(self.zip_edit, 3, 2, 1, 1)\n self.country_edit = QtGui.QLineEdit(self.parsed_page)\n self.country_edit.setObjectName('country_edit')\n grid.addWidget(self.country_edit, 4, 1, 1, 2)\n self.btnConcatenate = QtGui.QPushButton('Concatenate (F4)', self.parsed_page)\n self.btnConcatenate.setObjectName('btnConcatenate')\n size = self.btnConcatenate.minimumSize()\n size.setWidth(140)\n self.btnConcatenate.setMinimumSize(size)\n grid.addWidget(self.btnConcatenate, 4, 0, 1, 1, QtCore.Qt.AlignLeft)\n self.addWidget(self.parsed_page)\n self.parseAct = QtGui.QAction('parse/concat', self)\n self.parseAct.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_F4))\n self.parseAct.triggered.connect(self.parse_or_concat)\n self.addAction(self.parseAct)\n self.btnParse.clicked.connect(self.toParsed)\n self.btnConcatenate.clicked.connect(self.toConcat)\n\n def parse_or_concat(self):\n if self.currentIndex() == 0:\n self.toParsed()\n else:\n self.toConcat()\n\n def toParsed(self):\n try:\n address1, address2, city, state, zip, country = parse_address(self.unparsedAddress_edit.toPlainText())\n self.address1_edit.setText(address1)\n self.address2_edit.setText(address2)\n self.city_edit.setText(city)\n self.state_edit.setText(state)\n self.zip_edit.setText(zip)\n self.country_edit.setText(country)\n self.setCurrentIndex(1)\n self.addressParsed.emit()\n except Exception as e:\n 
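The doctests above double as a specification; a quick round-trip check shows `concat_address` inverting `parse_address`. The import path is an assumption based on the record's file path, and it requires the qtalchemy package plus PySide to be installed since the same module defines Qt widgets:

```python
# Assumed import path; the module also pulls in PySide at import time.
from qtalchemy.widgets.street_address_widget import parse_address, concat_address

parts = parse_address("PO box 14; 103 Kaiser Lane; Marksville, AK 98765")
print(parts)
# ('PO Box 14', '103 Kaiser Lane', 'Marksville', 'AK', '98765', 'USA')
print(concat_address(*parts, linebreak="; "))
# 'PO Box 14; 103 Kaiser Lane; Marksville AK 98765; USA'
```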
QtGui.QMessageBox.information(self.parent(), QtGui.QApplication.applicationName(), str(e))\n\n def toConcat(self):\n addr = concat_address(self.address1_edit.text(), self.address2_edit.text(), self.city_edit.text(), self.state_edit.text(), self.zip_edit.text(), self.country_edit.text())\n self.unparsedAddress_edit.setPlainText(addr)\n self.setCurrentIndex(0)\n\n def implantProperty(self, prop, v):\n if self.currentIndex() == 0:\n self.toParsed()\n getattr(self, prop + '_edit').setText(v)\n\n def extractProperty(self, prop):\n if self.currentIndex() == 0:\n t = parse_address(self.unparsedAddress_edit.toPlainText())\n index = ('address1,address2,city,state,zip,country').split(',').index(prop)\n return t[index]\n else:\n return getattr(self, prop + '_edit').text()\n\n address1 = Property('QString', lambda self: self.extractProperty('address1'), lambda self, v: self.implantProperty('address1', v))\n address2 = Property('QString', lambda self: self.extractProperty('address2'), lambda self, v: self.implantProperty('address2', v))\n city = Property('QString', lambda self: self.extractProperty('city'), lambda self, v: self.implantProperty('city', v))\n state = Property('QString', lambda self: self.extractProperty('state'), lambda self, v: self.implantProperty('state', v))\n zip = Property('QString', lambda self: self.extractProperty('zip'), lambda self, v: self.implantProperty('zip', v))\n country = Property('QString', lambda self: self.extractProperty('country'), lambda self, v: self.implantProperty('country', v))\n\n\nclass StreetAddressYoke(InputYoke):\n\n def __init__(self, mapper, attr):\n InputYoke.__init__(self, mapper)\n self.attr = attr\n user_attr = getattr(self.mapper.cls, self.attr)\n for x in user_attr.my_cols:\n mapper.reverse_yoke(x, self)\n\n self.trapRecurse = 0\n\n def Factory(self):\n self.widget = StreetAddressEdit()\n self.widget.addressParsed.connect(self.Save)\n self._baseAdoptWidget(self.widget)\n return self.widget\n\n def AdoptWidget(self, widget):\n raise NotImplementedError()\n\n def Bind(self):\n if self.trapRecurse > 0:\n return\n else:\n user_attr = getattr(self.mapper.cls, self.attr)\n parts = [ self.mapper.getObjectAttr(a) for a in user_attr.my_cols ]\n parts = [ '' if p is None else p for p in parts ]\n props = ('address1,address2,city,state,zip,country').split(',')\n if parts == [''] * 6:\n self.widget.setCurrentIndex(0)\n for i in range(6):\n self.widget.setProperty(props[i], parts[i])\n\n if parts != [''] * 6:\n self.widget.setCurrentIndex(1)\n return\n\n def Save(self):\n try:\n self.trapRecurse += 1\n user_attr = getattr(self.mapper.cls, self.attr)\n props = ('address1,address2,city,state,zip,country').split(',')\n for i in range(6):\n self.mapper.setObjectAttr(user_attr.my_cols[i], self.widget.property(props[i]))\n\n finally:\n self.trapRecurse -= 1\n\n\nclass StreetAddress(UserAttr):\n \"\"\"\n A StreetAddress UserAttr displays a street address in a stacked widget \n allowing multi-line input which can be parsed into the constituent parts.\n\n TODO: force the constituent parsing before allowing the bound attributes to passed off as valid.\n\n :param myCols: A list of attribute names which store (in order): \n address1, address2, city, state, zip, country\n \"\"\"\n\n def __init__(self, label, myCols, whats_this=None):\n self.my_cols = myCols\n UserAttr.__init__(self, str, label, whats_this=whats_this)\n\n def fget(self, row):\n return concat_address(*[ getattr(row, x) for x in self.my_cols ])\n\n def fset(self, row, value):\n raise 
NotImplementedError('StreetAddress types may not be assigned en masse')\n\n    def yoke_specifier(self):\n        return 'address'","sub_path":"pycfiles/qtalchemy-0.8.3-py2.7/street_address_widget.py","file_name":"street_address_widget.py","file_ext":"py","file_size_in_byte":12704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"20681496","text":"import unittest\nimport sympy\nimport numpy as np\nimport utilities\nimport models.parasitism\nimport models.stochastic\nimport models.utilities\n\n\ndef monotonic_convergence(l):\n    \"\"\"\n    See if a list of values increases or decreases monotonically and if its\n    derivative also decreases monotonically.\n\n    :param l: list of values.\n    :return: (bool) whether or not there is monotonic convergence.\n    \"\"\"\n\n    diff = np.abs(np.diff(l))\n    # For convergence the successive absolute differences must be\n    # non-increasing (the original comparison was reversed).\n    return np.all(np.less_equal(diff[1:], diff[:-1]))\n\n\nclass TestStochasticMethods(unittest.TestCase):\n    x = sympy.symbols('x x_1 x_2')\n    eps = sympy.symbols('eps eps_1 eps_2')\n\n    u_var = 1.0\n    model = models.stochastic.StochasticModel(\n        [x[0]], [eps[0]], [0.5 * x[0] + 0.5 * eps[0]]\n    )\n\n    u_var2 = np.array([[1.0, 0.5], [0.5, 1.0]])\n    model2 = models.stochastic.StochasticModel(\n        [x[1], x[2]],\n        [eps[1], eps[2]],\n        [(x[1] + x[2] + eps[1]) / 3, (x[1] + x[2] + eps[2]) / 3]\n    )\n\n    nbd2 = models.parasitism.get_model('nbd(2)')\n\n    def test_analytic_variance(self):\n        # X_t = .5X_{t-1} + .5u, u ~ N(0, sigma_u)\n        # sigma^2_x = (1/2)^2 sigma^2_x + (1/2)^2 sigma^2_u\n        # sigma^2_x = (1/4)sigma^2_x + (1/4)sigma^2_u\n        # (3/4)sigma^2_x = (1/4)sigma^2_u\n        # sigma^2_x = (1/3)sigma^2_u\n        analytic_var_1 = self.model.calculate_covariance({}, [self.u_var])\n        self.assertEqual(analytic_var_1, self.u_var / 3)\n\n        # This one's a little more complicated.\n        analytic_var_2 = self.model2.calculate_covariance({}, [self.u_var2])\n        self.assertTrue(np.allclose(\n            analytic_var_2, np.array([[8./45, 11./90], [11./90, 8./45]])\n        ))\n\n        # 2-patch NBD. Quite complicated.\n        self.assertTrue(np.allclose(\n            self.nbd2.calculate_covariance(\n                models.parasitism.sym_params(\n                    dict(r=2.0, a=1.0, c=1.0, k=0.5, mh=0.25, mp=0.25)\n                ),\n                utilities.noise_cov(dict(SpSh=1, Chh=0.5, Cpp=0.5))\n            ), np.array([\n                [\n                    928315516107./63549989434,\n                    1507134437739./127099978868,\n                    130208704032./31774994717,\n                    116000554032./31774994717\n                ],\n                [\n                    1507134437739./127099978868,\n                    928315516107./63549989434,\n                    116000554032./31774994717,\n                    130208704032./31774994717\n                ],\n                [\n                    130208704032./31774994717,\n                    116000554032./31774994717,\n                    188809697688./31774994717,\n                    162803616588./31774994717\n                ],\n                [\n                    116000554032./31774994717,\n                    130208704032./31774994717,\n                    162803616588./31774994717,\n                    188809697688./31774994717\n                ]\n            ])\n        ))\n\n    def test_integrated_variance(self):\n        integrated_vars_1 = [\n            self.model.integrate_covariance_from_analytic_spectrum(\n                {}, [self.u_var], n**2\n            ) for n in range(1, 16)\n        ]\n        # First, make sure this is monotonically converging.\n        self.assertTrue(monotonic_convergence(integrated_vars_1))\n\n        # TODO: Test if this is reasonably close to the analytic result or if\n        # it appears to be converging toward it.\n\n    def test_simulated_variance(self):\n        # TODO: Stub. Need to find a suitable way to test accuracy /\n        # convergence of covariance of random simulations.\n        
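# Editor's sketch (hypothetical API): draw long sample paths -- e.g. via a\n        # simulate()-style helper on models.stochastic.StochasticModel, if one\n        # exists -- estimate np.cov over the samples, and np.allclose-compare\n        # against calculate_covariance({}, [self.u_var]) with a loose tolerance.\n        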
pass\n\n    def test_correlation(self):\n        self.assertEqual(\n            models.utilities.correlation(\n                self.model.calculate_covariance({}, [self.u_var])\n            ), 1.0\n        )\n\n        self.assertTrue(\n            np.array_equal(\n                models.utilities.correlation(\n                    self.model2.calculate_covariance({}, [self.u_var2])\n                ),\n                [[1, 11./16], [11./16, 1]]\n            )\n        )\n\n    def test_covariance_multiples(self):\n        cov_base_1 = self.model.calculate_covariance({}, self.u_var)\n        cov_base_2 = self.model2.calculate_covariance({}, self.u_var2)\n\n        for mult in np.linspace(-2, 2, 9):\n            cov_1 = self.model.calculate_covariance({}, self.u_var * mult)\n            cov_2 = self.model2.calculate_covariance({}, self.u_var2 * mult)\n\n            self.assertEqual(cov_1, cov_base_1 * mult)\n            self.assertTrue(np.allclose(cov_2, cov_base_2 * mult))\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"460603420","text":"import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as np\nimport random\nimport os\nimport time\nimport AverageMeter\nimport SERGCN\nimport resnet_roi\nfrom sklearn.metrics import recall_score\nfrom utils.eval import compute_map\nfrom utils.logger import logger\nfrom utils.epoch_utils import generate_dataloader\nimport logging\n\nparser = argparse.ArgumentParser(description='PyTorch Social Relation')\nparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n                    help='input batch size for training (default: 32)')\nparser.add_argument('--test-batch-size', type=int, default=16, metavar='N',\n                    help='input batch size for testing (default: 16)')\nparser.add_argument('--image-size', type=int, default=448, metavar='N',\n                    help='the size of image (default: 448)')\nparser.add_argument('--num-workers', default=2, type=int,\n                    help='number of load data workers (default: 2)')\nparser.add_argument('--save-model', type=str, default='./Save_Model/',\n                    help='where you save model')\nparser.add_argument('--lr', type=float, default=5e-5,\n                    help='fc layer learning rate (default: 5e-5)')\nparser.add_argument('--max-epochs', type=int, default=20, metavar='N',\n                    help='number of epochs to train (default: 20)')\nparser.add_argument('--print-freq', default=50, type=int,\n                    metavar='N', help='print frequency (default: 50)')\nparser.add_argument('--manualSeed', type=int, default=-1,\n                    help='manual seed')\nparser.add_argument('--time-steps', type=int, default=2, metavar='N',\n                    help='the time steps (default: 2)')\nparser.add_argument('--load-model', action='store_true', default=False,\n                    help='whether load model or not')\nparser.add_argument('--dataset', type=str, default='pipa_fine',\n                    help='pipa_fine, pipa_coarse, pisc_fine, pisc_coarse')\n\nparser.add_argument('--regenerate-roifeat', action='store_true', default=False)\nparser.add_argument('--log-filename', type=str, default='log_info.txt')\n\n\nparser.add_argument('--factor', type=float, default=0.5,\n                    help='factor')\nparser.add_argument('--patience', default=5, type=int,\n                    metavar='N', help='patience default 5')\n\n\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\nif args.manualSeed is None or args.manualSeed < 0:\n    args.manualSeed = random.randint(1, 10000)\nargs.log_filename = 
args.log_filename.split('.')[0]+'_{}.txt'.format(args.manualSeed)\n\n# FileHandler\nfile_handler = logging.FileHandler(args.log_filename)\nfile_handler.setLevel(level=logging.INFO)\nformatter = logging.Formatter('%(asctime)s|%(filename)s[%(lineno)d]|%(message)s')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\nif 'pipa' in args.dataset:\n args.images_root = '../../Dataset/PIPA_image'\n args.loss_weight = False\n args.max_person = 5\n if 'coarse' in args.dataset:\n args.train_file_pre = './relation_split/PIPA/PIPA_coarse_relation_train'\n args.valid_file_pre = './relation_split/PIPA/PIPA_coarse_relation_valid'\n args.test_file_pre = './relation_split/PIPA/PIPA_coarse_relation_test'\n else:\n args.train_file_pre = './relation_split/PIPA/PIPA_fine_relation_train'\n args.valid_file_pre = './relation_split/PIPA/PIPA_fine_relation_valid'\n args.test_file_pre = './relation_split/PIPA/PIPA_fine_relation_test'\n\nelif 'pisc' in args.dataset:\n args.images_root = '../../Dataset/PISC_image'\n if 'coarse' in args.dataset:\n args.train_file_pre = './relation_split/PISC/PISC_coarse_relation_train'\n args.valid_file_pre = './relation_split/PISC/PISC_coarse_relation_valid'\n args.test_file_pre = './relation_split/PISC/PISC_coarse_relation_test'\n args.loss_weight = False\n args.max_person = 8\n else:\n args.train_file_pre = './relation_split/PISC/PISC_fine_relation_train'\n args.valid_file_pre = './relation_split/PISC/PISC_fine_relation_valid'\n args.test_file_pre = './relation_split/PISC/PISC_fine_relation_test'\n args.loss_weight = True\n args.max_person = 8\nelse:\n raise ValueError('Unknown dataset {}'.format(args.dataset))\n\nif args.dataset == 'pipa_fine':\n args.num_classes = 16\nelif args.dataset == 'pipa_coarse':\n args.num_classes = 5\nelif args.dataset == 'pisc_fine':\n args.num_classes = 6\nelif args.dataset == 'pisc_coarse':\n args.num_classes = 3\nelse:\n raise ValueError('Unknown dataset {}'.format(args.dataset))\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n\nsetup_seed(args.manualSeed)\n\nlogger.info(args)\n\nclass edge_loss(nn.Module):\n def __init__(self):\n super(edge_loss, self).__init__()\n if args.loss_weight: # for PISC fine dataset\n self.criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(class_weight).float())\n else:\n self.criterion = nn.CrossEntropyLoss()\n\n def forward(self, scores, labels, masks): # labels masks shape [batch_size, max_person, max_person]\n masks = masks.view(-1, 1).bool()\n masks = masks.detach() # [batch_size*max_person*max_person, 1]\n labels = labels.view((-1, 1))[masks] # [batch_size*max_person*max_person, 1]\n\n # scores shape [batch_size, max_person, max_person, num_classes]\n scores = scores.view(-1, args.num_classes) # [batch_size*max_person*max_person, num_classes]\n scores = scores[masks.repeat(1, args.num_classes)].view(-1, args.num_classes)\n losses = self.criterion(scores, labels)\n\n return losses\n\ndef cal_acc(logits, labels, masks):\n labels_np = labels.data.cpu().long().numpy()\n masks_np = masks.data.cpu().long().numpy()\n count = np.sum(masks_np)\n acc_list = []\n all_logits_np = []\n\n logits_np = F.softmax(logits, dim=-1).data.cpu().numpy() # [batch_size, max_person, max_person, num_classes]\n all_logits_np.append(logits_np)\n pred = np.argmax(logits_np, axis=3) # [batch_size, max_person, max_person]\n res = (pred == labels_np)\n res = res * masks_np\n right_num = np.sum(res)\n 
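# Worked toy case (illustrative): with pred == labels -> [[1, 0], [0, 1]] and\n    # masks_np -> [[1, 1], [0, 0]], right_num is 1 and count is 2, so the value\n    # appended below is 0.5 -- padded person slots never enter the accuracy.\n    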
acc_list.append(right_num * 1.0 / count)\n\n    conf = logits_np[np.where(masks_np == 1)]\n    pred_label = pred[np.where(masks_np == 1)] # ndarray, size 1\n    true_label = labels_np[np.where(masks_np == 1)]\n\n    return acc_list, count, true_label, pred_label, conf\n\ncheckpoint_name = os.path.join(args.save_model, str(args.image_size) + args.dataset + '-checkpoint.txt')\n\ndef load_model(unload_model):\n    if not os.path.exists(args.save_model):\n        os.makedirs(args.save_model)\n        logger.info('%s is created!' % args.save_model)\n    if not os.path.exists(checkpoint_name):\n        open(checkpoint_name, 'w').close()  # just create an empty checkpoint file\n        logger.info('checkpoint is created!')\n\n    start_index = 0\n    with open(checkpoint_name, 'r') as fin:\n        lines = fin.readlines()\n        if len(lines) > 0:\n            model_path, model_index = lines[0].split()\n            logger.info('Resuming from {} with epoch {}'.format(model_path, model_index))\n            if int(model_index) == 0:\n                unload_model_dict = unload_model.state_dict()\n\n                pretrained_dict = torch.load(os.path.join(args.save_model,model_path))\n\n                pretrained_dict = {k: v for k, v in pretrained_dict.items() if (k in unload_model_dict and pretrained_dict[k].shape == unload_model_dict[k].shape )}\n                unload_model_dict.update(pretrained_dict)\n                unload_model.load_state_dict(unload_model_dict)\n            else:\n                unload_model.load_state_dict(torch.load(os.path.join(args.save_model, model_path)))\n\n            start_index = int(model_index) + 1\n    return start_index\n\n\ndef save_model(tosave_model, epoch):\n    model_path = 'model-' + str(args.image_size) + '-' + args.dataset + '.pth'\n    save_path = os.path.join(args.save_model, model_path)\n    torch.save(tosave_model.module.state_dict(), save_path)\n    with open(checkpoint_name, 'w') as fin:\n        fin.write(model_path + ' ' + str(epoch) + '\\n')\n\n#dataset prepare\n#---------------------------------\nlogger.info('Loading dataset...')\nroi_net = resnet_roi.resnet101_roi()\n\nif args.cuda:\n    roi_net.cuda()\n    roi_net = torch.nn.DataParallel(roi_net)\n\ntrainloader, class_weight, cls_num_list = generate_dataloader(roi_net, 'train', args)\nvalidloader, _, _ = generate_dataloader(roi_net, 'valid', args)\ntestloader, _, _ = generate_dataloader(roi_net, 'test', args)\n\ntorch.cuda.empty_cache()\n\n##Model prepare\nlogger.info(\"Loading model...\")\nSRModel = SERGCN.SERGCN(num_class=args.num_classes, hidden_dim=2048,\n                        time_step=args.time_steps, node_num=args.max_person\n                        )\n\ntotal_param = 0\nfor param in SRModel.parameters():\n    total_param += np.prod(list(param.data.size()))\nlogger.info(\"Model total parameters in SRModel is {}\".format(total_param))\n\n\nif args.load_model:\n    start_epoch = load_model(SRModel)\nelse:\n    start_epoch = 1\n\nif args.cuda:\n    SRModel.cuda()\n\nSRModel = torch.nn.DataParallel(SRModel)\n\ncriterion = edge_loss()\nssl_loss_func = nn.BCELoss()\n\nif args.cuda:\n    criterion.cuda()\n\noptimizer = optim.Adam(SRModel.parameters(), lr=args.lr)\n# wire the otherwise-unused --factor / --patience CLI args into the scheduler\nscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=args.factor, patience=args.patience, verbose=False)\n\ndef train_epoch(epoch):\n\n    batch_time = AverageMeter.AverageMeter()\n    data_time = AverageMeter.AverageMeter()\n    losses = AverageMeter.AverageMeter()\n    acces = AverageMeter.AverageMeter()\n\n    SRModel.train()\n\n    end_time = time.time()\n    for batch_idx, (feat, union_feat, relation_half_mask, relation_id, full_mask) in enumerate(trainloader):\n        data_time.update(time.time() - end_time)\n\n        if args.cuda:\n            feat, union_feat, relation_half_mask, relation_id, full_mask = \\\n                feat.cuda(), union_feat.cuda(), relation_half_mask.cuda(), relation_id.cuda(), 
full_mask.cuda()\n        feat, union_feat, relation_half_mask, relation_id, full_mask = \\\n            Variable(feat), Variable(union_feat), Variable(relation_half_mask), Variable(relation_id), Variable(full_mask)\n\n        optimizer.zero_grad()\n\n        logits = SRModel(feat, union_feat, full_mask)\n\n        loss = criterion(logits, relation_id, relation_half_mask)\n\n        loss.backward()\n        optimizer.step()\n        losses.update(loss.cpu().data.numpy())\n\n        # calculate accuracy\n        acc_list, count, _, _, _ = cal_acc(logits, relation_id, relation_half_mask)\n        acces.update(acc_list[-1], count)\n\n        batch_time.update(time.time() - end_time)\n        end_time = time.time()\n        acc_str = ''\n        acc_str += ('%.3f ' % acces.avg)\n\n        if batch_idx % args.print_freq == 0:\n            logger.info('Epoch: [%d][%d/%d] '\n                        'Time %.3f (%.3f)\\t'\n                        'Data %.3f (%.3f)\\t'\n                        'Loss %.3f (%.3f)\\t'\n                        'pair %.1f\\t'\n                        % (epoch, batch_idx, len(trainloader),\n                           batch_time.val,batch_time.avg, data_time.val,data_time.avg,\n                           losses.val,losses.avg, count * 1.0) + acc_str)\n\n\ndef valid_test_epoch(epoch, loader, eval_type):\n\n    batch_time = AverageMeter.AverageMeter()\n\n    losses = AverageMeter.AverageMeter()\n    acces = AverageMeter.AverageMeter()\n\n    SRModel.eval()\n\n    end_time = time.time()\n    true_labels, pred_labels, confs = [], [], []\n    for batch_idx, (feat, union_feat, relation_half_mask, relation_id, full_mask) in enumerate(loader):\n\n        if args.cuda:\n            feat, union_feat, relation_half_mask, relation_id, full_mask = \\\n                feat.cuda(), union_feat.cuda(), relation_half_mask.cuda(), relation_id.cuda(), full_mask.cuda()\n        feat, union_feat, relation_half_mask, relation_id, full_mask = \\\n            Variable(feat), Variable(union_feat), Variable(relation_half_mask), Variable(relation_id), Variable(full_mask)\n\n        logits = SRModel(feat, union_feat, full_mask)\n\n        loss = criterion(logits, relation_id, relation_half_mask)\n\n        losses.update(loss.cpu().data.numpy())\n\n        # calculate accuracy\n        acc_list, count, true_label, pred_label, conf = cal_acc(logits, relation_id, relation_half_mask)\n        true_labels.append(true_label)\n        pred_labels.append(pred_label)\n        confs.append(conf)\n        acces.update(acc_list[-1], count)\n\n        batch_time.update(time.time() - end_time)\n        end_time = time.time()\n        acc_str = ''\n        acc_str += ('%.3f ' % acces.avg)\n\n        if batch_idx % args.print_freq == 0:\n            logger.info('Epoch: [%d][%d/%d] '\n                        'Time %.3f (%.3f)\\t'\n                        'Loss %.3f (%.3f)\\t'\n                        % (epoch, batch_idx, len(loader),\n                           batch_time.val, batch_time.avg,\n                           losses.val,losses.avg) + acc_str)\n\n    acc_str = ''\n    acc_str += ('%.3f ' % acces.avg)\n    logger.info(eval_type + \": Acc \" + acc_str + '\\n')\n\n    true_labels = np.concatenate(true_labels).reshape(-1)\n    pred_labels = np.concatenate(pred_labels).reshape(-1)\n    confs = np.concatenate(confs)\n\n    recalls = recall_score(true_labels, pred_labels, average=None)\n    logger.info(eval_type + \": Recall {} \\n\".format(recalls))\n\n    mAP = compute_map(confs, true_labels)\n    logger.info(eval_type + \": mAP {} \\n\".format(mAP))\n\n    return acces, recalls, mAP, pred_labels, true_labels\n\nlogger.info('Start training...')\nlogger.info(\"Random Seed is {}\".format(args.manualSeed))\n\nbest_test_result = 0\nbest_test_epoch = 0\nbest_test_recalls = []\nbest_test_other = 0\n\nbest_valid_result = 0\nbest_valid = 0  # test metric at the best validation epoch; initialised so the final log cannot hit a NameError\nbest_valid_epoch = 0\nbest_valid_recalls = []\nbest_valid_other = 0\n\nif args.load_model:\n    args.max_epochs = start_epoch + 1\n\nfor epoch in range(start_epoch, args.max_epochs):\n    
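# With --load-model, max_epochs was clamped to start_epoch + 1 just above, so\n    # this loop runs a single epoch that only re-evaluates the restored checkpoint.\n    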
logger.info('Epoch: %d start!' % epoch)\n\n    epoch_start = time.time()\n\n    if args.load_model:\n        valid_acces, _, valid_map, _, _ = valid_test_epoch(epoch, validloader, 'valid')\n        test_acces, test_recalls, test_map, test_pred, test_true = valid_test_epoch(epoch, testloader, 'test')\n    else:\n        train_epoch(epoch)\n        valid_acces, _, valid_map, _, _ = valid_test_epoch(epoch, validloader, 'valid')\n        test_acces, test_recalls, test_map, test_pred, test_true = valid_test_epoch(epoch, testloader, 'test')\n\n    if 'pipa' in args.dataset:\n        test_result = test_acces.avg\n        test_other = test_map\n        valid_result = valid_acces.avg\n        valid_other = valid_map\n        scheduler_step = valid_acces.avg\n\n    elif 'pisc' in args.dataset:\n        test_result = test_map\n        test_other = test_acces.avg\n        valid_result = valid_map\n        valid_other = valid_acces.avg\n        scheduler_step = valid_map\n\n    if test_result > best_test_result:\n        best_test_result = test_result\n        best_test_epoch = epoch\n        best_test_recalls = test_recalls\n        best_test_other = test_other\n        save_model(SRModel, epoch)\n    if valid_result > best_valid_result:\n        best_valid_result = valid_result\n        best_valid_epoch = epoch\n        best_valid_recalls = test_recalls\n        best_valid_other = test_other\n        best_valid = test_result\n\n    logger.info('Epoch {} time {}'.format(epoch, time.time()-epoch_start))\n    scheduler.step(scheduler_step)\n\nlogger.info(\"Test set best-test result is {} best other is {} epoch {} best recalls is {}\".format(\n    best_test_result, best_test_other, best_test_epoch, best_test_recalls))\nlogger.info(\"Test set best-valid result is {} best other is {} epoch {} best recalls is {}\".format(\n    best_valid, best_valid_other, best_valid_epoch, best_valid_recalls))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"429092260","text":"# The program looks up, for a given course number,\n# the classroom, the lesson time and the teacher.\n# Built around one concrete example; adapt the data to your own needs.\n\n\ndef main():\n    number_aud = {'CS101': 3004, 'CS102': 4501, 'CS103': 6755,\n                  'CS104': 1244, 'CS105': 1411}\n\n    name_teacher = {'CS101': 'Haynes', 'CS102': 'Alvarado', 'CS103': 'Rich',\n                    'CS104': 'Berg', 'CS105': 'Li'}\n\n    time = {'CS101': '8:00', 'CS102': '9:00', 'CS103': '10:00',\n            'CS104': '11:00', 'CS105': '13:00'}\n\n    number_cours = input('Enter course number: ')\n    print('Your classroom:', number_aud[number_cours])\n    print('Your lesson time:', time[number_cours])\n    print('Your teacher:', name_teacher[number_cours])\n\n\nmain()\n","sub_path":"info about lesson.py","file_name":"info about lesson.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"353592788","text":"__author__ = \"unknow\"\n__copyright__ = \"Sprace.org.br\"\n__version__ = \"1.0.0\"\n\nimport sys, os, fnmatch\n\nimport pandas as pd\nimport numpy as np\nimport sys\nimport random\n\nfrom trackml.dataset import load_event\nfrom trackml.dataset import load_dataset\nfrom trackml.randomize import shuffle_hits\nfrom trackml.score import score_event\n\nimport multiprocessing\nfrom multiprocessing import Process, Value, Lock\nimport glob, os\n\n#sys.path.append('/home/silvio/github/track-ml-1/utils')\n#from tracktop import *\n\n# obtain amount of columns\ndef amount_of_columns(cell):\n\n    indt=0\n    test=0\n    indret=0\n\n    for z in cell:\n        indt=indt+1\n        #print(\"z\")\n\n        if ((z == 0) and (test == 0)) :\n            test=1\n            indret=indt\n    
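# Note: as written, indret holds the 1-based position of the first zero entry,\n    # which is one more than the count of leading nonzero columns that the\n    # comment on the return line describes.\n    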
return(indret) # ind is the amount of columns != 0\n\ndef create_directory_for_results(temporary_directory):\n    if (os.path.isdir(temporary_directory)):\n        temp_dir = temporary_directory+\"//*\"\n        files_in_temp_dir = glob.glob(temp_dir)\n\n        for file in files_in_temp_dir:\n            #print(\"remove \", file)\n            os.remove(file)\n    else:\n        os.mkdir(temporary_directory)\n\ndef join_files(temporary_directory, output_file_real):\n\n    files = []\n    for r, d, f in os.walk(temporary_directory):\n        for file in f:\n            files.append(os.path.join(r, file))\n\n    with open(output_file_real, 'w') as outfile:\n        for fname in files:\n            with open(fname) as infile:\n                outfile.write(infile.read())\n\n# function to put particle information and all the hits of each track in a single line\ndef create_tracks(tot_columns , hits, cells, particles, truth, be,e,pid,temporary_directory):\n\n    b = np.zeros((0))\n\n    for index, row in particles.iloc[be:e,:].iterrows():\n\n        truth_0 = truth[truth.particle_id == row['particle_id']]\n\n        par=particles[['vx','vy','vz','px','py','pz']].loc[particles['particle_id'] == row['particle_id']]\n        particleRow = [par['vx'].values[0],par['vy'].values[0],par['vz'].values[0],par['px'].values[0],par['py'].values[0],par['pz'].values[0]]\n\n        psize=par.size\n\n        b = np.concatenate((b, particleRow))\n\n        #print(truth_0.size)\n        #print(truth_0.shape)\n        h = np.zeros((0))\n        #jj=0\n        for index, row in truth_0.iterrows():\n\n            ch=cells[['ch0']].loc[cells['hit_id'] == row['hit_id']].mean()\n            ch1=cells[['ch1']].loc[cells['hit_id'] == row['hit_id']].mean()\n            vl=cells[['value']].loc[cells['hit_id'] == row['hit_id']].mean()\n\n            hitRow = [row['tx'],row['ty'],row['tz'],ch[0], ch1[0], vl[0]]\n            h= np.concatenate((h, hitRow))\n\n        hsize=h.size\n        b=np.concatenate((b, h))\n\n        aux = np.zeros((0))\n        remaining_columns_to_zero=tot_columns-1-h.size-6\n        if (remaining_columns_to_zero > 0):\n            aux = np.zeros(remaining_columns_to_zero)\n        auxsize=aux.size\n        b=np.concatenate((b, aux))\n        #print(\"bb \", b)\n\n        #print(\"psize \", psize, \"hsize \", hsize, \"auxsize \", auxsize, \"sum \", psize+hsize+auxsize)\n\n    rw=(e-be)\n    b = b.reshape(rw, (tot_columns-1))\n    np.savetxt(temporary_directory+\"//arr\"+str(pid), b, fmt=\"%s\")\n\ndef createTracks(event_prefix , dir_event_prefix, diroutput):\n    #global Am_of_cores\n    #global Am_of_particles\n    #global total_of_loops\n    #global remaining_tracks\n    print(dir_event_prefix + \" - \" + event_prefix)\n    hits, cells, particles, truth = load_event(os.path.join(dir_event_prefix, event_prefix))\n\n    #X = np.zeros((0))\n\n    #121 columns -> 6 particles columns; 19 hits (6 columns); one result column (fake or real) ==> 6x19 + 6 +1 =121\n    #tot_columns=121\n    tot_columns=175\n\n    Am_of_particles = particles.shape[0]\n    Am_of_cores = multiprocessing.cpu_count()-2\n    total_of_loops = Am_of_particles // Am_of_cores\n    remaining_tracks = (Am_of_particles-(total_of_loops*Am_of_cores))\n\n    #output_file_all = \"/data/output/TracksRealFake\"+str(event_prefix)+\".csv\"\n    #output_file_real = \"/data/output/\"+str(dir_event_prefix)+\"/TracksReal\"+str(event_prefix)+\".csv\"\n    output_file_real = str(diroutput)+\"/TracksReal\"+str(event_prefix)+\".csv\"\n    #output_file_real_aux = \"/data/output/TracksRealAUX\"+str(event_prefix)+\".csv\"\n    #output_file_fake = \"/data/output/TracksFake\"+str(event_prefix)+\".csv\"\n    temporary_directory = \"/tmp/res/\"+str(event_prefix)+\"/\"\n\n\n    #output_file_all = \"/data/output/TracksRealFake\"+str(event_prefix)+\".csv\"\n    #output_file_real = \"/data/output/TracksReal\"+str(event_prefix)+\".csv\"\n    #output_file_real_aux = 
\"/data/output/TracksRealAUX\"+str(event_prefix)+\".csv\"\n #output_file_fake = \"/data/output/TracksFake\"+str(event_prefix)+\".csv\"\n\n #print(\"temporary_directory: \", temporary_directory)\n #print(\"Amount of Particles: \", Am_of_particles)\n #print(\"Amount of Processing cores: \", Am_of_cores)\n #print(\"total of loops: \", total_of_loops)\n #print(\"remaing tracks : \", remaining_tracks)\n print(\"output_file_real : \" , output_file_real)\n\n step=1\n pid=0\n\n create_directory_for_results(temporary_directory)\n\n jobs = []\n for i in range(Am_of_cores+1):\n #for i in range(1):\n\n b=i*total_of_loops\n\n if (i == Am_of_cores):\n e=b+remaining_tracks\n else:\n e=b+total_of_loops\n #e=10\n #b=1\n\n p = multiprocessing.Process(target=create_tracks, args=(tot_columns, hits, cells, particles, truth,b,e,pid,temporary_directory))\n #p = multiprocessing.Process(target=count_hits, args=(b,e,pid,temporary_directory))\n\n #print (\"multiprocessing: \", b,e)\n pid=pid+1\n jobs.append(p)\n p.start()\n\n for proc in jobs:\n proc.join()\n\n del jobs[:]\n\n join_files(temporary_directory, output_file_real)\n tracks = pd.read_csv(output_file_real,header = None, sep = \" \")\n tracks.to_csv(output_file_real)\n","sub_path":"core/utils/tracks_manipulation_lib.py","file_name":"tracks_manipulation_lib.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"161505689","text":"from django.forms.fields import RegexField, CharField, FileField\nfrom django.forms.forms import Form\nfrom mangrove.utils.types import is_empty\n\nclass EntityTypeForm(Form):\n error_css_class = 'error'\n required_css_class = 'required'\n\n entity_type_regex = RegexField(regex=\"^[A-Za-z\\d\\s]+$\", max_length=20, error_message=\"Only letters and numbers are valid\", required=True, label=\"New Subject(eg clinic, waterpoint etc)\")\n\nclass ReporterRegistrationForm(Form):\n required_css_class = 'required'\n\n first_name = RegexField(regex=\"[^0-9.,\\s@#$%&*~]*\", max_length=20,\n error_message=\"Please enter a valid value containing only letters a-z or A-Z or symbols '`- \"\n ,\n label=\"* Name\")\n telephone_number = RegexField(required=True, regex=\"^[^a-zA-Z]*[0-9]+$\", max_length=15, label=\"* Mobile Number\",\n error_message=\"Please enter a valid phone number\")\n geo_code = CharField(max_length=30, required=False, label=\"GPS: Enter Lat Long\")\n location = CharField(max_length=100, required=False, label=\"Enter location\")\n\n def __init__(self, *args, **kwargs):\n super(ReporterRegistrationForm, self).__init__(*args, **kwargs)\n self.fields['first_name'].widget.attrs['watermark'] = \"Enter Data Sender's name\"\n self.fields['telephone_number'].widget.attrs['watermark'] = \"Enter Data Sender's number eg: \"\n self.fields['location'].widget.attrs['watermark'] = \"Enter region, district or commune\"\n self.fields['geo_code'].widget.attrs['watermark'] = \"Enter lat and long eg: 19.3 42.37\"\n\n def _is_int(self, s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n def clean_telephone_number(self):\n return (\"\").join([each for each in self.cleaned_data['telephone_number'] if self._is_int(each) ])\n\n def _geo_code_format_validations(self, lat_long, msg):\n if len(lat_long) != 2:\n self._errors['geo_code'] = self.error_class([msg])\n else:\n try:\n if not (-90 < float(lat_long[0]) < 90 and -180 < float(lat_long[1]) < 180):\n self._errors['geo_code'] = self.error_class([msg])\n except Exception:\n 
self._errors['geo_code'] = self.error_class([msg])\n\n    def _geo_code_validations(self, geo_code_value):\n        msg = \"Incorrect GPS format. The GPS coordinates must be in the following format: xx.xxxx yy.yyyy. Example -18.8665 47.5315\"\n        geo_code_string = geo_code_value.strip()\n        geo_code_string = ' '.join(geo_code_string.split())\n        if not is_empty(geo_code_string):\n            lat_long = geo_code_string.split(' ')\n            self._geo_code_format_validations(lat_long, msg)\n        self.cleaned_data['geo_code'] = geo_code_string\n\n    def clean(self):\n        location_value = self.cleaned_data.get(\"location\")\n        geo_code_value = self.cleaned_data.get(\"geo_code\")\n        if not (bool(location_value) or bool(geo_code_value)):\n            msg = \"Please fill out at least one location field correctly.\"\n            self._errors['location'] = self.error_class([msg])\n            self._errors['geo_code'] = self.error_class([msg])\n        if bool(geo_code_value):\n            self._geo_code_validations(geo_code_value)\n        return self.cleaned_data\n\n\nclass SubjectUploadForm(Form):\n    error_css_class = 'error'\n    required_css_class = 'required'\n    file = FileField(label='Import Subjects')\n","sub_path":"src/datawinners/entity/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"259461977","text":"# The input() function pauses your program and waits for the user to enter some text.\nmessage = input(\"Tell me something, and I will repeat it back to you: \")\nprint(message)\nname = input(\"Please enter your name: \")\nprint(\"Hello, \" + name + \"!\")\n\n# When you use the input() function, Python interprets everything the user enters as a string.\nage = input(\"How old are you? \")\n# The int() function converts a string representation of a number to a numerical representation.\nprint(int(age) > 18)\n\n# Letting the User Choose When to Quit\nprompt = \"\\nTell me something, and I will repeat it back to you:\"\nprompt += \"\\nEnter 'quit' to end the program. \"\nmessage = \"\"\nwhile message != 'quit':\n    message = input(prompt)\n    print(message)\n\n# To exit a while loop immediately without running any remaining code in the loop, regardless of the results of any conditional test, use the break statement.\n\n# Using continue in a Loop\ncurrent_number = 0\nwhile current_number < 10:\n    current_number += 1\n    if current_number % 2 == 0:\n        continue\n    print(current_number)\n\n\n# Moving Items from One List to Another\n# Consider a list of newly registered but unverified users of a website. 
After we verify these users, how can we move them to a separate list of confirmed users?\n# One way would be to use a while loop to pull users from the list of unconfirmed users as we verify them and then add them to a separate list of confirmed users.\n# Start with users that need to be verified,\n# and an empty list to hold confirmed users.\nunconfirmed_users = ['alice', 'brian', 'candace']\nconfirmed_users = []\nwhile unconfirmed_users:\n    current_user = unconfirmed_users.pop()\n    print(\"Verifying user: \" + current_user.title())\n    confirmed_users.append(current_user)\n\n# Display all confirmed users.\nprint(\"\\nThe following users have been confirmed:\")\nfor confirmed_user in confirmed_users:\n    print(confirmed_user.title())\n\n# Removing All Instances of Specific Values from a List\npets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']\nprint(pets)\n\n# Python removes the first instance of 'cat', returns to the while line, and then reenters the loop when it finds that 'cat' is still in the list.\n# It removes each instance of 'cat' until the value is no longer in the list, at which point Python exits the loop and prints the list again:\nwhile 'cat' in pets:\n    pets.remove('cat')\n\nprint(pets)\n\n# Filling a Dictionary with User Input\nresponses = {}\n# Set a flag to indicate that polling is active.\npolling_active = True\nwhile polling_active:\n    # Prompt for the person's name and response.\n    name = input(\"\\nWhat is your name? \")\n    response = input(\"Which mountain would you like to climb someday? \")\n\n    # Store the response in the dictionary:\n    responses[name] = response\n\n    # Find out if anyone else is going to take the poll.\n    repeat = input(\"Would you like to let another person respond? (yes/ no) \")\n    if repeat == 'no':\n        polling_active = False\n\n# Polling is complete. 
Show the results (dedented so it runs once, after the poll loop ends).\nprint(\"\\n--- Poll Results ---\")\nfor name, response in responses.items():\n    print(name + \" would like to climb \" + response + \".\")\n\n\n# P153","sub_path":"src/basicKB/dev/basics/userInput.py","file_name":"userInput.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"361720064","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May  9 14:36:24 2014\n\n@author: nairboon\n\"\"\"\n\nfrom pybrain.rl.environments.cartpole import CartPoleEnvironment, DiscreteBalanceTask, CartPoleRenderer\nfrom pybrain.rl.agents import LearningAgent\nfrom pybrain.rl.experiments import EpisodicExperiment\n\nfrom matplotlib import pyplot as plt\n\nfrom scipy import mean\n\nfrom pybrain.rl.learners.valuebased import NFQ, ActionValueNetwork, Q\n\nfrom pybrain.rl.environments import cartpole as cp\n\nfrom learner import BNL, ActionValueBayesianNetwork\n\n# switch this to True if you want to see the cart balancing the pole (slower)\n\nimport numpy\n\nimport multiprocessing\n\n#def run(task, parameters):\ndef run(arg):\n    task = arg[0]\n    parameters = arg[1]\n    #print \"run with\", parameters\n\n    seed = parameters[\"seed\"]\n\n    process_id = hash(multiprocessing.current_process()._identity)\n    numpy.random.seed(seed + process_id)\n\n    render = False\n    plot = False\n\n    plt.ion()\n\n    env = CartPoleEnvironment()\n    if render:\n        renderer = CartPoleRenderer()\n        env.setRenderer(renderer)\n        renderer.start()\n\n    task_class = getattr(cp, task)\n    task = task_class(env, parameters[\"MaxRunsPerEpisode\"])\n    testtask = task_class(env, parameters[\"MaxRunsPerEpisodeTest\"])\n\n    #print \"dim: \", task.indim, task.outdim\n\n    # to inputs state and 4 actions\n    module = ActionValueNetwork(task.outdim, task.indim)\n\n    learner = NFQ()\n    # % of random actions\n    learner.explorer.epsilon = parameters[\"ExplorerEpsilon\"]\n\n    agent = LearningAgent(module, learner)\n    testagent = LearningAgent(module, None)\n    experiment = EpisodicExperiment(task, agent)\n    testexperiment = EpisodicExperiment(testtask, testagent)\n\n    def plotPerformance(values, fig):\n        plt.figure(fig.number)\n        plt.clf()\n        plt.plot(values, 'o-')\n        plt.gcf().canvas.draw()\n        # Without the next line, the pyplot plot won't actually show up.\n        plt.pause(0.001)\n\n    performance = []\n\n    if plot:\n        pf_fig = plt.figure()\n\n    m = int(parameters[\"MaxTotalEpisodes\"] / parameters[\"EpisodesPerLearn\"])\n    for episode in range(0, m):\n        # one learning step after one episode of world-interaction\n        experiment.doEpisodes(parameters[\"EpisodesPerLearn\"])\n        agent.learn(1)\n\n        #renderer.drawPlot()\n\n        # test performance (these real-world experiences are not used for training)\n        if plot:\n            env.delay = True\n\n        if (episode) % parameters[\"TestAfter\"] == 0:\n            #print \"Evaluating at episode: \", episode\n\n            #experiment.agent = testagent\n            r = mean([sum(x) for x in testexperiment.doEpisodes(parameters[\"TestWith\"])])\n\n            env.delay = False\n            testagent.reset()\n            #experiment.agent = agent\n\n            performance.append(r)\n            if plot:\n                plotPerformance(performance, pf_fig)\n\n#            print \"reward avg\", r\n#            print \"explorer epsilon\", learner.explorer.epsilon\n#            print \"num episodes\", agent.history.getNumSequences()\n#            print \"update step\", len(performance)\n\n#    print \"done\"\n    return performance\n\n    #print \"network\", json.dumps(module.bn.net.E, 
indent=2)","sub_path":"NFQ.py","file_name":"NFQ.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289657733","text":"\"\"\"\r\nthe GradeEntry class module.\r\n\"\"\"\r\n\r\n\r\nclass GradeEntry:\r\n \"\"\"Records the grade entries for a student record system.\r\n\r\n id - the course identifier string\r\n weight - the course weight (full year, half year)\r\n \"\"\"\r\n\r\n id: str\r\n weight: float\r\n\r\n def __init__(self, id: str, weight: float) -> None:\r\n \"\"\" Initializes the class GradeEntry\r\n \"\"\"\r\n\r\n self.id = id\r\n self.weight = weight\r\n\r\n def __str__(self):\r\n \"\"\" Provides a string representation of the grade entry\r\n \"\"\"\r\n\r\n raise NotImplementedError('Subclass needed')\r\n\r\n def _invariant(self):\r\n \"\"\" Tests the condition\r\n \"\"\"\r\n\r\n raise NotImplementedError('Subclass needed')\r\n\r\n def __eq__(self, other: 'GradeEntry'):\r\n \"\"\" Determines equality based on course id and weight for LetterGradeEntry\r\n \"\"\"\r\n\r\n return (type(self) == type(other)\r\n and self.id == other.id\r\n and self.weight == other.weight)\r\n\r\n\r\nclass NumericGradeEntry(GradeEntry):\r\n \"\"\" Sub-class of grade-entry to handle numeric grades.\r\n \"\"\"\r\n\r\n grade: int\r\n points: float\r\n\r\n def __init__(self, id: str, grade: int, weight: float) -> None:\r\n \"\"\" Initializes the NumericGradeEntry subclass and\r\n extends GradeEntry by adding:\r\n grade - numeric representation of grades\r\n points - grade point average\r\n \"\"\"\r\n\r\n GradeEntry.__init__(self, id, weight)\r\n\r\n self.grade = grade\r\n\r\n self._invariant()\r\n if grade >= 85:\r\n self.points = 4.0\r\n elif grade >= 80:\r\n self.points = 3.7\r\n elif grade >= 77:\r\n self.points = 3.3\r\n elif grade >= 73:\r\n self.points = 3.0\r\n elif grade >= 70:\r\n self.points = 2.7\r\n elif grade >= 67:\r\n self.points = 2.3\r\n elif grade >= 63:\r\n self.points = 2.0\r\n elif grade >= 60:\r\n self.points = 1.7\r\n elif grade >= 57:\r\n self.points = 1.3\r\n elif grade >= 53:\r\n self.points = 1.0\r\n elif grade >= 50:\r\n self.points = 0.7\r\n else:\r\n self.points = 0.0\r\n\r\n def _invariant(self):\r\n # Tests the validity of the grade entered.\r\n assert 0 <= self.grade <= 100, 'Invalid number grade entry.'\r\n\r\n def __str__(self) -> str:\r\n \"\"\"Representation of NumericGradeEntry\r\n \"\"\"\r\n\r\n return 'Type: {}, Course: {}, Grade: {}, GPA: {}'.format(type(self).__name__,\r\n self.id,\r\n str(self.grade),\r\n str(self.points))\r\n\r\n\r\nclass LetterGradeEntry(GradeEntry):\r\n \"\"\" Sub-class of grade-entry to handle letter grades.\r\n \"\"\"\r\n\r\n grade: str\r\n points: float\r\n\r\n def __init__(self, id: str, grade: int, weight: float) -> None:\r\n \"\"\" Initializes the NumericGradeEntry subclass and\r\n extends GradeEntry by adding:\r\n grade - letter representation of grades\r\n points - grade point average\r\n \"\"\"\r\n\r\n GradeEntry.__init__(self, id, weight)\r\n\r\n self.grade = grade\r\n self._invariant()\r\n if grade == 'A+' or grade == 'A':\r\n self.points = 4.0\r\n elif grade == 'A-':\r\n self.points = 3.7\r\n elif grade == 'B+':\r\n self.points = 3.3\r\n elif grade == 'B':\r\n self.points = 3.0\r\n elif grade == 'B-':\r\n self.points = 2.7\r\n elif grade == 'C+':\r\n self.points = 2.3\r\n elif grade == 'C':\r\n self.points = 2.0\r\n elif grade == 'C-':\r\n self.points = 1.7\r\n elif grade == 'D+':\r\n self.points = 1.3\r\n elif grade == 'D':\r\n self.points 
= 1.0\r\n        elif grade == 'D-':\r\n            self.points = 0.7\r\n        else:\r\n            self.points = 0.0\r\n\r\n    def _invariant(self):\r\n        # tests validity of the letter grade entered\r\n        valid_grades = ['A', 'A+', 'A-', 'B', 'B+', 'B-',\r\n                        'C', 'C+', 'C-', 'D', 'D+', 'D-', 'F']\r\n\r\n        assert self.grade in valid_grades, 'Invalid letter grade entry'\r\n\r\n    def __str__(self) -> str:\r\n        \"\"\"Representation of LetterGradeEntry\r\n        \"\"\"\r\n\r\n        return 'Type: {}, Course: {}, Grade: {}, GPA: {}'.format(type(self).__name__,\r\n                                                                 self.id,\r\n                                                                 self.grade,\r\n                                                                 str(self.points))\r\n\r\n","sub_path":"labs/lab_2/grade.py","file_name":"grade.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"624844276","text":"url = \"http://blog.sina.com.cn/s/blog_6264d1a80100nwgi.html\"\nimport requests\nfrom bs4 import BeautifulSoup\nhtml = requests.get(url)\nhtml.encoding = \"utf8\"\n\nbs4Obj = BeautifulSoup(html.text, \"html.parser\")  # explicit parser avoids bs4's parser warning\narticalContent = bs4Obj.find(\"div\",{\"class\":\"sinabloga\",\"id\":\"sinabloga\"}).find(\"div\",{\"id\":\"column_2\",\"class\":\"SG_colW73\"})\\\n.find(\"div\",{\"id\":\"sina_keyword_ad_area2\",\"class\":\"articalContent \"}).findAll(\"p\")\nprint(articalContent)\n\narticle = \"\"\nfor p in articalContent:\n    article += p.get_text()\n\nprint(article) ","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"412283784","text":"import sys\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport tensorflow as tf\nimport timeit\n\nif __name__ == \"__main__\":\n    if tf.test.is_gpu_available():\n        print(\"GPU detected yeeey!\")\n        print(\"tf version running \", tf.version.VERSION)\n        print(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n        device_name = tf.test.gpu_device_name()\n        if device_name != '/device:GPU:0':\n            print(\n                '\\n\\nThis error most likely means that this notebook is not '\n                'configured to use a GPU.  Change this in Notebook Settings via the '\n                'command palette (cmd/ctrl-shift-P) or the Edit menu.\\n\\n')\n            raise SystemError('GPU device not found')\n\n        def cpu():\n            with tf.device('/cpu:0'):\n                random_image_cpu = tf.random.normal((200, 200, 200, 3))\n                net_cpu = tf.keras.layers.Conv2D(64, 14)(random_image_cpu)\n                return tf.math.reduce_sum(net_cpu)\n\n        def gpu():\n            with tf.device('/device:GPU:0'):\n                random_image_gpu = tf.random.normal((200, 200, 200, 3))\n                net_gpu = tf.keras.layers.Conv2D(64, 14)(random_image_gpu)\n                return tf.math.reduce_sum(net_gpu)\n\n        # We run each op once to warm up; see: https://stackoverflow.com/a/45067900\n        cpu()\n        gpu()\n\n        # Run the op several times.\n        print('Time (s) to apply a 64-filter 14x14 Conv2D over random 200x200x200x3 images '\n              '(batch x height x width x channel). 
Sum of fifty runs.')\n        print('CPU (s):')\n        cpu_time = timeit.timeit('cpu()', number=50, setup=\"from __main__ import cpu\")\n        print(cpu_time)\n        print('GPU (s):')\n        gpu_time = timeit.timeit('gpu()', number=50, setup=\"from __main__ import gpu\")\n        print(gpu_time)\n        print('GPU speedup over CPU: {}x'.format(int(cpu_time/gpu_time)))\n\n    else:\n        print(\"No GPU detected!\")\n\n\n\n","sub_path":"talon_test_scripts/tf2.1_gpu_vs_cpu.py","file_name":"tf2.1_gpu_vs_cpu.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"9934229","text":"import cx_Freeze\nimport sys\nimport tkinter\nimport jinja2\nimport PIL\n\nbase = None\n\nif sys.platform == 'win32':\n    base = \"Win32GUI\"\n\nexecutables = [cx_Freeze.Executable(\"Main.py\", base=base, icon=\"favicon.ico\", targetName=\"Forms\")]\n\ncx_Freeze.setup(\n    name=\"Forms\",\n    options={\n        \"build_exe\" : {\n            \"packages\" : [\"tkinter\", \"jinja2\", \"PIL\", \"Forms\"],\n            \"include_files\" : [\n                \"Dakhila.py\",\n                \"Trial.py\",\n                \"Naamsaari.py\",\n                \"Form4.py\",\n                \"FormUtils.py\",\n                \"favicon.ico\",\n                \"./Forms\"\n            ]\n        }\n    },\n    version=\"0.0.1\",\n    description=\"A GUI utility to facilitate filling of government forms\",\n    executables=executables\n)","sub_path":"cx_setup.py","file_name":"cx_setup.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"209333202","text":"\"\"\"\nEXERCISE: Write a function that takes a collection object and returns the largest number in that collection,\nand another function that returns the smallest value in that collection\n\"\"\"\n\ndef maior_valor(object):\n    object = sorted(object, reverse=True)\n    return object[0]\n\ndef menor_valor(object):\n    object = sorted(object)\n    return object[0]\n\nlista = [2,3,1,4,5]\n\nprint(\"Printing the largest value!\")\nprint(maior_valor(lista))\n\nprint(\"Printing the smallest value!\")\nprint(menor_valor(lista))","sub_path":"1 - Python/aula7-exercicio.py","file_name":"aula7-exercicio.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"445418304","text":"import numpy as np\nimport time\n\n\ndef frEpoch(numShards, numNodes, sizeShard, sparsity, chainLength, initBal):\n    initChain = np.vstack([np.zeros((1, sizeShard)),\n                           initBal * np.ones((1, sizeShard))])\n    block = blockGenCore(sizeShard, sparsity, initBal / chainLength / 2)\n    chain = np.vstack([initChain, np.repeat(block, chainLength, axis=0)])\n    tVer = np.zeros(numNodes)\n    for n in range(numNodes):\n        # measure the verification time of each node\n        for k in range(numShards):\n            tVer[n] += verifyCore(chain, block[0, :])\n    return np.max(tVer), np.median(tVer), np.mean(tVer)\n\n\ndef ssEpoch(numShards, numNodes, sizeShard, sparsity, chainLength, initBal):\n    tVerMax = []\n    tVerMedian = []\n    tVerMean = []\n    numRep = int(numNodes / numShards)\n    for k in range(numShards):\n        tMax, tMedian, tMean = frEpoch(1, numRep, sizeShard, sparsity,\n                                       chainLength, initBal)\n        tVerMax.append(tMax)\n        tVerMedian.append(tMedian)\n        tVerMean.append(tMean)\n    return np.max(tVerMax), np.median(tVerMedian), np.mean(tVerMean)\n\n\ndef psEpoch(numShards, numNodes, sizeShard, sparsity, chainLength, initBal):\n    
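# Coded-sharding sketch: each node verifies one linear combination of every\n    # shard's sender block; coeGen below builds Lagrange-basis coefficients with\n    # interpolation points beta evaluated at the node-specific points alpha.\n    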
initChain = np.vstack([np.zeros((1, sizeShard)),\n                           initBal * np.ones((1, sizeShard))])\n    block = blockGenCore(sizeShard, sparsity, initBal / chainLength / 2)\n    chain = np.vstack([initChain, np.repeat(block, chainLength, axis=0)])\n    senderBlocks = np.repeat([block[0, :]], numShards, axis=0)\n    beta = np.array(range(numShards)) + 1\n    alpha = np.array(range(numNodes)) + 1\n    coeff = coeGen(numNodes, numShards, beta, alpha)\n    tVer = []\n    for n in range(numNodes):\n        # measure the verification time of each node\n        start = time.time()\n        # run twice to mimic encoding + decoding\n        codedSenderBlock = np.dot(coeff[n, :], senderBlocks)\n        codedSenderBlock = np.dot(coeff[n, :], senderBlocks)\n        tVer.append(time.time() - start)\n        tVer[-1] += verifyCore(chain, codedSenderBlock)\n    return np.max(tVer), np.median(tVer), np.mean(tVer)\n\n\ndef verifyCore(chain, block):\n    # chain = rowwiseShuffle(chain)\n    start = time.time()\n    bal = np.sum(chain, axis=0)\n    newBal = bal + block\n    ignore = (newBal > 0).all()\n    t = time.time() - start\n    # print('time:', t, 'at size', chain.shape, newBal.shape)\n    return t\n\n\ndef rowwiseShuffle(arr):\n    # shuffle each row of a matrix independently\n    x, y = arr.shape\n    rows = np.indices((x, y))[0]\n    cols = [np.random.permutation(y) for _ in range(x)]\n    return arr[rows, cols]\n\n\ndef blockGenCore(sizeShard, sparsity, txCap):\n    '''\n    This function creates a block that contains sizeShard * sparsity transactions.\n    Inputs:\n        see above\n    Output:\n        block, see above\n    '''\n    numTrans = int(sizeShard * sparsity)\n    userShuffle = np.random.permutation(sizeShard)\n    idxSenders = userShuffle[:numTrans]\n    userShuffle = np.random.permutation(sizeShard)\n    idxReceivers = userShuffle[:numTrans]\n    block = np.zeros((2, sizeShard))\n    block[0, idxSenders] = -txCap\n    block[1, idxReceivers] = txCap\n    return block\n\n\ndef coeGen(numNodes, numShards, beta, alpha):\n    C = np.ones((numNodes, numShards))\n\n    for i in range(numNodes):  # generate the coefficients for ith node\n        for j in range(numShards):\n            multiply = list(range(numShards))\n            multiply.remove(j)\n            for l in multiply:\n                C[i][j] = C[i][j] * (alpha[i] - beta[l]) / (beta[j] - beta[l])\n    return C\n\n\n# def oneRound(numShards, numNodes, sizeShard, sparsity, chainLength, initBal):\n#     tVerMax, tVerMedian = frEpoch(numShards, numNodes, sizeShard, sparsity,\n#                                   chainLength, initBal)\n#     tVerMax, tVerMedian = ssEpoch(numShards, numNodes, sizeShard, sparsity,\n#                                   chainLength, initBal)\n#     tVerMax, tVerMedian = psEpoch(numShards, numNodes, sizeShard, sparsity,\n#                                   chainLength, initBal)\n\n\n# numShards = 10\n# numNodes = 30\n# sizeShard = 100\n# sparsity = 0.5\n# chainLength = 100\n# initBal = 1000\n# oneRound(numShards, numNodes, sizeShard, sparsity, chainLength, initBal)\n","sub_path":"mock/mockTiming.py","file_name":"mockTiming.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"332411913","text":"# Author: Jean-Baptiste Schiratti \n# Alexandre Gramfort \n# License: BSD 3 clause\n\n\nimport numpy as np\nfrom numpy.testing import assert_equal, assert_almost_equal, assert_raises\n\nfrom mne_features.univariate import (_slope_lstsq, compute_mean,\n                                     compute_variance, compute_std,\n                                     compute_ptp_amp, compute_skewness,\n                                     compute_kurtosis, compute_hurst_exp,\n                                     compute_app_entropy, compute_samp_entropy,\n                                     compute_decorr_time,\n                                     compute_pow_freq_bands,\n                                     compute_hjorth_mobility_spect,\n                                     compute_hjorth_complexity_spect,\n                                     compute_hjorth_mobility,\n                                     compute_hjorth_complexity,\n                                     compute_higuchi_fd, compute_katz_fd,\n                                     compute_zero_crossings,\n                                     compute_line_length,\n                                     compute_spect_entropy,\n                                     compute_svd_entropy,\n                                     compute_svd_fisher_info,\n                                     compute_energy_freq_bands,\n                                     compute_spect_edge_freq,
compute_wavelet_coef_energy,\n compute_teager_kaiser_energy)\n\nrng = np.random.RandomState(42)\nsfreq = 256.\ndata = rng.standard_normal((10, 20, int(sfreq)))\nn_epochs, n_channels = data.shape[:2]\nn_times = data[0].shape[1]\n\n\ndef test_slope_lstsq():\n x = rng.standard_normal((100,))\n m = rng.uniform()\n y = m * x + 1\n s1 = _slope_lstsq(x, y)\n s2 = np.polyfit(x, y, 1)[0]\n assert_almost_equal(s1, m)\n assert_almost_equal(s1, s2)\n\n\ndef test_shape_output():\n for func in (compute_mean, compute_variance, compute_std,\n compute_kurtosis, compute_skewness, compute_ptp_amp,\n compute_hurst_exp, compute_hjorth_complexity,\n compute_hjorth_mobility, compute_higuchi_fd, compute_katz_fd,\n compute_zero_crossings, compute_line_length,\n compute_svd_entropy, compute_svd_fisher_info):\n for j in range(n_epochs):\n feat = func(data[j, :, :])\n assert_equal(feat.shape, (n_channels,))\n\n\ndef test_shape_output_decorr_time():\n for j in range(n_epochs):\n feat = compute_decorr_time(sfreq, data[j, :, :])\n assert_equal(feat.shape, (n_channels,))\n\n\ndef test_shape_output_pow_freq_bands():\n fb = np.array([0.1, 4, 8, 12, 30])\n n_freqs = fb.shape[0]\n for j in range(n_epochs):\n feat = compute_pow_freq_bands(sfreq, data[j, :, :], freq_bands=fb)\n assert_equal(feat.shape, (n_channels * (n_freqs - 1),))\n\n\ndef test_shape_output_hjorth_mobility_spect():\n for j in range(n_epochs):\n feat = compute_hjorth_mobility_spect(sfreq, data[j, :, :])\n assert_equal(feat.shape, (n_channels,))\n\n\ndef test_shape_output_hjorth_complexity_spect():\n for j in range(n_epochs):\n feat = compute_hjorth_complexity_spect(sfreq, data[j, :, :])\n assert_equal(feat.shape, (n_channels,))\n\n\ndef test_shape_output_spect_entropy():\n for j in range(n_epochs):\n feat = compute_spect_entropy(sfreq, data[j, :, :])\n assert_equal(feat.shape, (n_channels,))\n\n\ndef test_shape_output_energy_freq_bands():\n fb = np.array([0.1, 4, 8, 12, 30])\n n_freqs = fb.shape[0]\n for j in range(n_epochs):\n feat = compute_energy_freq_bands(sfreq, data[j, :, :], freq_bands=fb)\n assert_equal(feat.shape, (n_channels * (n_freqs - 1),))\n\n\ndef test_shape_output_spect_edge_freq():\n edge = [50., 80., 85., 95.]\n for j in range(n_epochs):\n feat = compute_spect_edge_freq(sfreq, data[j, :, :], edge=edge)\n assert_equal(feat.shape, (n_channels * 4,))\n\n\ndef test_shape_output_wavelet_coef_energy():\n feat = compute_wavelet_coef_energy(data[0, :, :], wavelet_name='haar')\n assert_equal(feat.shape, (n_channels * 6,))\n\n\ndef test_app_entropy():\n feat = compute_app_entropy(data[0, :, :], emb=5)\n assert_equal(feat.shape, (n_channels,))\n with assert_raises(ValueError):\n compute_app_entropy(data[0, :, :], emb=5, metric='sqeuclidean')\n\n\ndef test_samp_entropy():\n feat = compute_samp_entropy(data[0, :, :], emb=5)\n assert_equal(feat.shape, (n_channels,))\n with assert_raises(ValueError):\n compute_samp_entropy(data[0, :, :], emb=5, metric='sqeuclidean')\n\n\ndef test_shape_output_teager_kaiser_energy():\n feat = compute_teager_kaiser_energy(data[0, :, :])\n assert_equal(feat.shape, (n_channels * 6 * 2, ))\n\n\nif __name__ == '__main__':\n\n test_slope_lstsq()\n test_shape_output()\n test_shape_output_decorr_time()\n test_shape_output_pow_freq_bands()\n test_shape_output_spect_entropy()\n test_shape_output_energy_freq_bands()\n test_shape_output_spect_edge_freq()\n test_shape_output_wavelet_coef_energy()\n test_app_entropy()\n test_samp_entropy()\n 
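# Note: the two Hjorth spectral shape tests defined above are not invoked by\n    # this manual runner, though a pytest run would still collect them.\n    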
test_shape_output_teager_kaiser_energy()\n","sub_path":"mne_features/tests/test_univariate.py","file_name":"test_univariate.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"467168234","text":"# this is a stock and flow model\r\nimport queue\r\nimport random\r\nfrom queue import Queue\r\nfrom sys import maxsize\r\nclass Node:\r\n    def __init__(self, name, status = True) -> None:\r\n        self.status = status\r\n        self.name = name\r\n\r\n    def stat(self):\r\n        print(self.name,self.status)\r\n\r\n    def report(self):\r\n        if self.status == True:\r\n            print(f\"node {self.name} is on\")\r\n        else: \r\n            print(f\"node {self.name} is off\")\r\n\r\n    def off(self):\r\n        self.status = False\r\n\r\n    def on(self):\r\n        self.status = True\r\n\r\nclass Source(Node):\r\n    def __init__(self, name, status, resources, max_output, efficiency) -> None:\r\n        super().__init__(name, status=status) #pay attention to \"status = status\" I think it should be \"status = True\"\r\n        self.resources = resources\r\n        self.max_output = max_output\r\n        self.efficiency = efficiency\r\n        self.desig = \"source\"\r\n        self.requests = []\r\n    def report(self):\r\n        if self.status == True:\r\n            print(f\"node {self.name} is on and has {self.resources} resources\")\r\n        else: \r\n            print(f\"node {self.name} is off and has {self.resources} resources\")\r\n\r\n    def supply(self, amount):\r\n        if self.status == True:\r\n            if self.resources*self.efficiency < amount:\r\n                output = self.resources*self.efficiency\r\n                self.resources = 0\r\n                print(f\"{self.name} is out of resources!\")\r\n                return output\r\n            elif amount > self.max_output:\r\n                self.resources -= self.max_output/self.efficiency\r\n                return self.max_output #this line of code operates under the assumption that power stations cannot be overloaded\r\n            else:\r\n                self.resources -= amount/self.efficiency\r\n                return amount\r\n        else: \r\n            print(f\"{self.name} is not operational!\")\r\n\r\n    def replenish(self, amount):\r\n        self.resources += amount\r\n\r\n    def request(self,source,amount):\r\n        self.requests.append((source,amount))\r\n\r\nclass Transmission(Node):\r\n    def __init__(self, name, status, capacity) -> None:\r\n        super().__init__(name, status=status)\r\n        self.capacity = capacity\r\n        self.desig = \"transmission\"\r\n        self.requests = []\r\n    def report(self):\r\n        if self.status == True:\r\n            print(f\"node {self.name} is on and has {self.capacity} capacity\")\r\n        else: \r\n            print(f\"node {self.name} is off and has {self.capacity} capacity\")\r\n\r\n    def transmit(self,load):\r\n        if self.status == True:\r\n            if load > self.capacity:\r\n                self.off()\r\n                print(f\"{self.name} has overloaded!\")\r\n                return 0\r\n            else:\r\n                return load\r\n        else:\r\n            print(f\"{self.name} is off\")\r\n\r\n    def request(self,source,amount):\r\n        self.requests.append((source,amount))\r\n\r\nclass Sink(Node):\r\n    def __init__(self, name, status, demand, capacity) -> None:\r\n        super().__init__(name, status=status)\r\n        self.demand = demand\r\n        self.capacity = capacity\r\n        self.desig = \"sink\"\r\n\r\n    def report(self):\r\n        if self.status == True:\r\n            print(f\"node {self.name} is on and has {self.demand} demand and {self.capacity} capacity\")\r\n        else: \r\n            print(f\"node {self.name} is off and has {self.demand} demand and {self.capacity} capacity\")\r\n\r\n    def receive(self, load):\r\n        
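# Returns a (satisfied, shortfall) pair: (True, 0) when the delivered load\r\n        # exactly meets demand, (False, demand - load) when it does not, and\r\n        # (False, 0) after an overload trips the sink off.\r\n        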
if self.status == True:\r\n            if load > self.capacity:\r\n                self.off()\r\n                print(f\"{self.name} has overloaded!\")\r\n                return (False, 0)\r\n            elif load != self.demand:\r\n                return (False,self.demand-load)\r\n            else:\r\n                return(True,0)\r\n\r\n        else:\r\n            print(f\"{self.name} is off\")\r\n\r\n\r\ndef constructor(matrix):\r\n    try: \r\n        node_index = {}\r\n        tmp_matrix = matrix\r\n\r\n        for i in matrix:\r\n            node_index.update({matrix.index(i)+1:i[0]}) \r\n\r\n        x = 0\r\n        for row in tmp_matrix:\r\n            n = 0\r\n            for column in row:\r\n                if column == 1:\r\n                    tmp_matrix[x][n] = node_index.get(n)\r\n                n+=1\r\n            x+=1\r\n\r\n        for row in tmp_matrix:\r\n            try:\r\n                while True:\r\n                    row.remove(0) #https://www.techiedelight.com/remove-all-occurrences-item-list-python/\r\n            except:\r\n                pass\r\n\r\n        graph = {}\r\n        for i in tmp_matrix:\r\n            graph.update({i[0]:i[1:]})\r\n        return graph\r\n    except:\r\n        pass\r\n\r\nclass Graph:\r\n    def __init__(self, name, adj_matrix = None, adj_list=None) -> None:\r\n        self.name = name\r\n        self.adj_list = constructor(adj_list)\r\n        self.adj_matrix = adj_matrix\r\n        self.desig = \"graph\" \r\n\r\n    def report(self):\r\n        print(f\"adjacency matrix is:{self.adj_matrix}, adjacency list is: {self.adj_list}\")\r\n\r\n    def isconnected(self, node):\r\n        return self.adj_list.get(node)\r\n\r\n# net = Graph(\"powergrid\",None,[[\"node1\",0,0,1,0,1,0],[\"node2\",1,0,1,1,1,1],[\"node3\", 0,0,0,0,1,0],[\"node4\",0,1,0,0,1,1],[\"node5\",0,0,1,1,0,1],[\"node6\",0,1,1,0,0,0]])\r\n\r\n\r\n# station = Source(\"powerstation\",True,100,20,.4)\r\n\r\n# powerline = Transmission(\"line1\", True, 30)\r\n\r\n# house = Sink(\"home\", True, 5, 30)\r\n\r\n\r\n# def identifier(classs):\r\n#     if classs.desig == \"graph\":\r\n#         print(\"this is a graph\")\r\n#     elif classs.desig == \"source\":\r\n#         print(\"this is a source\")\r\n#     elif classs.desig == \"transmission\":\r\n#         print(\"this is a transmission\")\r\n#     else:\r\n#         print(\"this is a sink\")\r\n\r\n# identifier(station)\r\n# identifier(powerline)\r\n# identifier(house)\r\n\r\nclass Model:\r\n\r\n    def __init__(self, sources, trans, sinks, graphs) -> None:\r\n        self.sources = sources\r\n        self.trans = trans\r\n        self.sinks = sinks\r\n        self.graphs = graphs\r\n        self.all_nodes = sources+trans+sinks\r\n\r\n    def sim(self,timesteps,graph_index,daily_demand):# you might have trouble referencing graphs within its own class\r\n        t = 0\r\n        for i in range(timesteps):\r\n            #step 1: back propagates demand\r\n            for node in self.all_nodes:\r\n                node.stat()\r\n            for sink in self.sinks: \r\n                sink.report()\r\n                parents = self.graphs[graph_index].isconnected(sink)\r\n                print(parents)\r\n                weight_tot = 0\r\n                for v in parents:\r\n                    weight_tot += v.capacity #maybe use a dictionary to pair class names with classes\r\n                print(weight_tot)\r\n                print(type(v))\r\n                for p in parents:\r\n                    print(type(p))\r\n                    p.request(sink,(p.capacity/weight_tot)*sink.demand)\r\n\r\n            for tran in self.trans: \r\n                parents = self.graphs[graph_index].isconnected(tran)\r\n                print(parents)\r\n                weight_tot = 0\r\n                for v in parents:\r\n                    if v.desig == \"source\":\r\n                        weight_tot += v.max_output #maybe use a dictionary to pair class names with classes\r\n                        print(weight_tot)\r\n                        print(type(v))\r\n                print(tran.requests)\r\n                load = 0\r\n                for r in tran.requests:\r\n                    load += r[1]\r\n                print(load)\r\n\r\n                for p in parents:\r\n                    if p.desig == \"source\":\r\n                        print(type(p))\r\n                        p.request(sink,(p.max_output/weight_tot)*load)\r\n            #step 2: forward propagates supply\r\n            for s in self.sources:\r\n\r\n                if s.status == True:\r\n\r\n                    tot_load = 0\r\n                    for r in s.requests:\r\n                        tot_load += s.supply(r[1])\r\n                    if tot_load < s.max_output:\r\n                        for request in s.requests:\r\n                            if request[0].desig == \"transmission\":\r\n                                
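# Caution (author-flagged region): requests[1] is the second queued\r\n                                # (requester, amount) tuple, so transmit() below receives a tuple\r\n                                # where a numeric load is expected.\r\n                                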
request[0].transmit(request[0].requests[1])\r\n if request[0].status == True:\r\n request[0].demand -= x #i took away .request[0] to make this work!!! go back and fix\r\n else:\r\n print(f\"{request[0].name} has overloaded!\")\r\n else:\r\n print(\"wrong type:\", type(request[0]))\r\n else:\r\n print(f\"{s.name} has overloaded!\")\r\n else:\r\n print(f\"{s.name} is off\")\r\n\r\n t+=1\r\n\r\n for so in self.sinks:\r\n so.demand += daily_demand\r\n\r\nsource1 = Source(\"source1\", True, 100000, 100, 1)\r\nsource2 = Source(\"source2\", True, 100000, 100, 1)\r\ntrans1 = Transmission(\"trans1\", True, 50)\r\ntrans2 = Transmission(\"trans2\", True, 50)\r\nsink1 = Sink(\"sink1\",True,10, 200)\r\nsink2 = Sink(\"sink2\", True, 10, 200)\r\nsink3 = Sink(\"sink3\", True, 10, 200)\r\npower_grid = Graph(\"power_grid\", None, [[source1,0,1,1,0,0,0],[source2,0,0,0,1,0,0,0],[trans1,1,0,0,0,1,1,0],[trans2,1,1,0,0,0,0,1],[sink1,0,0,1,0,0,0,0],[sink2,0,0,1,0,0,0,0],[sink3,0,0,0,1,0,0,0]])\r\n\r\n\r\nsource_list = [source1,source2]\r\ntrans_list = [trans1,trans2]\r\nsink_list = [sink1,sink2,sink3]\r\ngrid_graph = [power_grid]\r\nhello_world = Model(source_list, trans_list, sink_list,grid_graph)\r\n\r\nhello_world.sim(10,0,10)","sub_path":"stock_model1.py","file_name":"stock_model1.py","file_ext":"py","file_size_in_byte":9649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24922452","text":"#!/usr/bin/env python3\n\nfrom typing import *\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n levels_dict = {}\n\n def traverse(tn: TreeNode, level: int):\n if tn == None:\n return\n if not level in levels_dict:\n levels_dict[level] = []\n levels_dict[level].append(tn.val)\n traverse(tn.left, level+1)\n traverse(tn.right, level+1)\n\n traverse(root, 0)\n result = []\n for i in sorted(levels_dict):\n result.append(levels_dict[i])\n\n return result\n\n\na = TreeNode(10)\na.left = TreeNode(5)\na.left.left = TreeNode(4)\na.left.right = TreeNode(6)\na.right = TreeNode(5)\na.right.left = TreeNode(4)\na.right.right = TreeNode(6)\n\n\ns= Solution()\nprint(s.levelOrder(a))\n","sub_path":"leetcode/0102.py","file_name":"0102.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216535555","text":"import json\n\nfile_path = '../config/help.json'\n\nwith open(file_path, 'r') as file:\n data = json.load(file)\n\nfor key, value in data.items():\n output = f\"## {key}\\n\"\n for index, command in enumerate(value, 1):\n output += f\"{index}. 
[{command['name']}]({key}_commands.md?id={command['name']})\\n\"\n\n    output += \"\\n\"\n    for command in value:\n        output += f\"## {command['name']}\\n\"\n        output += \"### Description {docsify-ignore}\\n\"\n        output += f\"{command['description']}\\n\"\n        output += \"### Syntax {docsify-ignore}\\n\\n\"\n        output += f\"> {command['syntax']}\\n\\n\"\n        output += \"### Example {docsify-ignore}\\n\\n\"\n        output += f\"> {command['example']}\\n\\n\"\n\n    print(output)\n","sub_path":"scripts/command_information_generator.py","file_name":"command_information_generator.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"118205306","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib import font_manager, rc\n\n# fontname = font_manager.FontProperties(fname='malgun.ttf').get_name()\n# rc('font', family=fontname)\n#\nd1 = pd.read_csv('data\\\\concat_1.csv')\n# print(d1)\nd2 = pd.read_csv('data\\\\concat_2.csv')\n# print(d2)[report['State'] == 'New Hampshire']\nd3 = pd.read_csv('data\\\\concat_3.csv')\n# print(d3)\n#\n#\n# print(data)\n# data = pd.concat([d1,d2,d3])\n# print(data.loc[1])\n# print(data.iloc[1])\n# data = pd.concat([d1,d2,d3], ignore_index=True)\n# print(data)\n\n# data = pd.concat([d1,d2,d3], axis=1)\n# print(data)\n# ------------------------\nperson = pd.read_csv('data\\\\survey_person.csv')\nsite = pd.read_csv('data\\\\survey_site.csv')\nsurvey = pd.read_csv('data\\\\survey_survey.csv')\nvisited = pd.read_csv('data\\\\survey_visited.csv')\nprint(person)\nps = person.merge(survey, left_on='ident', right_on='person')\nprint(survey)\nprint(ps)\nprint(site)\nprint(visited)\nsv = site.merge(visited, left_on='name', right_on='site')\nprint(sv)\n","sub_path":"ml/pan3.py","file_name":"pan3.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465576728","text":"#!/usr/bin/env python\r\n# coding=utf-8\r\n\r\nfrom PyQt4.QtCore import QString\r\nfrom PyQt4.QtGui import QMainWindow, QStandardItemModel, QStandardItem, \\\r\n    QDataWidgetMapper, QTableView\r\nfrom PyQt4 import uic\r\n\r\ntry:\r\n    _fromUtf8 = QString.fromUtf8\r\nexcept AttributeError:\r\n    def _fromUtf8(s):\r\n        return s\r\n\r\nclass MainWindow (QMainWindow):\r\n    def __init__(self,parent = None):\r\n        super(MainWindow, self).__init__(parent)\r\n        uic.loadUi(\"mainwindow.ui\", self)\r\n\r\n        model = QStandardItemModel(4, 2, self)\r\n        model.setItem(0, 0, QStandardItem(_fromUtf8(\"xiaoming\")))\r\n        model.setItem(0, 1, QStandardItem(_fromUtf8(\"0\")))\r\n        model.setItem(1, 0, QStandardItem(_fromUtf8(\"xiaogang\")))\r\n        model.setItem(1, 1, QStandardItem(_fromUtf8(\"5\")))\r\n        model.setItem(2, 0, QStandardItem(_fromUtf8(\"xiaohong\")))\r\n        model.setItem(2, 1, QStandardItem(_fromUtf8(\"0\")))\r\n        model.setItem(3, 0, QStandardItem(_fromUtf8(\"赵六\")))\r\n        model.setItem(3, 1, QStandardItem(_fromUtf8(\"8\")))\r\n\r\n        self.mapper = QDataWidgetMapper(self)\r\n        # Set the model\r\n        self.mapper.setModel(model)\r\n        # Map the widgets to the corresponding columns in the model\r\n        self.mapper.addMapping(self.lineEdit, 0)\r\n        self.mapper.addMapping(self.lineEdit_2, 1)\r\n        # Show the first row of the model\r\n        self.mapper.toFirst()\r\n\r\n        #----------------------------------------------------------\r\n        tableview = QTableView()\r\n        tableview.setModel(model)\r\n        tableview.show()\r\n\r\n    # Previous button\r\n    def on_pushButton_clicked(self):\r\n        self.mapper.toPrevious()\r\n\r\n    # Next button\r\n    def 
on_pushButton_2_clicked(self):\r\n        self.mapper.toNext()\r\n\r\n","sub_path":"src/16/16-14/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78435795","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom telegram.ext import Updater\nimport export_to_telegraph\nimport link_extractor\nimport yaml\nimport traceback\nimport time\n\nsource = [\n\t'https://matters.news',\n\t'https://www.thinkingtaiwan.com',\n\t'https://whogovernstw.org', \n\t'https://squatting2047.com',\n]\n\nwith open('existing') as f:\n\texisting = set([x.strip() for x in f.readlines()])\nwith open('credential') as f:\n\tcredential = yaml.load(f, Loader=yaml.FullLoader)\n\nexport_to_telegraph.token = credential['telegraph_token']\n\ntele = Updater(credential['bot_token'], use_context=True)\ndebug_group = tele.bot.get_chat(-1001198682178)\ntaiwan_channel = tele.bot.get_chat(-1001250188871)\n\ndef add(link):\n\texisting.add(link)\n\twith open('existing', 'a') as f:\n\t\tf.write('\\n' + link)\n\ndef export():\n\tfor s in source:\n\t\tfor link, _ in link_extractor.getLinks(s):\n\t\t\tif link in existing:\n\t\t\t\tcontinue\n\t\t\tr = export_to_telegraph.export(link, force=True, \n\t\t\t\ttoSimplified=True, throw_exception=True)\n\t\t\ttaiwan_channel.send_message(r)\n\t\t\tadd(link)\n\t\t\tbreak # only add one for each source\n\ndef adhoc():\n\tfemale_channel = tele.bot.get_chat(-1001162153695)\n\twith open('所有链接.txt') as f:\n\t\tfor link in f.readlines():\n\t\t\ttry:\n\t\t\t\tr = export_to_telegraph.export(link.strip(), force=True, \n\t\t\t\t\tthrow_exception=True)\n\t\t\t\tfemale_channel.send_message(r)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\ttraceback.print_exc()\n\nif __name__=='__main__':\n\t# adhoc()\n\texport()","sub_path":"aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548417613","text":"# Import random, will be used for random.randint()\r\nimport random\r\n\r\nfirstName = input(\"Enter your first name: \")\r\n\r\nlastName = input(\"Enter your last name: \")\r\n# The random part of the email generator will be the numbers. It could be either one, two, three, or four digits long. 
\r\nnum = random.randint(1, 1199)\r\n\r\ngmail = \"@gmail.com\"\r\n#combine variables starting with lastName + \".\"(period) + firstName + str(num) + gmail\r\nemail = lastName + \".\" + firstName + str(num) + gmail\r\n# use .lower() function to make entire email lower case no matter the input.\r\nprint(email.lower())\r\n","sub_path":"ex36.py","file_name":"ex36.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46371993","text":"from ..base import *\nfrom .props.base import ObjectTypes\n\n\nclass CreationManager(object):\n\n    def __init__(self, menubar):\n\n        creation_data = {}\n\n        def get_handler(object_type):\n\n            def handler():\n\n                if not GlobalData[\"active_creation_type\"]:\n                    GlobalData[\"active_creation_type\"] = object_type\n                    Mgr.enter_state(\"creation_mode\")\n                elif GlobalData[\"active_creation_type\"] != object_type:\n                    Mgr.update_app(\"interactive_creation\", \"changed\")\n                    GlobalData[\"active_creation_type\"] = object_type\n                    Mgr.enter_state(\"creation_mode\")\n                Mgr.update_app(\"selected_obj_types\", (object_type,))\n                Mgr.update_app(\"interactive_creation\", \"started\")\n                Mgr.update_app(\"status\", \"create\", object_type, \"idle\")\n\n            return handler\n\n        for object_type, object_type_name in ObjectTypes.get_types().iteritems():\n            handler = get_handler(object_type)\n            creation_data[object_type] = {\"name\": object_type_name, \"handler\": handler}\n\n        menubar.add_menu(\"create\", \"Create\")\n\n        data = creation_data[\"plane\"]\n        menubar.add_menu_item(\"create\", \"plane\", \"Create %s\" % data[\"name\"], data[\"handler\"])\n\n        obj_types = (\"box\", \"sphere\", \"cylinder\", \"torus\")\n        accelerators = (\"B\", \"S\", \"C\", \"T\")\n        mod_code = wx.MOD_SHIFT | wx.MOD_CONTROL\n        hotkeys = [(ord(accel), mod_code) for accel in accelerators]\n\n        for obj_type, accel, hotkey in zip(obj_types, accelerators, hotkeys):\n            data = creation_data[obj_type]\n            menubar.add_menu_item(\"create\", obj_type, \"Create %s\\tSHIFT+CTRL+%s\" % (data[\"name\"], accel),\n                                  data[\"handler\"], hotkey)\n\n        data = creation_data[\"cone\"]\n        menubar.add_menu_item(\"create\", \"cone\", \"Create %s\" % data[\"name\"], data[\"handler\"])\n\n        menubar.add_menu_item_separator(\"create\")\n\n        obj_types = (\"tex_projector\", \"dummy\")\n        accelerators = (\"P\", \"D\")\n        hotkeys = [(ord(accel), mod_code) for accel in accelerators]\n\n        for obj_type, accel, hotkey in zip(obj_types, accelerators, hotkeys):\n            data = creation_data[obj_type]\n            menubar.add_menu_item(\"create\", obj_type, \"Create %s\\tSHIFT+CTRL+%s\" % (data[\"name\"], accel),\n                                  data[\"handler\"], hotkey)\n\n        data = creation_data[\"point_helper\"]\n        menubar.add_menu_item(\"create\", \"point_helper\", \"Create %s\" % data[\"name\"], data[\"handler\"])\n\n    def setup(self):\n\n        def enter_creation_mode(prev_state_id, is_active):\n\n            Mgr.do(\"set_viewport_border_color\", (220, 220, 100))\n            Mgr.do(\"enable_components\")\n\n            if prev_state_id in (\"selection_mode\", \"checking_creation_start\", \"processing\"):\n                Mgr.do(\"display_next_obj_color\")\n\n        add_state = Mgr.add_state\n        add_state(\"creation_mode\", -10, enter_creation_mode)\n        add_state(\"checking_creation_start\", -11, lambda prev_state_id, is_active:\n                  Mgr.do(\"disable_components\", show=False))\n","sub_path":"src/gui/components/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"603976562","text":"\"\"\"\nProject Euler #35\nTakes an insane amount of time!\n\"\"\"\nupper_bound = 1000000\n\ndef primes_sieve(limit):\n limitn = limit+1\n primes = dict()\n for i in range(2, limitn): primes[i] = True\n\n for i in primes:\n factors = range(i,limitn, i)\n for f in factors[1:]:\n primes[f] = False\n return [i for i in primes if primes[i]==True]\n\nprimes = primes_sieve(upper_bound)\n\ndef isPrime(n):\n if n in primes : return True\n else : return False\n\nfrom collections import deque\ndef shifter(num):\n strnum = deque(str(num))\n for i in range(len(strnum)):\n yield int(''.join(strnum))\n strnum.rotate()\n\nprint(sum(1 for i in range(upper_bound) if all(isPrime(p) for p in shifter(i))))\n","sub_path":"Competitive Programming/project_euler/35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"431350913","text":"# Serenity/Logician\n\nimport sqlite3\nimport random\nimport os\nimport os.path\nimport discord\nfrom discord.ext import commands\n\nownerIds = [\n 185877810760515585\n]\ndbName = \"logic.db\"\nbotToken = \"MjE1MjIzNTQxNjMxNjgwNTEy.Cqwnrg.70AtEEUSDdLIJPSbiBMyu3-jgUQ\"\nsubToken = \"MjIwNTA5MTUyNjA1MjQxMzQ1.CqzjCg.8nbVlfQxTxcQhhFBG0hGZGbBqZ4\"\nstartupExtensions = [\"azgame\",\"ttt\",\"response\",\"status\",\"admin\"]\n\ntoken = botToken\nif token == botToken:\n description = \"\"\"A simple bot for Discord.\"\"\"\n prefix = \"$\"\nelse:\n description = \"\"\"Testing version for Logician.\"\"\"\n prefix = \"*\"\n\nbot = commands.Bot(command_prefix=prefix,description=description)\n\n@bot.event\nasync def on_ready():\n print(\"Current login: \")\n print(bot.user.name)\n print(bot.user.id)\n print(\"Prefix: \" + prefix)\n print(\"Owners: \" + str(ownerIds))\n bot.owners = ownerIds\n\n bot.dbName = dbName\n print(\"Connecting to database...\")\n bot.db = sqlite3.connect(bot.dbName)\n print(\"Connected!\")\n\n for extension in startupExtensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n print(\"extension {} not loaded: \".format(extension))\n print(\"{}: {}\".format(type(e).__name__,e))\n\n print(\"Ready to begin!\")\n\n@bot.command()\nasync def echo(msg : str):\n \"\"\"Echoes the given string.\"\"\"\n print(\"Command received: echo {}\".format(msg))\n await bot.say(msg)\n\n@bot.command(pass_context = True)\nasync def uid(ctx):\n await bot.say(str(ctx.message.author.id))\n\nbot.run(token)\n","sub_path":"discord.py/logician.py","file_name":"logician.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"557942905","text":"import json\n\nfrom ..openapi.tator_openapi.models import CreateResponse\nfrom ..util.get_api import get_api\nfrom ..util.get_parser import get_parser\n\ndef parse_args():\n parser = get_parser()\n parser.add_argument('--project', type=int, help=\"Unique integer identifying a project.\")\n parser.add_argument('--media_type', type=int, help=\"Unique integer identifying a media type.\")\n parser.add_argument('--section', type=str, help=\"Name of section to upload to.\")\n parser.add_argument('--name', type=str, help=\"Name of file.\")\n parser.add_argument('--md5', type=str, help=\"md5 sum of file.\")\n parser.add_argument('--output', type=str, help=\"Where to dump media ID.\")\n return parser.parse_args()\n\ndef create_media(host, token, project, media_type, section, name, md5):\n \"\"\" Creates a media object 
and returns the ID.\n\n    :param host: Host URL.\n    :param token: API token.\n    :param project: Unique integer identifying a project.\n    :param media_type: Unique integer identifying a media type.\n    :param section: Section name.\n    :param name: File name.\n    :param md5: md5 sum of file.\n    \"\"\"\n    api = get_api(host, token)\n    response = api.create_media(project, media_spec={\n        'type': media_type,\n        'section': section,\n        'name': name,\n        'md5': md5,\n    })\n    assert isinstance(response, CreateResponse)\n    media_id = response.id\n\n    return media_id\n\nif __name__ == '__main__':\n    args = parse_args()\n    media_id = create_media(args.host, args.token, args.project, args.media_type,\n                            args.section, args.name, args.md5)\n    with open(args.output, 'w') as f:\n        f.write(str(media_id))\n","sub_path":"tator/transcode/create_media.py","file_name":"create_media.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"547547287","text":"\"\"\"\nRead the daily high and low temperatures from the CSV file and plot them\n\"\"\"\nimport csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nimport os\n\n# Read the data from the CSV file\nfilename = r\"sitka_weather_2014.csv\"\nwith open(filename) as f:  # open the file and store the resulting file object in f\n    reader = csv.reader(f)  # create a reader iterator\n    next(reader)  # advance past the first row, which is a header rather than data\n    dates, highs, lows = [], [], []  # lists for the dates and the high/low values\n    for row in reader:\n        current_date = datetime.strptime(row[0], '%Y-%m-%d')  # convert the date string to a datetime object\n        dates.append(current_date)  # store the date\n        high = int(row[1])  # convert the string to a number\n        highs.append(high)  # store the daily high\n        low = int(row[3])\n        lows.append(low)  # store the daily low\n\n# Plot the data\nfig = plt.figure(dpi=128, figsize=(10, 6))\nplt.plot(dates, highs, c='red', alpha=0.5)  # alpha sets the colour transparency: 0 is fully transparent, 1 (the default) fully opaque\nplt.plot(dates, lows, c='blue', alpha=0.5)\nplt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)  # fill the area between the two curves\nplt.title('Daily high and low temperature-2014', fontsize=24)\nplt.xlabel('', fontsize=16)\nplt.ylabel('Temperature(F)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\nfig.autofmt_xdate()  # draw the date labels at an angle\njpg_dir = os.path.dirname(__file__) + '/csv1.jpg'\nplt.savefig(jpg_dir)\nplt.show()\n# ---------------------\n# Author: Asia - Lee\n# Source: CSDN\n# Original: https: // blog.csdn.net / asialee_bird / article / details / 79589307\n# Copyright notice: this is the blogger's original article; please include a link to it when reposting!\n\n# To write a csv file, see the pandas example\n# filename = r\"numpy_pandas_matplotlib.csv\"\n# with open(filename, 'wb') as wf:  # open the file and store the resulting file object in wf\n#     writer = csv.writer(wf)  # create a writer\n#     writer.writerow(['data', 'age', 'sex', 'addr', 'num'])\n#     # you can also write to another file while reading\n#     writer.writerow(reader.next())\n","sub_path":"numpy_pandas_matplotlib/04_pandas_csv_plot.py","file_name":"04_pandas_csv_plot.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170363703","text":"__author__ = \"Omer Yampel @yampelo\"\n\nimport io\nimport os\nimport sys\nfrom shutil import rmtree\nfrom setuptools import find_packages, setup, Command\n\n\n# Package meta-data.\nNAME = \"pybeagle\"\nDESCRIPTION = \"Beagle is an incident response and digital forensics tool which transforms data sources and logs into graphs\"\nURL = \"https://github.com/yampelo/beagle\"\nAUTHOR = \"yampelo\"\nREQUIRES_PYTHON = \">=3.6.0,<3.7\"\nVERSION = \"1.0.1\"\nEMAIL = None\n\n\nEXTRAS = None\nREQUIRED = [\n    \"acora==2.0\",\n    \"aff4-snappy==0.5.1\",\n    \"ansimarkup==1.4.0\",\n    \"appnope==0.1.0 ; sys_platform == 'darwin'\",\n    \"arrow==0.10.0\",\n    
\"artifacts==20170909\",\n \"atomicwrites==1.3.0\",\n \"attrs==19.1.0\",\n \"backcall==0.1.0\",\n \"better-exceptions-fork==0.2.1.post6\",\n \"certifi==2019.3.9\",\n \"chardet==3.0.4\",\n \"click==7.0\",\n \"colorama==0.4.1\",\n \"coverage==5.0a4\",\n \"decorator==4.4.0\",\n \"expiringdict==1.1.4\",\n \"filelock==2.0.6\",\n \"flask-sqlalchemy==2.3.2\",\n \"flask==1.0.2\",\n \"future==0.16.0\",\n \"graphistry[networkx]==1.0a11\",\n \"grpcio==1.19.0\",\n \"gunicorn==19.9.0\",\n \"hexdump==3.3\",\n \"html5lib==1.0.1\",\n \"httplib2==0.9.2\",\n \"idna==2.5\",\n \"intervaltree==2.1.0\",\n \"ipaddr==2.2.0\",\n \"ipython-genutils==0.2.0\",\n \"ipython==6.5.0\",\n \"isodate==0.6.0\",\n \"itsdangerous==1.1.0\",\n \"jedi==0.13.3\",\n \"jinja2==2.10\",\n \"loguru==0.2.5\",\n \"lxml==4.3.2\",\n \"markupsafe==1.1.1\",\n \"more-itertools==6.0.0 ; python_version > '2.7'\",\n \"neo4j==1.7.2\",\n \"neobolt==1.7.4\",\n \"neotime==1.7.4\",\n \"networkx==2.2\",\n \"numpy==1.16.2\",\n \"oauth2client==3.0.0\",\n \"pandas==0.24.2\",\n \"parsedatetime==2.4\",\n \"parso==0.3.4\",\n \"pathlib==1.0.1\",\n \"pexpect==4.6.0 ; sys_platform != 'win32'\",\n \"pickleshare==0.7.5\",\n \"pluggy==0.9.0\",\n \"portpicker==1.1.1\",\n \"prompt-toolkit==1.0.15\",\n \"protobuf==3.6.1\",\n \"psutil==5.6.1\",\n \"ptyprocess==0.6.0\",\n \"py==1.8.0\",\n \"pyaff4==0.26.post6\",\n \"pyarrow==0.12.1\",\n \"pyasn1-modules==0.2.4\",\n \"pyasn1==0.4.5\",\n \"pyblake2==0.9.3\",\n \"pycryptodome==3.4.7\",\n \"pydgraph==1.0.3\",\n \"pyelftools==0.24\",\n \"pygments==2.3.1\",\n \"pyparsing==2.1.5\",\n \"pytest-cov==2.6.1\",\n \"pytest==4.3.1\",\n \"python-dateutil==2.6.1\",\n \"python-evtx==0.6.1\",\n \"pytsk3==20170802\",\n \"pytz==2017.3\",\n \"pyyaml==3.12\",\n \"rdflib[sparql]==4.2.2\",\n \"readline==6.2.4.1\",\n \"rekall-agent==1.7.1\",\n \"rekall-capstone==3.0.5.post2\",\n \"rekall-core==1.7.2rc1\",\n \"rekall-efilter==1.6.0\",\n \"rekall-lib==1.7.2rc1\",\n \"rekall-yara==3.6.3.1\",\n \"rekall==1.7.2rc1\",\n \"requests==2.18.1\",\n \"rsa==4.0\",\n \"simplegeneric==0.8.1\",\n \"six==1.12.0\",\n \"sortedcontainers==1.5.7\",\n \"sparqlwrapper==1.8.2\",\n \"sqlalchemy==1.3.1\",\n \"sseclient==0.0.18\",\n \"traitlets==4.3.2\",\n \"urllib3==1.21.1\",\n \"wcwidth==0.1.7\",\n \"webencodings==0.5.1\",\n \"werkzeug==0.15.1\",\n]\n\n# The rest you shouldn't have to touch too much :)\n# ------------------------------------------------\n# Except, perhaps the License and Trove Classifiers!\n# If you do change the License, remember to change the Trove Classifier for that!\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Import the README and use it as the long-description.\n# Note: this will only work if 'README.md' is present in your MANIFEST.in file!\ntry:\n with io.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\nexcept FileNotFoundError:\n long_description = DESCRIPTION\n\n# Load the package's __version__.py module as a dictionary.\n\nabout = {} # type: ignore\n\nif not VERSION:\n project_slug = NAME.lower().replace(\"-\", \"_\").replace(\" \", \"_\")\n with open(os.path.join(here, project_slug, \"__version__.py\")) as f:\n exec(f.read(), about)\nelse:\n about[\"__version__\"] = VERSION\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = \"Build and publish the package.\"\n user_options = [] # type: ignore\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def 
initialize_options(self):\n        pass\n\n    def finalize_options(self):\n        pass\n\n    def run(self):\n        try:\n            self.status(\"Removing previous builds…\")\n            rmtree(os.path.join(here, \"dist\"))\n        except OSError:\n            pass\n\n        self.status(\"Building Source and Wheel (universal) distribution…\")\n        os.system(\"{0} setup.py sdist bdist_wheel --universal\".format(sys.executable))\n\n        self.status(\"Uploading the package to PyPI via Twine…\")\n        os.system(\"twine upload dist/*\")\n\n        self.status(\"Pushing git tags…\")\n        os.system(\"git tag v{0}\".format(about[\"__version__\"]))\n        os.system(\"git push --tags\")\n\n        sys.exit()\n\n\n# Where the magic happens:\nsetup(\n    name=NAME,\n    version=about[\"__version__\"],\n    description=DESCRIPTION,\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    author=AUTHOR,\n    author_email=EMAIL,\n    python_requires=REQUIRES_PYTHON,\n    url=URL,\n    packages=find_packages(exclude=[\"test*\", \"beagle/web\"]),\n    install_requires=REQUIRED,\n    extras_require=EXTRAS,\n    include_package_data=True,\n    license=\"MIT\",\n    classifiers=[\n        # Trove classifiers\n        \"License :: OSI Approved :: MIT License\",\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.6\",\n        \"Topic :: Software Development :: Libraries\",\n        \"Topic :: Security\",\n        \"Development Status :: 5 - Production/Stable\",\n        \"Environment :: Web Environment\",\n        \"Operating System :: OS Independent\",\n    ],\n    # $ setup.py publish support.\n    cmdclass={\"upload\": UploadCommand},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424713807","text":"# This code was written with python 3.5.1\nimport configparser\nimport datetime\nimport json\nimport psutil\nimport schedule\nimport time\n\nconfig = configparser.ConfigParser()\nconfig.read('conf.ini')\noutput_type = config.get('common', 'type')\ninterval = config.get('common', 'interval')\nsnap = 1\n\n\nclass inherit(object):\n    @property\n    def time_now(self):\n        # evaluated on each access so every snapshot gets a fresh timestamp\n        return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')\n\n    snap = 1\n\n    def makedict(self, p):\n        \"\"\"This function converts psutil format into dictionaries\"\"\"\n        val = list(p)\n        key = p._fields\n        result_dict = dict(zip(key, val))\n        return result_dict\n\n    @classmethod\n    def snap_increment(cls):\n        cls.snap += 1\n\n\nclass sysstat_text(inherit):\n    def sysstat_text_method(self, file=\"output.txt\"):\n        \"\"\"This function writes results of monitoring into text file\"\"\"\n        global snap\n        print(\"Writing info in text file(SNAPSHOT #{})\".format(snap))\n        timestamp = super().time_now\n        text_file = open(file, \"a\")\n        text_file.write(\"Snapshot #{0}, timestamp - {1}\\n\".format(snap, timestamp))\n        text_file.write(\"CPU per core: {0}\\n\".format(psutil.cpu_percent(percpu=True)))\n        text_file.write(\n            \"Virtual memory usage : total = {0}, available = {1}, used = {2}\\n\".format(psutil.virtual_memory()[0],\n                                                                                       psutil.virtual_memory()[1],\n                                                                                       psutil.virtual_memory()[4]))\n        text_file.write(\n            \"Disk IO: read count = {0}, write count = {1}\\n\".format(psutil.disk_io_counters(perdisk=False)[0],\n                                                                    psutil.disk_io_counters(perdisk=False)[1]))\n        text_file.write(\"Network info:\\n {}\\n\".format(psutil.net_io_counters(pernic=True)))\n        text_file.write(\"Disk usage: {}\\n\".format(psutil.disk_usage('/')))\n        text_file.write(\"\\n\")\n        text_file.close()\n        snap += 1\n\n\nclass sysstat_json(inherit):\n    def 
sysstat_json_method(self, file=\"output.json\"):\n \"\"\"This function writes results of monitoring into json file (default \"output.json\")\"\"\"\n global snap\n print(\"Writing info in json file(SNAPSHOT #{})\".format(snap))\n timestamp = super().time_now\n json_file = open(file, \"a+\")\n json_file.write(\"Snapshot #{0}, timestamp - {1}\\n\".format(snap,\n timestamp))\n json_file.write(\"CPU per core\\n\")\n json.dump(psutil.cpu_percent(percpu=True),\n json_file,\n indent=4)\n json_file.write(\"\\nVirtual memory\\n\")\n json.dump(super().makedict(psutil.virtual_memory()),\n json_file,\n indent=4)\n json_file.write(\"\\nDisk IO\\n\")\n json.dump(super().makedict(psutil.disk_io_counters(perdisk=False)),\n json_file,\n indent=4)\n json_file.write(\"\\nNetwork info\\n\")\n json.dump(psutil.net_io_counters(pernic=True),\n json_file,\n indent=4)\n json_file.write(\"\\nDisk usage\\n\")\n json.dump(super().makedict(psutil.disk_usage('/')),\n json_file,\n indent=4)\n json_file.write(\"\\n\\n\")\n json_file.close()\n snap += 1\n\n\ndef call():\n if output_type == \"txt\":\n txt_obj.sysstat_text_method()\n elif output_type == \"json\":\n print('Output file type = ' + output_type + ', and interval is ' + interval + ' mins')\n js_obj.sysstat_json_method()\n else:\n print(\"Unknown file type set in config file\")\n quit()\n\n\njs_obj = sysstat_json()\ntxt_obj = sysstat_text()\nprint('Output file type = ' + output_type + ', and interval is ' + interval + ' mins')\ncall()\nschedule.every(int(interval)).minutes.do(call)\nwhile True:\n schedule.run_pending()\n time.sleep(30)\n","sub_path":"with-classes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398654574","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 11 12:22:13 2018\r\n\r\n@author: Evan Macdonald\r\n\r\nCode to train and test a Convolutional Neural Network to recognize activities\r\nDesigned for CMPT884 class and for later use in masters thesis\r\n\r\nHOW TO RUN\r\n start with fresh run of script (clear all variables and re-start kernel)\r\n optimize(num_iterations=#) - Can start small, check accuracy and then keep going from there\r\n check_accuracy(show_cm=True) - shows accuracy and CM of current state of network\r\n plot_solutions() - shows results on a plot\r\n\"\"\"\r\n\r\n#Imports\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sn\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nimport time\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom data_assembly_two_insole_CMPT884 import combined_data_two_insole\r\nfrom Kintec_Functions_CMPT884 import segment_values_NO\r\nfrom Kintec_Functions_CMPT884 import normalize_2\r\n\r\n\r\n#%% import all data using data assembly script\r\n\r\nprint('Importing Data...')\r\ntrain_data, test_data = combined_data_two_insole()\r\nprint('Data imported...')\r\n\r\n#%% BUFFER\r\n\r\nprint('Buffering Data...')\r\n# buffer data for input into classifier\r\nbuffLen = 90 #45, 90, 135, 180, 225\r\nnumVars = 20\r\noverlap = buffLen/4 # Number of datapoints to overlap in the buffers\r\n\r\ntrain_segments, train_labels = segment_values_NO(train_data,buffLen)\r\ntrain_y = np.asarray(pd.get_dummies(train_labels), dtype = np.int8)\r\ntrain_x = normalize_2(train_segments.reshape(len(train_segments), 1, buffLen, numVars))\r\ntrain_x = np.rot90(train_x, k=3, axes=(2,3))\r\ntrain_x = np.transpose(train_x,(0,2,3,1))\r\n\r\ntest_segments, test_labels 
= segment_values_NO(test_data,buffLen)\r\ntest_y = np.asarray(pd.get_dummies(test_labels), dtype = np.int8)\r\ntest_y_cls = np.argmax(test_y, axis=1)\r\ntest_x = normalize_2(test_segments.reshape(len(test_segments), 1, buffLen, numVars))\r\ntest_x = np.rot90(test_x, k=3, axes=(2,3))\r\ntest_x = np.transpose(test_x,(0,2,3,1))\r\nprint('Data buffered...')\r\n\r\n#%% Heatmap of input image\r\n#row_names = ['FSR1R', 'FSR2R', 'FSR3R', 'FSR4R', 'FSR5R', 'FSR6R', 'FSR7R',\r\n# 'FSR1L', 'FSR2L', 'FSR3L', 'FSR4L', 'FSR5L', 'FSR6L', 'FSR7L', \r\n# 'XR', 'YR', 'ZR', 'XL', 'YL', 'ZL']\r\n#ax = sn.heatmap(train_x[105,:,:,0], #@ buffLen 90 - sit = 20, stand = 80, walk = 105\r\n# cmap=\"gray\", test_y_cls\r\n# square=True,\r\n# cbar = False, \r\n# yticklabels=row_names, \r\n# xticklabels=False)\r\n#plt.yticks(rotation=0) \r\n\r\n#%% CNN\r\n\r\n\r\n#Helper functions\r\ndef new_weights(shape):\r\n return tf.Variable(tf.truncated_normal(shape, stddev=0.05))\r\n\r\ndef new_biases(length):\r\n return tf.Variable(tf.constant(0.05, shape=[length]))\r\n\r\n#Layer constructors\r\ndef new_conv_layer(x,\r\n num_input_channels,\r\n filter_width,\r\n filter_height,\r\n num_filters,\r\n pool_height,\r\n pool_width):\r\n #Shape of the convolution kernel\r\n # [buffLen, 20, 1, num_filters]\r\n shape = [filter_width, filter_height, num_input_channels, num_filters]\r\n #Create weights\r\n weights = new_weights(shape=shape)\r\n #Create biases\r\n biases = new_biases(length=num_filters)\r\n #Create convolution layer activations\r\n activations = tf.nn.conv2d(input=x, filter=weights, strides=[1,1,1,1], padding='SAME')\r\n #Add in biases\r\n activations = tf.add(activations, biases)\r\n #Complete max pooiling\r\n activations = tf.nn.max_pool(value=activations, \r\n ksize=[1,pool_height,pool_width,1],\r\n strides=[1,pool_height,pool_width,1],\r\n padding='SAME')\r\n #Rectified Linear Unit (ReLU)\r\n # calcs max(x,0) for each input pixel\r\n # note this is okay to do after max pooling since relu(max_pool(x)) == max_pool(relu(x)) \r\n activations = tf.nn.relu(activations)\r\n #weights are being returned since there may be a use to look at them later\r\n return activations, weights\r\n\r\ndef flatten_layer(layer):\r\n #get shape of input layer\r\n # [#, img_height, img_width, 1]\r\n layer_shape = layer.get_shape()\r\n #Calculate number of features \r\n # =img_height*img_width*1\r\n num_features = layer_shape[1:4].num_elements()\r\n #Reshape the layer to flat\r\n # [#, img_height*img_width*1]\r\n layer_flat = tf.reshape(layer, [-1, num_features])\r\n return layer_flat, num_features\r\n\r\ndef new_fc_layer(x,\r\n num_inputs,\r\n num_outputs):\r\n #Create new weights and biases\r\n weights = new_weights(shape=[num_inputs, num_outputs])\r\n biases = new_biases(length=num_outputs)\r\n #create activations with biases added in and ReLU\r\n activations = tf.nn.relu(tf.add(tf.matmul(x, weights), biases))\r\n return activations\r\n\r\ndef new_logits(x,\r\n num_inputs,\r\n num_outputs):\r\n #Create new weights and biases\r\n weights = new_weights(shape=[num_inputs, num_outputs])\r\n biases = new_biases(length=num_outputs)\r\n #create activations with biases added in and ReLU\r\n activations = tf.matmul(x, weights) + biases\r\n return activations\r\n\r\n\r\n\r\ndef network_setup(filter_width,\r\n num_filters,\r\n pool_height,\r\n pool_width,\r\n num_nodes,):\r\n #Number of ouput classes\r\n num_classes = 3\r\n filter_height = 20 #numVars\r\n\r\n #Other parameters\r\n learning_rate = 0.0001\r\n '''Placeholder variables'''\r\n #Input 
'images'\r\n X = tf.placeholder(tf.float32, shape=[None, numVars, buffLen, 1], name='X')\r\n #output[#,20,90,1][numsamples, numVars, buffLen, 1]\r\n Y = tf.placeholder(tf.float32, shape=[None,num_classes], name='Y')\r\n #output[#,3][numsamples,num_classes]\r\n Y_cls = tf.argmax(Y, axis=1)\r\n\r\n '''Create convolution layer'''\r\n c1, w1 = new_conv_layer(X, 1, filter_width, filter_height, num_filters, pool_height, pool_width)\r\n \r\n '''Create flattened layer'''\r\n flat, num_features = flatten_layer(c1)\r\n \r\n '''Create fully-connected layer'''\r\n f1 = new_fc_layer(flat, num_features, num_nodes)\r\n \r\n '''Create fully-connected output layer'''\r\n f2 = new_logits(f1, num_nodes, num_classes)\r\n \r\n '''Create softmax output'''\r\n y_ = tf.nn.softmax(f2)\r\n #Produces index of largest element\r\n y_pred_cls = tf.argmax(y_, axis=1)\r\n \r\n '''Cost function and optimizer'''\r\n # This is where the most improvement could come from\r\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=f2, labels=Y)\r\n cost = tf.reduce_mean(cross_entropy) #this is the mean of the cross entropy of all the image classifications\r\n \r\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\r\n \r\n '''Performance Measures'''\r\n correct_prediction = tf.equal(y_pred_cls, Y_cls)\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n #shockingly this actually works as an accuracy measure when you are using 1 and 0\r\n return X, Y, Y_cls, y_, y_pred_cls, cross_entropy, cost, optimizer, correct_prediction, accuracy \r\n\r\n\r\ndef next_batch(num, x, y):\r\n #Return a total of 'num' random samples and labels. \r\n idx = np.arange(len(x))\r\n np.random.shuffle(idx)\r\n idx = idx[:num]\r\n data_shuffle = [x[i] for i in idx]\r\n labels_shuffle = [y[ i] for i in idx]\r\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)\r\n\r\ndef optimize(num_iterations):\r\n global total_iterations\r\n # Start-time used for printing time-usage below.\r\n# start_time = time.time()\r\n for i in range(total_iterations,\r\n total_iterations + num_iterations):\r\n # Get a batch of training examples.\r\n x_batch, y_true_batch = next_batch(train_batch_size, train_x, train_y)\r\n # Put the batch into a dict\r\n feed_dict_train = {X: x_batch, Y: y_true_batch}\r\n # Run the optimizer using this batch of training data.\r\n session.run([optimizer, cost], feed_dict=feed_dict_train)\r\n # Print status every 100 iterations.\r\n if i % 100 == 0:\r\n acc,cst = session.run([accuracy,cost], feed_dict=feed_dict_train)\r\n# msg = \"Iteration: {0:>6}, Tr. 
Accuracy: {1:>6.1%}, Cost: {2:>6.2}\"\r\n# print(msg.format(i + 1, acc, cst))\r\n # Update the total number of iterations performed.\r\n total_iterations += num_iterations\r\n # Get final stats\r\n acc,cst = session.run([accuracy,cost], feed_dict=feed_dict_train)\r\n # Ending time.\r\n# end_time = time.time()\r\n# time_dif = end_time - start_time\r\n# print(\"Training time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\r\n return acc, cst\r\n\r\n# Maybe develop this later\r\n#def plot_example_errors(cls_pred, correct):\r\n# # cls_pred is an array of the predicted class-number for\r\n# # all images in the test-set.\r\n#\r\n# # correct is a boolean array whether the predicted class\r\n# # is equal to the true class for each image in the test-set.\r\n#\r\n# # Negate the boolean array.\r\n# incorrect = (correct == False)\r\n# \r\n# # Get the images from the test-set that have been\r\n# # incorrectly classified.\r\n# images = test_x[incorrect]\r\n# \r\n# # Get the predicted classes for those images.\r\n# cls_pred = cls_pred[incorrect]\r\n#\r\n# # Get the true classes for those images.\r\n# cls_true = test_y[incorrect]\r\n# \r\n# # Plot the first 9 images. (this is something to do in the future)\r\n## plot_images(images=images[0:9],\r\n## cls_true=cls_true[0:9],\r\n## cls_pred=cls_pred[0:9])\r\n# return images, cls_pred, cls_true\r\n\r\ndef plot_confusion_matrix(cls_pred):\r\n # cls_pred is an array of the predicted class-number for\r\n # all images in the test-set.\r\n \r\n # Get the confusion matrix using sklearn.\r\n cm = confusion_matrix(y_true=test_y_cls, y_pred=cls_pred)\r\n print(cm)\r\n sn.heatmap(cm, annot=True, fmt='g',annot_kws={\"size\": 30}, \r\n square=True, xticklabels=['Sit','Stand','Walk'],\r\n yticklabels=['Sit','Stand','Walk'])\r\n plt.xlabel('Predicted')\r\n plt.ylabel('True')\r\n plt.show()\r\n\r\n\r\ndef check_accuracy(cm=False):\r\n #Initialize the array to be filled in as it goes\r\n num_test = len(test_x)\r\n\r\n\r\n #Method for getting predicted classifications using a batch size\r\n# cls_pred = np.zeros(shape=num_test, dtype=np.int)\r\n# test_batch_size = 50\r\n# i = 0\r\n\r\n# while i < num_test:\r\n# # The ending index for the next batch is denoted j.\r\n# j = min(i + test_batch_size, num_test)\r\n# # Get the images and labels from the test-set between index i and j.\r\n# images = test_x[i:j, :]\r\n# labels = test_y[i:j, :]\r\n# # Create a feed-dict with these images and labels.\r\n# feed_dict = {X: images, Y: labels}\r\n# # Calculate the predicted class using TensorFlow.\r\n# cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)\r\n# # Set the start-index for the next batch to the end-index of the current batch.\r\n# i = j\r\n images = test_x\r\n labels = test_y\r\n feed_dict = {X: images, Y: labels}\r\n \r\n #start timer\r\n start_time = time.time()\r\n \r\n # get predictions for test set\r\n cls_pred = session.run(y_pred_cls, feed_dict=feed_dict)\r\n \r\n #end timer\r\n end_time = time.time()\r\n time_dif = end_time - start_time\r\n\r\n # Create a boolean array whether each image is correctly classified.\r\n correct = (test_y_cls == cls_pred)\r\n # Calculate the number of correctly classified images.\r\n correct_sum = correct.sum()\r\n # Classification accuracy\r\n acc = float(correct_sum) / num_test\r\n # Print the accuracy.\r\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\r\n print(msg.format(acc, correct_sum, num_test))\r\n \r\n# msg = \"Classification time usage: {0:>6.5} seconds\"\r\n# print(msg.format(time_dif))\r\n\r\n # Plot some 
examples of mis-classifications, if desired.\r\n# if show_example_errors:\r\n# print(\"Example errors:\")\r\n# plot_example_errors(cls_pred=cls_pred, correct=correct)\r\n\r\n # Plot the confusion matrix, if desired.\r\n if cm:\r\n plot_confusion_matrix(cls_pred=cls_pred)\r\n return acc, correct_sum, num_test\r\n\r\ndef plot_solutions():\r\n\r\n images = test_x\r\n labels = test_y\r\n feed_dict = {X: images, Y: labels}\r\n \r\n # get predictions for test set\r\n y_ = session.run(y_pred_cls, feed_dict=feed_dict)\r\n \r\n # Reshape y_ to match test_data\r\n y_index = np.asarray(np.where(y_[:-1] != y_[1:]))\r\n y_index = np.reshape(y_index,(-1,1))\r\n index = y_index*(len(test_data)/len(y_))\r\n index = index.astype(dtype=int)\r\n y_out_val = np.empty(len(test_data),dtype=int)\r\n for i in range (0,len(index)-1):\r\n if i==0:\r\n y_out_val[0:index[(1,0)]] = y_[y_index[(i,0)]-1]\r\n y_out_val[index[(i,0)]:index[(i+1,0)]] = y_[y_index[(i+1,0)]-2]\r\n y_out_val[index[(-1,0)]:]=y_[-1]\r\n y_out_val = y_out_val + 1\r\n \r\n # Reshape test_labels to match y_out_val\r\n yt_index = np.asarray(np.where(test_labels[:-1] != test_labels[1:]))\r\n yt_index = np.reshape(yt_index,(-1,1))\r\n index = yt_index*(len(test_data)/len(test_labels))\r\n index = index.astype(dtype=int)\r\n y_test_val = np.empty(len(test_data),dtype=int)\r\n for i in range (0,len(index)-1):\r\n if i==0:\r\n y_test_val[0:index[(1,0)]] = test_labels[yt_index[(i,0)]-1]\r\n y_test_val[index[(i,0)]:index[(i+1,0)]] = test_labels[yt_index[(i+1,0)]-2]\r\n y_test_val[index[(-1,0)]:]=test_labels[-1]\r\n \r\n fig,ax=plt.subplots(sharex=True, nrows=3, ncols=1)\r\n ax[0].plot(test_data['FSR1_R'], linewidth=0.5)\r\n ax[0].plot(test_data['FSR2_R'], linewidth=0.5)\r\n ax[0].plot(test_data['FSR3_R'], linewidth=0.5)\r\n ax[0].plot(test_data['FSR4_R'], linewidth=0.5)\r\n ax[0].plot(test_data['FSR5_R'], linewidth=0.5)\r\n ax[0].plot(test_data['FSR6_R'], linewidth=0.5)\r\n ax[0].plot(test_data['FSR7_R'], linewidth=0.5)\r\n ax[0].set_ylim((0,900))\r\n ax[0].legend()\r\n ax[0].set_title('FSR data Right')\r\n ax[0].set_ylabel('FSR Output')\r\n\r\n ax[1].plot(test_data['FSR1_L'], linewidth=0.5)\r\n ax[1].plot(test_data['FSR2_L'], linewidth=0.5)\r\n ax[1].plot(test_data['FSR3_L'], linewidth=0.5)\r\n ax[1].plot(test_data['FSR4_L'], linewidth=0.5)\r\n ax[1].plot(test_data['FSR5_L'], linewidth=0.5)\r\n ax[1].plot(test_data['FSR6_L'], linewidth=0.5)\r\n ax[1].plot(test_data['FSR7_L'], linewidth=0.5)\r\n ax[1].set_ylim((0,900))\r\n ax[1].legend()\r\n ax[1].set_title('FSR Data Left')\r\n ax[1].set_ylabel('FSR Output')\r\n\r\n ax[2].plot(y_test_val, label='Actual')\r\n ax[2].plot(y_out_val, label='Predicted')\r\n ax[2].set_ylim((0.5,3.5))\r\n ax[2].legend()\r\n ax[2].set_title('Activity State.')\r\n ax[2].set_xlabel('Time (ms)')\r\n ax[2].set_ylabel('1=Sit, 2=Stand, 3=Walk')\r\n\r\n mng=plt.get_current_fig_manager() \r\n mng.window.showMaximized() #maximize figure \r\n plt.show()\r\n\r\n#%% Optimize loop\r\n#Configuration of parameters (these are what you can modify)\r\nfilter_width = [14] \r\nnum_filters = [45] #over 20 seems to not give much improvement\r\n#Pooling layer parameters\r\npool_height = [3]\r\npool_width = [3]\r\n#Fully-connected layer parameters\r\nnum_nodes = [10,20,40,60,80,100,200,300,600,800] #\r\ncombos = np.array(np.meshgrid(filter_width,\r\n num_filters,\r\n pool_height,\r\n pool_width,\r\n num_nodes)).T.reshape(-1,5)\r\n\r\n#initialize blank solution arrays to store results\r\naccList = np.zeros(len(combos))\r\ncorrectList = 
np.zeros([len(combos),2])\r\niterList = np.zeros(len(combos))\r\ncostList = np.zeros(len(combos))\r\n\r\n#iterate through all possible solutions\r\nfor i in range(len(combos)):\r\n #setup network\r\n X, Y, Y_cls, y_, y_pred_cls, cross_entropy, cost, optimizer, correct_prediction, accuracy = network_setup(combos[i,0], #filter_width\r\n combos[i,1], #num_filters\r\n combos[i,2], #pool_height\r\n combos[i,3], #pool_width\r\n combos[i,4],) #num_nodes\r\n # TensorFlow portion\r\n '''TensorFlow Session'''\r\n session = tf.Session()\r\n session.run(tf.global_variables_initializer())\r\n \r\n '''Optimization'''\r\n train_batch_size = 1000\r\n train_total_batches = train_x.shape[0] // train_batch_size\r\n # Counter for total number of iterations performed so far.\r\n total_iterations = 0\r\n # Optimize network\r\n# cost_step = 10 #arbitrarily large number for cost\r\n# while cost_step > 0.01 and total_iterations < 2000:\r\n# acc_train, cost_step = optimize(1)\r\n \r\n acc_train, cost_step = optimize(80)\r\n \r\n #Final stats \r\n msg = \"Combo #: {0:>2}, Iterations: {1:>2}, Tr. Accuracy: {2:>3.1%}, Cost: {3:>2.2}\"\r\n print(msg.format((i+1),total_iterations, acc_train, cost_step))\r\n #get accuracy stats\r\n accList[i], correctList[i,0], correctList[i,1] = check_accuracy()\r\n iterList[i] = total_iterations\r\n costList[i] = cost_step\r\n","sub_path":"CNN_optimization.py","file_name":"CNN_optimization.py","file_ext":"py","file_size_in_byte":17481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429202089","text":"\"\"\"\n2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.\nWhat is the sum of the digits of the number 2^1000?\n\"\"\"\n\ns = 0\na = pow(2, 1000)\nstring = str(a)\nfor i in range(len(string)):\n s = s + int(string[i])\nprint(s)\n","sub_path":"list1/task16.py","file_name":"task16.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518858362","text":"# -*- encoding: utf-8 -*-\nfrom osv import fields,osv\nimport time\nfrom datetime import datetime\nfrom dateutil.relativedelta import *\nfrom tools.translate import _\nimport logging\nlogger = logging.getLogger('jasper_print')\n\nclass jasper_stock_reprocessamento_wizard(osv.osv_memory):\n _name='stock.reprocessamento.wizard'\n\n _columns ={\n 'product_ids': fields.many2many('product.product','product_stock_processing','wizard_id','product_id','Products', domain=[('type','=','product')]),\n }\n \n def run(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n products = ids or []\n for product in products:\n actual_med = 0\n actual_qty = 0\n prod_last_price = 0\n prod = self.pool.get('product.product').browse(cr, uid, product)\n movimentos = []\n #for report in prod.stock_report_ids:\n # if report.state == 'done':\n # movimentos.append(report.id)\n #movimentos = sorted(movimentos)\n movimentos = self.pool.get('dotcom.stock.report').search(cr, uid, [('prod_id','=',product), ('state','!=','draft')], order='ordem asc')\n #dotcom_stock_order_id = self.pool.get('dotcom.stock.order').get_model_id(cr, uid, 'dotcom.stock.order')\n \n for movimento in movimentos:\n moved = self.pool.get('dotcom.stock.report').browse(cr,uid,movimento)\n model = moved.model_id.model\n doc_type = self.pool.get(model).browse(cr,uid,moved.origin_id).doc_type or False\n if doc_type:\n updates = doc_type.update_last_med_price or False\n if updates:\n medio_anterior = actual_med\n 
quantidade_anterior = actual_qty\n qty = moved.qty or 0\n price = moved.price or 0\n try:\n pm = ((medio_anterior*quantidade_anterior) + (qty*price))/(qty+quantidade_anterior)\n except ZeroDivisionError:\n pm = 0\n actual_med = pm\n actual_qty = quantidade_anterior + qty\n vals = {}\n vals['average_cost'] = pm\n self.pool.get('dotcom.stock.report').write(cr,uid,movimento,vals)\n else:\n vals = {}\n vals['average_cost'] = actual_med\n quantidade_anterior = actual_qty\n qty = moved.qty or 0\n actual_qty = quantidade_anterior + qty\n self.pool.get('dotcom.stock.report').write(cr,uid,movimento,vals)\n last_price = doc_type.update_last_price or False\n if last_price:\n prod_last_price = moved.price or 0\n \n if model == 'dotcom.stock.order':\n if doc_type.type == 'reset':\n prod_last_price = moved.price or 0\n actual_med = moved.price or 0\n actual_qty = moved.qty or 0\n self.pool.get('dotcom.stock.report').write(cr, uid, movimento, {'average_cost':actual_med})\n self.pool.get('product.product').write(cr,uid,[product],{'stock_qty':actual_qty,'last_average_cost':actual_med,'last_cost_price':prod_last_price})\n return {'view_mode' : 'tree,form','type': 'ir.actions.act_window_close'}\n \n def calculate(self, cr, uid,ids, context={}):\n data = self.read(cr,uid,ids,['product_ids'])\n res = False\n if data:\n res = self.run(cr, uid, data[0]['product_ids'] or [], context=context)\n now = self.verificar_drafts(cr, uid, data[0]['product_ids'] or [], context=context)\n then = self.repost_quantities(cr, uid, data[0]['product_ids'], context=context)\n for di in ids:\n message = _(\"Stock Reprocess completed\")\n self.log(cr, uid, di, message)\n return res\n \n def run_products(self, cr, uid,ids, context={}):\n res = self.run(cr, uid, ids or [], context=context)\n now = self.verificar_drafts(cr, uid, ids or [], context=context)\n then = self.repost_quantities(cr, uid, ids or [], context=context)\n for di in ids:\n message = _(\"Products Updated\")\n self.log(cr, uid, di, message)\n return res\n \n def verificar_drafts(self, cr, uid, ids, context={}):\n for product in ids:\n produto = self.pool.get('product.product').browse(cr, uid, product)\n for movimento in produto.stock_report_ids:\n classe = movimento.model_id and movimento.model_id.model\n class_obj = self.pool.get(classe).browse(cr, uid, movimento.origin_id)\n state = class_obj and class_obj.state or ''\n if state in ['draft','reset']:\n self.pool.get('dotcom.stock.report').write(cr, uid, [movimento and movimento.id], {'state':'draft'})\n elif state == 'cancel':\n self.pool.get('dotcom.stock.report').unlink(cr, uid, [movimento and movimento.id])\n return False\n \n def repost_quantities(self, cr, uid, ids, context={}):\n if context is None:\n context = {}\n if ids:\n availability = self.pool.get('dotcom.stock.available')\n for produto in self.pool.get('product.product').browse(cr, uid, ids):\n lista = {}\n qty_available = 0\n total = 0\n reports_ids = self.pool.get('dotcom.stock.report').search(cr, uid, [('prod_id','=',produto and produto.id), ('state','!=','draft')], order='ordem asc')\n #reports_ids = sorted(reports_ids)\n for movimento in self.pool.get('dotcom.stock.report').browse(cr, uid, reports_ids):\n logger.info('\\nA processar Ordem: %s' % movimento.ordem)\n origin_classe = movimento and movimento.model_id and movimento.model_id.model\n origin_id = movimento and movimento.origin_id or None\n qty_available = 0\n is_not_act = True\n if origin_classe == 'dotcom.stock.order':\n this = self.pool.get(origin_classe).browse(cr, uid, origin_id)\n 
this_doc_type = this and this.doc_type\n if this_doc_type and this_doc_type.type == 'reset':\n is_not_act = False\n location = movimento.location_id and movimento.location_id.id\n \n if is_not_act:\n if lista.has_key(location):\n qty_available = lista.get(location) + movimento.qty or 0\n else:\n qty_available = movimento.qty or 0\n else:\n qty_available = movimento.qty or 0\n prod_id = movimento.prod_id.id\n self.pool.get('dotcom.stock.order').reset_stock_locations(cr, uid, location, prod_id, context=context)\n lista = {}\n lista[location] = qty_available\n \n result = availability.search(cr, uid, [('prod_id','=',produto.id)])\n availability.unlink(cr, uid, result)\n for locale in lista:\n new = {}\n new['prod_id'] = produto and produto.id\n new['location_id'] = locale\n new['qty_available'] = lista.get(locale)\n total = total + lista.get(locale)\n availability.create(cr, uid, new, context=context)\n self.pool.get('product.product').write(cr, uid, [produto and produto.id],{'stock_qty':total})\n return False\njasper_stock_reprocessamento_wizard()","sub_path":"dotcom_stock/wizard/processamento_stock.py","file_name":"processamento_stock.py","file_ext":"py","file_size_in_byte":8111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"356016589","text":"# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nimport transformers\nfrom transformers import GPT2Tokenizer, GPTNeoConfig, is_flax_available, is_torch_available\nfrom transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow\n\nfrom ...generation.test_flax_utils import FlaxGenerationTesterMixin\nfrom ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_flax_available():\n import jax\n import jax.numpy as jnp\n\n from transformers.modeling_flax_pytorch_utils import (\n convert_pytorch_state_dict_to_flax,\n load_flax_weights_in_pytorch_model,\n )\n from transformers.models.gpt_neo.modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel\n\nif is_torch_available():\n import torch\n\n\nclass FlaxGPTNeoModelTester:\n def __init__(\n self,\n parent,\n batch_size=14,\n seq_length=7,\n is_training=True,\n use_input_mask=True,\n use_token_type_ids=False,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=2,\n num_attention_heads=4,\n attention_types=[[[\"global\", \"local\"], 1]],\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n window_size=7,\n initializer_range=0.02,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n 
self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.attention_types = attention_types\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.window_size = window_size\n self.initializer_range = initializer_range\n self.scope = None\n self.bos_token_id = vocab_size - 1\n self.eos_token_id = vocab_size - 1\n self.pad_token_id = vocab_size - 1\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n config = GPTNeoConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_hidden_layers,\n num_heads=self.num_attention_heads,\n max_position_embeddings=self.max_position_embeddings,\n use_cache=False,\n bos_token_id=self.bos_token_id,\n eos_token_id=self.eos_token_id,\n pad_token_id=self.pad_token_id,\n window_size=self.window_size,\n attention_types=self.attention_types,\n )\n\n return (config, input_ids, input_mask)\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, input_ids, attention_mask = config_and_inputs\n inputs_dict = {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n return config, inputs_dict\n\n def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):\n max_decoder_length = 20\n model = model_class_name(config)\n\n past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)\n attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype=\"i4\")\n\n position_ids = jnp.broadcast_to(\n jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)\n )\n outputs_cache = model(\n input_ids[:, :-1],\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n position_ids=position_ids,\n )\n\n position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype=\"i4\")\n outputs_cache_next = model(\n input_ids[:, -1:],\n attention_mask=attention_mask,\n past_key_values=outputs_cache.past_key_values,\n position_ids=position_ids,\n )\n\n outputs = model(input_ids)\n\n diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))\n self.parent.assertTrue(diff < 1e-3, msg=f\"Max diff is {diff}\")\n\n def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):\n max_decoder_length = 20\n model = model_class_name(config)\n\n attention_mask_cache = jnp.concatenate(\n [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],\n axis=-1,\n )\n\n past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)\n position_ids = jnp.broadcast_to(\n jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)\n )\n\n outputs_cache = model(\n input_ids[:, :-1],\n attention_mask=attention_mask_cache,\n past_key_values=past_key_values,\n position_ids=position_ids,\n )\n position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype=\"i4\")\n outputs_cache_next = model(\n input_ids[:, -1:],\n past_key_values=outputs_cache.past_key_values,\n attention_mask=attention_mask_cache,\n 
position_ids=position_ids,\n )\n\n outputs = model(input_ids, attention_mask=attention_mask)\n\n diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))\n self.parent.assertTrue(diff < 1e-3, msg=f\"Max diff is {diff}\")\n\n\n@require_flax\nclass FlaxGPTNeoModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):\n all_model_classes = (FlaxGPTNeoModel, FlaxGPTNeoForCausalLM) if is_flax_available() else ()\n all_generative_model_classes = (FlaxGPTNeoForCausalLM,) if is_flax_available() else ()\n\n def setUp(self):\n self.model_tester = FlaxGPTNeoModelTester(self)\n\n def test_use_cache_forward(self):\n for model_class_name in self.all_model_classes:\n config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()\n self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)\n\n def test_use_cache_forward_with_attn_mask(self):\n for model_class_name in self.all_model_classes:\n config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()\n self.model_tester.check_use_cache_forward_with_attn_mask(\n model_class_name, config, input_ids, attention_mask\n )\n\n @slow\n def test_batch_generation(self):\n tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\", pad_token=\"<|endoftext|>\", padding_side=\"left\")\n inputs = tokenizer([\"Hello this is a long string\", \"Hey\"], return_tensors=\"np\", padding=True, truncation=True)\n\n model = FlaxGPTNeoForCausalLM.from_pretrained(\"EleutherAI/gpt-neo-125M\")\n model.do_sample = False\n model.config.pad_token_id = model.config.eos_token_id\n\n jit_generate = jax.jit(model.generate)\n\n output_sequences = jit_generate(\n inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"], pad_token_id=tokenizer.pad_token_id\n ).sequences\n\n output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)\n\n expected_string = [\n \"Hello this is a long string of text.\\n\\nI'm trying to get the text of the\",\n \"Hey, I'm a little late to the party. 
I'm going to\",\n        ]\n\n        self.assertListEqual(output_string, expected_string)\n\n    # overwrite from common since `attention_mask` in combination\n    # with `causal_mask` behaves slightly differently\n    @is_pt_flax_cross_test\n    def test_equivalence_pt_to_flax(self):\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n        for model_class in self.all_model_classes:\n            with self.subTest(model_class.__name__):\n                # prepare inputs\n                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}\n\n                # load corresponding PyTorch class\n                pt_model_class_name = model_class.__name__[4:]  # Skip the \"Flax\" at the beginning\n                pt_model_class = getattr(transformers, pt_model_class_name)\n\n                batch_size, seq_length = pt_inputs[\"input_ids\"].shape\n                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))\n                for batch_idx, start_index in enumerate(rnd_start_indices):\n                    pt_inputs[\"attention_mask\"][batch_idx, :start_index] = 0\n                    pt_inputs[\"attention_mask\"][batch_idx, start_index:] = 1\n                    prepared_inputs_dict[\"attention_mask\"][batch_idx, :start_index] = 0\n                    prepared_inputs_dict[\"attention_mask\"][batch_idx, start_index:] = 1\n                pt_model = pt_model_class(config).eval()\n                fx_model = model_class(config, dtype=jnp.float32)\n\n                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)\n                fx_model.params = fx_state\n\n                with torch.no_grad():\n                    pt_outputs = pt_model(**pt_inputs).to_tuple()\n\n                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()\n                self.assertEqual(len(fx_outputs), len(pt_outputs), \"Output lengths differ between Flax and PyTorch\")\n                for fx_output, pt_output in zip(fx_outputs, pt_outputs):\n                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)\n\n                with tempfile.TemporaryDirectory() as tmpdirname:\n                    pt_model.save_pretrained(tmpdirname)\n                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)\n\n                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()\n                    self.assertEqual(\n                        len(fx_outputs_loaded), len(pt_outputs), \"Output lengths differ between Flax and PyTorch\"\n                    )\n                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):\n                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)\n\n    # overwrite from common since `attention_mask` in combination\n    # with `causal_mask` behaves slightly differently\n    @is_pt_flax_cross_test\n    def test_equivalence_flax_to_pt(self):\n        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n        for model_class in self.all_model_classes:\n            with self.subTest(model_class.__name__):\n                # prepare inputs\n                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}\n\n                # load corresponding PyTorch class\n                pt_model_class_name = model_class.__name__[4:]  # Skip the \"Flax\" at the beginning\n                pt_model_class = getattr(transformers, pt_model_class_name)\n\n                pt_model = pt_model_class(config).eval()\n                fx_model = model_class(config, dtype=jnp.float32)\n\n                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)\n                batch_size, seq_length = pt_inputs[\"input_ids\"].shape\n                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))\n                for batch_idx, start_index in enumerate(rnd_start_indices):\n                    pt_inputs[\"attention_mask\"][batch_idx, :start_index] = 0\n                    
pt_inputs[\"attention_mask\"][batch_idx, start_index:] = 1\n prepared_inputs_dict[\"attention_mask\"][batch_idx, :start_index] = 0\n prepared_inputs_dict[\"attention_mask\"][batch_idx, start_index:] = 1\n\n # make sure weights are tied in PyTorch\n pt_model.tie_weights()\n\n with torch.no_grad():\n pt_outputs = pt_model(**pt_inputs).to_tuple()\n\n fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()\n self.assertEqual(len(fx_outputs), len(pt_outputs), \"Output lengths differ between Flax and PyTorch\")\n for fx_output, pt_output in zip(fx_outputs, pt_outputs):\n self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n fx_model.save_pretrained(tmpdirname)\n pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)\n\n with torch.no_grad():\n pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()\n\n self.assertEqual(\n len(fx_outputs), len(pt_outputs_loaded), \"Output lengths differ between Flax and PyTorch\"\n )\n for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):\n self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)\n\n @slow\n def test_model_from_pretrained(self):\n for model_class_name in self.all_model_classes:\n model = model_class_name.from_pretrained(\"EleutherAI/gpt-neo-125M\")\n outputs = model(np.ones((1, 1)))\n self.assertIsNotNone(outputs)\n","sub_path":"tests/models/gpt_neo/test_modeling_flax_gpt_neo.py","file_name":"test_modeling_flax_gpt_neo.py","file_ext":"py","file_size_in_byte":14723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430295431","text":"def get_vpc_tunnel_using_account(config):\n vpc = \"\" # set default, if found set that value\n accounts = config.get(\"ACCOUNTS\", {}).get(\"NUTANIX_PC\", [])\n for acc in accounts:\n if acc.get(\"NAME\") == \"NTNX_LOCAL_AZ\":\n for subnet in acc.get(\"OVERLAY_SUBNETS\", []):\n if subnet.get(\"VPC\", \"\") == \"vpc_name_1\":\n vpc = \"vpc_name_1\"\n break\n vpc_tunnel = config.get(\"VPC_TUNNELS\", {}).get(\"NTNX_LOCAL_AZ\", {}).get(vpc, {})\n\n return vpc_tunnel.get(\"name\", \"\")\n\n\ndef get_vpc_project(config):\n vpc_project = (\n config.get(\"VPC_PROJECTS\", {}).get(\"PROJECT1\", {}).get(\"NAME\", \"\") or \"default\"\n )\n return vpc_project\n","sub_path":"tests/tunnel_endpoints/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52050642","text":"from flask import Flask, url_for, redirect\r\n\r\nimport os, Home, Auth, Place, User\r\n\r\nfrom DataBase import DataBase\r\n\r\n\r\ndef create_app(test_config=None):\r\n # crea e configura l'app\r\n app = Flask(__name__, instance_relative_config=True)\r\n app.config.from_mapping(\r\n SECRET_KEY='dev',\r\n DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),\r\n )\r\n\r\n if test_config is None:\r\n # carica i config, quando non si sta testando\r\n app.config.from_pyfile('config.py', silent=True)\r\n else:\r\n # carica i config passati in input\r\n app.config.update(test_config)\r\n\r\n\r\n try:\r\n os.makedirs(app.instance_path)\r\n except OSError:\r\n pass\r\n\r\n # registra i comandi del database\r\n DataBase.init_app(app)\r\n\r\n # registra tutti i blueprint\r\n app.register_blueprint(Home.bp)\r\n app.register_blueprint(Auth.bp)\r\n app.register_blueprint(Place.bp)\r\n app.register_blueprint(User.bp)\r\n\r\n # una pagina che ci ridirige verso 
/home\r\n @app.route('/')\r\n def prova():\r\n return redirect(url_for('Home.home'))\r\n\r\n return app\r\n\r\n\r\nif __name__ == '__main__':\r\n create_app().run()\r\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333635999","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport dal\n\n\ndef syn(kType, para = 5, code = \"\"):\n if not code:\n sql = \"SELECT code FROM Stock s\"\n stocks = dal.queryAll(sql)\n for s in stocks:\n getStockK(kType, para, s[0])\n else:\n getStockK(kType, para, code)\n\n\ndef getStockK(kType, para, code):\n print(kType, \"分析均值\", code)\n sql = \"SELECT day,ma5,ma10 FROM StockK%s WHERE code='%s' ORDER BY day desc LIMIT %d OFFSET 0\" % (kType, code, para * 40)\n kLines = dal.queryAll(sql)\n states = []\n for k in kLines:\n if k[1] and k[2]:\n state = { \"day\": k[0], \"ma\": float(k[1]) - float(k[2]), \"matype\": 0, \"state\": \"吻\"}\n if state[\"ma\"] > 0:\n state[\"matype\"] = 1\n elif state[\"ma\"] < 0:\n state[\"matype\"] = -1\n states.insert(0, state)\n\n for i in range(para - 1, len(states)):\n if states[i][\"state\"] == 0:\n states[i][\"state\"] = states[i - 1][\"state\"]\n\n isTrend = True\n for j in range(1, para):\n if states[i - j][\"matype\"] != states[i][\"matype\"]:\n isTrend = False\n break\n if isTrend:\n state = \"男\"\n if states[i][\"matype\"] == 1:\n state = \"女\"\n for j in range(0, para):\n states[i - j][\"state\"] = state\n\n trands = []\n trand = {\"day\": states[0][\"day\"], \"area\": 0.0, \"avg\": 0.0, \"state\": states[0][\"state\"]}\n area = states[0][\"ma\"]\n count = 1\n for i in range(1, len(states)):\n if states[i][\"state\"] == states[i - 1][\"state\"]:\n area += states[i][\"ma\"]\n count += 1\n else:\n trand[\"area\"] = area\n trand[\"avg\"] = area / count\n trands.append(trand)\n trand = {\"day\": states[i][\"day\"], \"area\": 0.0, \"avg\": 0.0, \"state\": states[i][\"state\"]}\n area = states[i][\"ma\"]\n count = 1\n\n trand[\"area\"] = area\n trand[\"avg\"] = area / count\n trands.append(trand)\n\n s = summary(trands)\n sql = \"UPDATE Stock SET ma%s='%s' WHERE code='%s';\" % (kType, s, code)\n dal.run(sql)\n\n\ndef summary(trands):\n realTrands = []\n lastState = trands[-1][\"state\"]\n\n if len(trands) < 2:\n return \"\"\n\n now = \"线段\"\n if lastState == \"吻\":\n now = \"中枢\"\n realTrands = getRealTrands(trands[0 : len(trands) - 1])\n else:\n realTrands = getRealTrands(trands)\n\n trand = \"↓\"\n if realTrands[0][\"state\"] == \"女\":\n trand = \"↑\"\n\n count = len(realTrands)\n\n bc = 0\n if len(realTrands) > 1 and abs(realTrands[0][\"avg\"]) < abs(realTrands[1][\"avg\"]):\n bc = 1\n if len(realTrands) > 2:\n for i in range(2, len(realTrands)):\n if abs(realTrands[i - 1][\"area\"]) < abs(realTrands[i][\"area\"]):\n bc += 1\n else:\n break\n\n result = \"%s-%d-%s-%d\" % (trand, count, now, bc)\n return result\n\n\ndef getRealTrands(trands):\n realTrands = []\n lastState = trands[-1][\"state\"]\n if lastState == \"女\":\n for i in range(0, len(trands)):\n t = trands[-1 - i]\n if t[\"state\"] =='吻':\n continue\n elif t[\"state\"] =='男':\n break\n else:\n realTrands.append(t)\n elif lastState == \"男\":\n for i in range(0, len(trands)):\n t = trands[-1 - i]\n if t[\"state\"] =='吻':\n continue\n elif t[\"state\"] =='女':\n break\n else:\n realTrands.append(t)\n return 
realTrands","sub_path":"99.Code/PYServer/stockMA/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215875399","text":"__author__ = 'thehoule'\n\nimport subprocess\nimport sys\nfrom PyQt4 import QtGui, QtCore\n\nrandomize = False\nanswers = False\nquestions = False\n\n#Control booleans\ninside_question = False\nin_num_question = False\nin_alpha = False\nset_answer = False\n\n\ninstruction_dict = []\nquestion_dict = []\nanswer_dict = []\ntemp_ans = []\n\nclass Example(QtGui.QMainWindow):\n\n global randomize\n\n #init\n def __init__(self):\n super(Example, self).__init__()\n\n self.initUI()\n\n #Create UI\n def initUI(self):\n\n btn1 = QtGui.QLabel(\"Upload File: \", self)\n btn1.move(10, 50)\n\n file_input = QtGui.QLineEdit(self)\n file_input.setMinimumWidth(285)\n file_input.setReadOnly(True)\n file_input.move(85, 50)\n\n btn2 = QtGui.QPushButton(\"Browse..\", self)\n btn2.move(375, 50)\n\n btn3 = QtGui.QPushButton(\"Convert\", self)\n btn3.move(375, 100)\n\n # btn1.clicked.connect(self.buttonClicked)\n btn2.clicked.connect(self.buttonClicked)\n btn3.clicked.connect(self.convert)\n\n #Selection options\n ans_bank = QtGui.QCheckBox('Randomize', self)\n ans_bank.move(20, 100)\n ans_bank.stateChanged.connect(self.rand_ans)\n\n self.statusBar()\n\n self.setGeometry(500, 500, 490, 170)\n self.setWindowTitle('Quiz Interpreter')\n self.show()\n\n def buttonClicked(self):\n\n #Linux --open file manager for testing\n subprocess.Popen(['xdg-open', '.'])\n\n #Mac --open file manager (import os to work)\n #os.system(\"open \"+filename)\n\n #Windows --open file manager (import os to work)\n #open(filename)\n\n sender = self.sender()\n self.statusBar().showMessage(sender.text() + ' was pressed')\n\n def rand_ans(self):\n global randomize\n if randomize == True:\n randomize = False\n else:\n randomize = True\n print(randomize)\n\n def convert(self):\n global instruction_dict, in_num_question, inside_question, in_alpha, answer, answer_dict, temp_ans, set_answer\n\n instruction = \"\"\n q_reading = False\n\n test_file = open(\"test_file.txt\", \"r\")\n\n test_int = \"6\"\n\n #Numbered question test\n if isinstance(int(test_int), int):\n print(\"this is string int test is successful: \", int(test_int))\n if isinstance(ord(\"A\"), int):\n print(ord(\"A\"))\n if isinstance(\"A\", str):\n print(\"A\")\n\n #print(\"this is letter type cast test: \", int('A'))\n\n while True:\n\n #Read in lines\n line = test_file.readline()\n\n #End of file check\n if not line:\n break\n\n #Check for new line (uncomment when sending to Canvas)\n if line == '\\n':\n continue\n\n q_check = line[:2]\n question = line[2:]\n answer = line[2:]\n\n #Question check\n if q_check[1:] == \".\":\n inside_question = False\n #TO work out later\n # if set_answer == True:\n # # print(temp_ans)\n # print(set_answer)\n # answer_dict.append(temp_ans)\n # print(answer_dict)\n # del temp_ans[:]\n # set_answer = False\n\n #Append to question list knowing instructions are over\n instruction_dict.append(instruction)\n #Find numeric questions\n try:\n #String Check for letter \"number\" questioning or multiple line questions\n if isinstance(int(q_check[:1]), int) or in_num_question == True:\n #Set bool flag\n inside_question = True\n print(\"This is int\", int(q_check[:1]))\n #Check if it's a multiple line question\n if q_check[-3:] == \"?\" or q_check[-3:] == \":\":\n in_num_question = False\n elif q_check[-1] == 
'\\n':\n in_num_question = True\n #Remove space after question number\n if question[:1] == \" \":\n question = question[1:]\n\n #Add question to list\n question_dict.append(question)\n\n #This is to add multiple line questions to list for \"letter numbering\"\n elif in_alpha == True:\n\n if q_check[-3:] == \"?\" or q_check[-3:] == \":\":\n in_alpha = False\n elif q_check[-1] == '\\n':\n in_alpha = True\n question_dict.append(line)\n\n #Catch typecast error if \"letter numbering\" is used\n except ValueError:\n #Set bool flag\n inside_question = True\n print(\"This is string\", q_check[:1])\n\n if q_check[-3:] == \"?\" or q_check[-3:] == \":\":\n in_alpha = False\n elif q_check[-1] == '\\n':\n print(\"True\")\n in_alpha = True\n #Remove space after question number\n if question[:1] == \" \":\n question = question[1:]\n\n #Add question to list\n question_dict.append(question)\n continue\n #Append to question\n else:\n instruction += line\n\n #Put answers into the list\n if q_check[:1] == \"-\" or inside_question == True:\n #Remove space after answer marker\n if answer[:1] == \" \" or answer[:1] == \".\":\n answer = answer[1:]\n # print(answer)\n answer_dict.append(answer)\n #set_answer = True\n\n for question in question_dict:\n #for ans in answer_dict:\n if question in answer_dict:\n answer_dict.remove(question)\n\n print(instruction_dict)\n print(question_dict)\n print(answer_dict)\n\n\n\ndef main():\n\n app = QtGui.QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"643006168","text":"__all__ = 'FastAPI', 'APIRouter'\n\n\nfrom fastapi import FastAPI, APIRouter\nfrom fastapi.responses import ORJSONResponse, Response, JSONResponse\n\nclass BaseAPIMeta(type):\n def __new__(cls, name, bases, attrs):\n meths = 'get post delete put patch options trace head'\n\n def generate_method(meth_name):\n def wrap(self, path, **kwargs):\n meth = getattr(super(self.__class__, self), meth_name)\n def wrap(f):\n if 'response_model' in kwargs and 'response_class' in kwargs:\n raise ValueError('don\\'t override response_model and response_class at once')\n elif ('response_model' in kwargs or 'response_class' in kwargs) and \\\n 'return' in getattr(f, '__annotations__', {}):\n raise ValueError('either ambiguity or override')\n\n response_model_or_class = (\n getattr(f, '__annotations__', {})\n .get('return', kwargs\n .get('response_model', kwargs\n .get('response_class', \n ORJSONResponse))))\n\n if issubclass(response_model_or_class, Response):\n kwargs['response_class'] = response_model_or_class\n else:\n kwargs['response_model'] = response_model_or_class\n kwargs['response_class'] = ORJSONResponse\n\n responses = kwargs.get('responses', {})\n\n if responses and not isinstance(responses, dict):\n kwargs['responses'] = responses.to_response_fmt()\n return meth(path, **kwargs)(f)\n return wrap\n return wrap\n\n attrs.update({meth_name: generate_method(meth_name) for meth_name in meths.split()})\n\n return super().__new__(cls, name, bases, attrs)\n\n\nclass FastAPI(FastAPI, metaclass=BaseAPIMeta): ...\n\nclass APIRouter(APIRouter, metaclass=BaseAPIMeta): ...","sub_path":"myfastapi.py","file_name":"myfastapi.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"229478567","text":"from datetime import datetime\nfrom recipe import Recipe\n\nclass Book:\n def __init__(self, name: str):\n self.name = name\n self.creation_date = datetime.now()\n self.recipes_list = {\n \"starter\": [],\n \"lunch\": [],\n \"dessert\": [],\n }\n self.last_date = self.creation_date\n\n def get_recipe_by_name(self, name: str):\n for food_type in [\"starter\", \"lunch\", \"dessert\"]:\n for recipe in self.recipes_list[food_type]:\n if (recipe.name == name):\n print(recipe)\n\n def get_recipe_by_type(self, recipe_type: str):\n for recipe in self.recipes_list[recipe_type]:\n print(recipe)\n\n def add_recipe(self, recipe: Recipe):\n self.recipes_list[recipe.recipe_type].append(recipe)\n self.last_date = datetime.now()\n\nif __name__ == '__main__':\n book = Book(\"recipe book\")\n\n recipe = Recipe(\"food\", 5, 5, [\"ham\", \"egg\", \"spam\"], \"food\", \"lunch\")\n book.add_recipe(recipe)\n book.get_recipe_by_name(\"food\")\n book.get_recipe_by_type(\"lunch\")\n","sub_path":"module01/ex00/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121910944","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\n# 판다스 DataFrame() 힘수로 데이터프레임 변환. 변수 df1, df2에 저장\ndata1 = {'name' : ['Jerry', 'Rian', 'Paul'],\n 'algol' : ['A+', 'A', 'B+'],\n 'basic' : ['C', 'B', 'B'],\n 'C++' : ['B+', 'C', 'C']}\n\ndata2 = {'c0' : [1,2,3],\n 'c1' : [4,5,6],\n 'c2' : [7,8,9],\n 'c3' : [10,11,12],\n 'c4' : [13,14,15]}\n\ndf1 = pd.DataFrame(data1)\ndf1.set_index('name',inplace=True)\nprint(df1,'\\n')\n\ndf2 = pd.DataFrame(data2)\ndf2.set_index('c0',inplace=True)\nprint(df2,'\\n')\n\n# df1을 'sheet1'으로 df2를 'sheet2'로 저장 (Excel 파일명은 \"df_excelwriter.xlsx\")\nwriter = pd.ExcelWriter(\"./df_excelwriter.xlsx\")\ndf1.to_excel(writer,sheet_name=\"sheet1\")\ndf2.to_excel(writer,sheet_name=\"sheet2\")\nwriter.save()","sub_path":"part2/2.10_excelwriter.py","file_name":"2.10_excelwriter.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482746440","text":"import pickle\nimport sys\nimport timeit\nimport os\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport itertools\nimport pandas as pd\n\n#from sklearn.metrics import roc_auc_score, precision_score, recall_score\n\n\nclass GraphNeuralNetwork(nn.Module):\n def __init__(self):\n super(GraphNeuralNetwork, self).__init__()\n self.embed_fingerprint = nn.Embedding(n_fingerprint, dim)\n self.W_fingerprint = nn.ModuleList([nn.Linear(dim, dim)\n for _ in range(hidden_layer)])\n self.W_output = nn.ModuleList([nn.Linear(dim+2, dim+2)\n for _ in range(output_layer)])\n self.W_property = nn.Linear(dim+2, 2)\n\n def pad(self, matrices, pad_value):\n \"\"\"Pad adjacency matrices for batch processing.\"\"\"\n sizes = [d.shape[0] for d in matrices]\n D = sum(sizes)\n pad_matrices = pad_value + np.zeros((D, D))\n m = 0\n for i, d in enumerate(matrices):\n s_i = sizes[i]\n pad_matrices[m:m+s_i, m:m+s_i] = d\n m += s_i\n return torch.FloatTensor(pad_matrices).to(device)\n\n def sum_axis(self, xs, axis):\n y = list(map(lambda x: torch.sum(x, 0), torch.split(xs, axis)))\n return torch.stack(y)\n\n def mean_axis(self, xs, axis):\n y = list(map(lambda x: torch.mean(x, 0), torch.split(xs, axis)))\n return torch.stack(y)\n\n def gnn(self, xs, A, M, i):\n hs = torch.relu(self.W_fingerprint[i](xs))\n if update 
== 'sum':\n return xs + torch.matmul(A, hs)\n if update == 'mean':\n return xs + torch.matmul(A, hs) / (M-1)\n\n def forward(self, inputs):\n\n Smiles, fingerprints, adjacencies, docking_scores = inputs\n axis = list(map(lambda x: len(x), fingerprints))\n\n M = np.concatenate([np.repeat(len(f), len(f)) for f in fingerprints])\n M = torch.unsqueeze(torch.FloatTensor(M), 1)\n\n fingerprints = torch.cat(fingerprints)\n fingerprint_vectors = self.embed_fingerprint(fingerprints)\n adjacencies = self.pad(adjacencies, 0)\n\n for i in range(hidden_layer):\n fingerprint_vectors = self.gnn(fingerprint_vectors,\n adjacencies, M, i)\n\n if output == 'sum':\n molecular_vectors = self.sum_axis(fingerprint_vectors, axis)\n if output == 'mean':\n molecular_vectors = self.mean_axis(fingerprint_vectors, axis)\n\n \"\"\"getting docking scores and concatenate them with molecular vectors\"\"\"\n \n docking_scores = torch.from_numpy(np.asarray(docking_scores)).to(device)\n \n y_cat = torch.cat((docking_scores, molecular_vectors), 1)\n \n for j in range(output_layer):\n y_cat = torch.relu(self.W_output[j](y_cat))\n\n# print(y_cat)\n\n predicted_properties = self.W_property(y_cat)\n\n return Smiles, predicted_properties\n\n def __call__(self, data_batch):\n\n inputs = data_batch[:]\n Smiles, predicted_properties = self.forward(inputs)\n\n ys = F.softmax(predicted_properties, 1).to('cpu').data.numpy()\n predicted_labels = list(map(lambda x: np.argmax(x), ys))\n predicted_scores = list(map(lambda x: x[1], ys))\n return predicted_labels, predicted_scores\n\n\ndef load_tensor(filename, dtype, allow_pickle=True):\n return [dtype(d).to(device) for d in np.load(filename + '.npy', allow_pickle=True)]\n\n\ndef load_numpy(filename, allow_pickle=True):\n return np.load(filename + '.npy', allow_pickle=True)\n\n\ndef load_pickle(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\n\ndef shuffle_dataset(dataset, seed):\n np.random.seed(seed)\n np.random.shuffle(dataset)\n return dataset\n\n\ndef split_dataset(dataset, ratio):\n n = int(ratio * len(dataset))\n dataset_1, dataset_2 = dataset[:n], dataset[n:]\n return dataset_1, dataset_2\n\n\nif __name__ == \"__main__\":\n\n \"\"\"Hyperparameters.\"\"\"\n (DATASET, radius, update, output, dim, hidden_layer, output_layer, batch,\n lr, lr_decay, decay_interval, weight_decay, iteration,\n setting) = sys.argv[1:]\n (dim, hidden_layer, output_layer, batch, decay_interval,\n iteration) = map(int, [dim, hidden_layer, output_layer, batch,\n decay_interval, iteration])\n lr, lr_decay, weight_decay = map(float, [lr, lr_decay, weight_decay])\n\n \"\"\"CPU or GPU.\"\"\"\n if torch.cuda.is_available():\n device = torch.device('cuda')\n print('The code uses GPU...')\n else:\n device = torch.device('cpu')\n print('The code uses CPU!!!')\n\n \"\"\"Load preprocessed test data.\"\"\"\n dir_input = ('synthetic_test/radius' + radius + '/')\n with open(dir_input + 'Smiles.txt') as f:\n Smiles = f.read().strip().split()\n# print(Smiles)\n Molecules = load_tensor(dir_input + 'Molecules', torch.LongTensor)\n adjacencies = load_numpy(dir_input + 'adjacencies')\n docking_scores = load_numpy(dir_input + 'docking_scores')\n\n with open(dir_input + 'fingerprint_dict.pickle', 'rb') as f:\n fingerprint_dict = pickle.load(f)\n fingerprint_dict = load_pickle(dir_input + 'fingerprint_dict.pickle')\n n_fingerprint = len(fingerprint_dict)\n\n \"\"\"Create a dataset and split it into train/dev/test.\"\"\"\n dataset = list(zip(Smiles, Molecules, adjacencies, docking_scores))\n\n\n \"\"\"Set a 
model.\"\"\"\n torch.manual_seed(1234)\n model = GraphNeuralNetwork()\n all_model_preds = []\n dataframe = pd.DataFrame()\n# dataframe['Smiles'] = Smiles ####removed to take all predictions only\n for filename in os.listdir('fullmodel/'):\n if filename.endswith('.txt'):\n pass\n else:\n checkpoint = torch.load('fullmodel/'+ filename)\n model.load_state_dict(checkpoint)\n model = model.to(device)\n \n print('Testing model')\n start = timeit.default_timer()\n\n N = len(dataset)\n Correct_labels, Predicted_labels, Predicted_scores = [], [], []\n batch = 150\n\n# data_batch = list(zip(*dataset[:]))\n \n predicts = []\n for i in range(0, N, batch):\n data_batch = list(zip(*dataset[i:i+batch]))\n predicts.append(model(data_batch))\n# print(predicts)\n\n predictions = []\n softmax_scores = []\n \n all150s = []\n for n in range(0,len(predicts)):\n for m in range(0,2):\n all150s.append(predicts[n][m])\n# print(\"150s=\",all150s)\n \n labels = []\n scores = []\n for l in range(0,len(all150s)):\n if l%2 == 0:\n labels.append(all150s[l])\n else:\n scores.append(all150s[l])\n# print(\"labels=\",labels)\n# print(\"scores=\",scores)\n \n \n merged_labels = list(itertools.chain(*labels))\n merged_scores = list(itertools.chain(*scores))\n \n# print(merged_labels)\n# print(len(merged_labels))\n# print(merged_scores)\n# print(len(merged_scores))\n\n results = pd.DataFrame(\n {'Smiles': Smiles,\n 'prediction_label': merged_labels,\n 'softmax_score': merged_scores,\n })\n print(results)\n results.to_csv('bootstrapping_results/' + str(filename) +'EGNN_results.csv', header=True, index=False)\n all_model_preds.append(merged_scores)\n dataframe[str(filename)] = merged_scores\n \n \n \n \n dataframe.to_csv('bootstrapping_results/' + 'all_bootstrapping_predictions.csv', header=True, index=False)\n dataframe['average'] = dataframe.mean(axis=1)\n dataframe['std'] = dataframe.std(axis=1)\n dataframe['Smiles'] = Smiles\n\n\n final=dataframe[['Smiles','average','std']]\n final.to_csv('bootstrapping_results/' + 'EGNN_Bootstrapping_average_results.csv', header=True, index=False)\n\n \n \n \n","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":8076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"540945872","text":"# -*- coding:utf-8 -*-\nimport tornado.web\nimport torndb\nimport os\n\n\nclass Application(tornado.web.Application):\n def __init__(self, handlers):\n\n # handlers 供子类重写\n handlers = handlers\n\n # setting文件\n settings = dict(\n template_path=os.path.join(os.path.dirname(__file__), 'templates'),\n static_path=os.path.join(os.path.dirname(__file__), 'statics'),\n static_url_prefix='/running/',\n log_path=os.path.join(os.path.dirname(__file__), 'logs/log'),\n\n # cookie_secret='JArhbLQvSYi6zQUH4JGQUwsicJsZvkdltUIFZ8ebl5Q=',\n # xsrf_cookies=True,\n login_url='/login',\n debug=True,\n autoescape=None,\n\n )\n\n super(Application, self).__init__(handlers, **settings)\n\n # 创建一个全局mysql连接实例供handler使用\n self.db = torndb.Connection(\n host='192.168.0.120',\n # database='tornado_running',\n database='shopping',\n user='root',\n password='mysql'\n )\n","sub_path":"running_wechat/app_setting.py","file_name":"app_setting.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230571267","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 14 22:36:53 2018\n\n@author: smart\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport 
talib as ta\nimport tushare as ts\n\nsh = ts.get_hist_data('sh').sort_index()\n\nclose = sh[['close','volume']]\n\nma10 = ta.MA(close['close'],10)\nclose['ma10'] = ma10\n\n#close[['close','ma10']].plot()\n\n# Bollinger Bands\nupper,middle,lower = ta.BBANDS( sh['close'].values,\n                                timeperiod = 20,\n                                nbdevup = 2,\n                                nbdevdn = 2,\n                                matype = 0)\nclose.loc[:,'upper'] = upper\nclose.loc[:,'middle'] = middle\nclose.loc[:,'lower'] = lower\n# plot the Bollinger Bands\nclose[['close','middle','upper','lower']].plot(figsize=(10,5))\n\n\n","sub_path":"量化交易/简单的交易demo02.py","file_name":"简单的交易demo02.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612086119","text":"#!/usr/bin/python\n\n##\n# @file hsdschctrl.py\n# @brief Define base class for HSDSCH control msg\n# @author Eric - eric.xia@nsn.com\n# @version 1.0\n# @date 2013-04-12\n\n\nimport pkt\n\n# only CA response msg parsed\nHSDSCHCTRL_TYPE_CA_TYPE1 = 0xb  # type 1 capacity allocation response\nHSDSCHCTRL_TYPE_CA_TYPE2 = 0xc  # type 2 capacity allocation response\n\nHSDSCHCTRL_TYPE_CR = 0xa  # Capacity Request \nHSDSCHCTRL_TYPE_DL_NODE_SYNC = 0x6  # DL Node Sync Control Frame Type\nHSDSCHCTRL_TYPE_UL_NODE_SYNC = 0x7  # UL Node Sync Control Frame Type\n\nclass HSDSCHCTRL(pkt.Packet):\n    \"\"\"docstring for HSDSCHCTRL\"\"\"\n    __fields__ = [\n        ('ctrlft', 8)  # ctrl frame type\n    ]\n    _typesw = {}\n\n    def unpack(self, buf):\n        \"\"\"docstring for unpack\"\"\"\n        pkt.Packet.unpack(self, buf)\n        buf = buf[self.__pkt_fields_len__:]\n        try:\n            self.data = self._typesw[self.ctrlft](buf)\n            setattr(self, self.data.__class__.__name__.lower(), self.data)\n        except (KeyError, pkt.UnpackError):\n            self.data = buf\n\n    @classmethod\n    def set_type(cls, t, pktclass):\n        cls._typesw[t] = pktclass\n\n    @classmethod\n    def get_type(cls, t):\n        return cls._typesw[t]\n\ndef __load_types():\n    g = globals()\n    for k, v in g.iteritems():\n        if k.startswith('HSDSCHCTRL_TYPE_'):\n            name = k[16:]\n            modname = name.lower()\n            try:\n                mod = __import__(modname, g)\n            except ImportError:\n                continue\n            HSDSCHCTRL.set_type(v, getattr(mod, name))\n\nif not HSDSCHCTRL._typesw:\n    __load_types()\n\n\n","sub_path":"FpBlindParser/hsdschctrl.py","file_name":"hsdschctrl.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330440361","text":"from datetime import timedelta\nimport itertools\nimport tempfile\n\nimport pytest\n\nsa = pytest.importorskip('sqlalchemy')\npytest.importorskip('psycopg2')\n\nimport numpy as np\nimport pandas as pd\n\nimport pandas.util.testing as tm\n\nfrom odo import odo, resource, drop, discover\nfrom odo.utils import tmpfile\nfrom blaze import symbol, compute, concat, join, sin, cos, radians, atan2\nfrom blaze import sqrt, transform, Data\nfrom blaze.utils import example, normalize\n\n\nnames = ('tbl%d' % i for i in itertools.count())\n\n\n@pytest.fixture\ndef url():\n    return 'postgresql://postgres@localhost/test::%s'\n\n\n@pytest.yield_fixture\ndef sql(url):\n    try:\n        t = resource(url % next(names), dshape='var * {A: string, B: int64}')\n    except sa.exc.OperationalError as e:\n        pytest.skip(str(e))\n    else:\n        t = odo([('a', 1), ('b', 2)], t)\n        try:\n            yield t\n        finally:\n            drop(t)\n\n\n@pytest.yield_fixture(scope='module')\ndef nyc():\n    with open(example('nyc.csv'), 'rb') as f:\n        raw = f.read()\n    with tmpfile('.csv') as name:\n        with open(name, 'wb') as g:\n            g.write(raw)\n        try:\n            t = odo(name, 
'postgresql://postgres@localhost/test::nyc')\n except sa.exc.OperationalError as e:\n pytest.skip(str(e))\n else:\n try:\n yield t\n finally:\n drop(t)\n\n\n@pytest.yield_fixture\ndef sqla(url):\n try:\n t = resource(url % next(names), dshape='var * {A: ?string, B: ?int32}')\n except sa.exc.OperationalError as e:\n pytest.skip(str(e))\n else:\n t = odo([('a', 1), (None, 1), ('c', None)], t)\n try:\n yield t\n finally:\n drop(t)\n\n\n@pytest.yield_fixture\ndef sqlb(url):\n try:\n t = resource(url % next(names), dshape='var * {A: string, B: int64}')\n except sa.exc.OperationalError as e:\n pytest.skip(str(e))\n else:\n t = odo([('a', 1), ('b', 2)], t)\n try:\n yield t\n finally:\n drop(t)\n\n\n@pytest.yield_fixture\ndef sql_with_dts(url):\n try:\n t = resource(url % next(names), dshape='var * {A: datetime}')\n except sa.exc.OperationalError as e:\n pytest.skip(str(e))\n else:\n t = odo([(d,) for d in pd.date_range('2014-01-01', '2014-02-01')], t)\n try:\n yield t\n finally:\n drop(t)\n\n\n@pytest.yield_fixture\ndef sql_two_tables(url):\n dshape = 'var * {a: int32}'\n try:\n t = resource(url % next(names), dshape=dshape)\n u = resource(url % next(names), dshape=dshape)\n except sa.exc.OperationalError as e:\n pytest.skip(str(e))\n else:\n try:\n yield u, t\n finally:\n drop(t)\n drop(u)\n\n\n@pytest.yield_fixture\ndef sql_with_float(url):\n try:\n t = resource(url % next(names), dshape='var * {c: float64}')\n except sa.exc.OperationalError as e:\n pytest.skip(str(e))\n else:\n try:\n yield t\n finally:\n drop(t)\n\n\ndef test_postgres_create(sql):\n assert odo(sql, list) == [('a', 1), ('b', 2)]\n\n\ndef test_postgres_isnan(sql_with_float):\n data = (1.0,), (float('nan'),)\n table = odo(data, sql_with_float)\n sym = symbol('s', discover(data))\n assert odo(compute(sym.isnan(), table), list) == [(False,), (True,)]\n\n\ndef test_insert_from_subselect(sql_with_float):\n data = pd.DataFrame([{'c': 2.0}, {'c': 1.0}])\n tbl = odo(data, sql_with_float)\n s = symbol('s', discover(data))\n odo(compute(s[s.c.isin((1.0, 2.0))].sort(), tbl), sql_with_float),\n tm.assert_frame_equal(\n odo(sql_with_float, pd.DataFrame).iloc[2:].reset_index(drop=True),\n pd.DataFrame([{'c': 1.0}, {'c': 2.0}]),\n )\n\n\ndef test_concat(sql_two_tables):\n t_table, u_table = sql_two_tables\n t_data = pd.DataFrame(np.arange(5), columns=['a'])\n u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])\n odo(t_data, t_table)\n odo(u_data, u_table)\n\n t = symbol('t', discover(t_data))\n u = symbol('u', discover(u_data))\n tm.assert_frame_equal(\n odo(\n compute(concat(t, u).sort('a'), {t: t_table, u: u_table}),\n pd.DataFrame,\n ),\n pd.DataFrame(np.arange(10), columns=['a']),\n )\n\n\ndef test_concat_invalid_axis(sql_two_tables):\n t_table, u_table = sql_two_tables\n t_data = pd.DataFrame(np.arange(5), columns=['a'])\n u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])\n odo(t_data, t_table)\n odo(u_data, u_table)\n\n # We need to force the shape to not be a record here so we can\n # create the `Concat` node with an axis=1.\n t = symbol('t', '5 * 1 * int32')\n u = symbol('u', '5 * 1 * int32')\n\n with pytest.raises(ValueError) as e:\n compute(concat(t, u, axis=1), {t: t_table, u: u_table})\n\n # Preserve the suggestion to use merge.\n assert \"'merge'\" in str(e.value)\n\n\ndef test_timedelta_arith(sql_with_dts):\n delta = timedelta(days=1)\n dates = pd.Series(pd.date_range('2014-01-01', '2014-02-01'))\n sym = symbol('s', discover(dates))\n assert (\n odo(compute(sym + delta, sql_with_dts), pd.Series) == dates + delta\n 
).all()\n assert (\n odo(compute(sym - delta, sql_with_dts), pd.Series) == dates - delta\n ).all()\n\n\ndef test_coerce_bool_and_sum(sql):\n n = sql.name\n t = symbol(n, discover(sql))\n expr = (t.B > 1.0).coerce(to='int32').sum()\n result = compute(expr, sql).scalar()\n expected = odo(compute(t.B, sql), pd.Series).gt(1).sum()\n assert result == expected\n\n\ndef test_distinct_on(sql):\n t = symbol('t', discover(sql))\n computation = compute(t[['A', 'B']].sort('A').distinct('A'), sql)\n assert normalize(str(computation)) == normalize(\"\"\"\n SELECT DISTINCT ON (anon_1.\"A\") anon_1.\"A\", anon_1.\"B\"\n FROM (SELECT {tbl}.\"A\" AS \"A\", {tbl}.\"B\" AS \"B\"\n FROM {tbl}) AS anon_1 ORDER BY anon_1.\"A\" ASC\n \"\"\".format(tbl=sql.name))\n assert odo(computation, tuple) == (('a', 1), ('b', 2))\n\n\ndef test_join_type_promotion(sqla, sqlb):\n t, s = symbol(sqla.name, discover(sqla)), symbol(sqlb.name, discover(sqlb))\n expr = join(t, s, 'B', how='inner')\n result = set(map(tuple, compute(expr, {t: sqla, s: sqlb}).execute().fetchall()))\n expected = set([(1, 'a', 'a'), (1, None, 'a')])\n assert result == expected\n\n\n@pytest.mark.parametrize(['n', 'column'],\n [(1, 'A'), (-1, 'A'),\n (1, 'B'), (-1, 'B'),\n (0, 'A'), (0, 'B')])\ndef test_shift_on_column(n, column, sql):\n t = symbol('t', discover(sql))\n expr = t[column].shift(n)\n result = odo(compute(expr, sql), pd.Series)\n expected = odo(sql, pd.DataFrame)[column].shift(n)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize('n', [-1, 0, 1])\ndef test_shift_arithmetic(sql, n):\n t = symbol('t', discover(sql))\n expr = t.B - t.B.shift(n)\n result = odo(compute(expr, sql), pd.Series)\n df = odo(sql, pd.DataFrame)\n expected = df.B - df.B.shift(n)\n tm.assert_series_equal(result, expected)\n\n\ndef test_dist(nyc):\n def distance(lat1, lon1, lat2, lon2, R=3959):\n # http://andrew.hedges.name/experiments/haversine/\n dlon = radians(lon2 - lon1)\n dlat = radians(lat2 - lat1)\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n return R * 2 * atan2(sqrt(a), sqrt(1 - a))\n\n t = symbol('t', discover(nyc))\n\n filtered = t[\n (t.pickup_latitude >= 40.477399) &\n (t.pickup_latitude <= 40.917577) &\n (t.dropoff_latitude >= 40.477399) &\n (t.dropoff_latitude <= 40.917577) &\n (t.pickup_longitude >= -74.259090) &\n (t.pickup_longitude <= -73.700272) &\n (t.dropoff_longitude >= -74.259090) &\n (t.dropoff_longitude <= -73.700272) &\n (t.passenger_count < 6)\n ]\n dist = distance(filtered.pickup_latitude, filtered.pickup_longitude,\n filtered.dropoff_latitude, filtered.dropoff_longitude)\n transformed = transform(filtered, dist=dist)\n assert (\n odo(compute(transformed.dist.max(), nyc), float) ==\n odo(compute(transformed.dist, nyc), pd.Series).max().item()\n )\n\n\ndef test_multiple_columns_in_transform(nyc):\n t = symbol('t', discover(nyc))\n t = t[\n (t.pickup_latitude >= 40.477399) &\n (t.pickup_latitude <= 40.917577) &\n (t.dropoff_latitude >= 40.477399) &\n (t.dropoff_latitude <= 40.917577) &\n (t.pickup_longitude >= -74.259090) &\n (t.pickup_longitude <= -73.700272) &\n (t.dropoff_longitude >= -74.259090) &\n (t.dropoff_longitude <= -73.700272) &\n (t.passenger_count < 6)\n ]\n hours = t.trip_time_in_secs.coerce('float64') / 3600.0\n avg_speed_in_mph = t.trip_distance / hours\n d = transform(t, avg_speed_in_mph=avg_speed_in_mph, mycol=avg_speed_in_mph + 1)\n df = odo(compute(d[d.avg_speed_in_mph <= 200], nyc), pd.DataFrame)\n assert not df.empty\n\n\ndef test_coerce_on_select(nyc):\n t = symbol('t', 
discover(nyc))\n    t = t[\n        (t.pickup_latitude >= 40.477399) &\n        (t.pickup_latitude <= 40.917577) &\n        (t.dropoff_latitude >= 40.477399) &\n        (t.dropoff_latitude <= 40.917577) &\n        (t.pickup_longitude >= -74.259090) &\n        (t.pickup_longitude <= -73.700272) &\n        (t.dropoff_longitude >= -74.259090) &\n        (t.dropoff_longitude <= -73.700272) &\n        (t.passenger_count < 6)\n    ]\n    t = transform(t, pass_count=t.passenger_count + 1)\n    result = compute(t.pass_count.coerce('float64'), nyc)\n    s = odo(result, pd.Series)\n    expected = odo(compute(t, nyc),\n                   pd.DataFrame).passenger_count.astype('float64') + 1.0\n    assert list(s) == list(expected)\n\n\ndef test_interactive_len(sql):\n    t = Data(sql)\n    assert len(t) == int(t.count())\n","sub_path":"blaze/compute/tests/test_postgresql_compute.py","file_name":"test_postgresql_compute.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"153209159","text":"import pygame, os\nfrom os import path\nfrom pygame.sprite import Sprite\n\nBLACK = (0, 0, 0)\nbum_img = ['images/bum01.png','images/bum02.png','images/bum03.png']\n\n# Explosion effect on collision\nclass Explosion(Sprite):\n\tdef __init__(self, center, screen, quantity):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.screen = screen\n\t\tself.image = pygame.image.load(bum_img[quantity])\n\t\tself.rect = self.image.get_rect()\n\t\t#self.screen_rect = screen.get_rect()\n\t\tself.rect.center = center\n\n\tdef blitme(self):\n\t\tself.screen.blit(self.image, self.rect)","sub_path":"_my/explosions.py","file_name":"explosions.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"586005183","text":"\"\"\"\nA database for rules.\n\"\"\"\nimport abc\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List, MutableMapping, Optional, Set, Tuple\n\nfrom logzero import logger\n\nfrom comb_spec_searcher.equiv_db import EquivalenceDB\nfrom comb_spec_searcher.exception import SpecificationNotFound\nfrom comb_spec_searcher.strategies import AbstractStrategy, VerificationRule\nfrom comb_spec_searcher.strategies.rule import AbstractRule\nfrom comb_spec_searcher.tree_searcher import (\n    Node,\n    iterative_proof_tree_finder,\n    iterative_prune,\n    proof_tree_generator_dfs,\n    prune,\n    random_proof_tree,\n    smallish_random_proof_tree,\n)\n\n__all__ = [\"RuleDBBase\", \"RuleDB\"]\n\nSpecification = Tuple[List[Tuple[int, AbstractStrategy]], List[List[int]]]\nRuleKey = Tuple[int, Tuple[int, ...]]\n\n\nclass RuleDBBase(abc.ABC):\n    \"\"\"A database for rules found.\"\"\"\n\n    def __init__(self) -> None:\n        self.equivdb = EquivalenceDB()\n\n    @property\n    @abc.abstractmethod\n    def rule_to_strategy(self,) -> MutableMapping[RuleKey, AbstractStrategy]:\n        pass\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Check if all stored information is the same.\"\"\"\n        if not isinstance(other, self.__class__):\n            return NotImplemented\n        return bool(self.rule_to_strategy == other.rule_to_strategy)\n\n    def add(self, start: int, ends: Iterable[int], rule: AbstractRule) -> None:\n        \"\"\"\n        Add a rule to the database.\n\n        - start is a single integer.\n        - ends is a tuple of integers, representing the non-empty children.\n        - rule is a Rule that creates start -> ends.\n        \"\"\"\n        ends = tuple(sorted(ends))\n        if isinstance(rule, VerificationRule):\n            self.set_verified(start)\n        is_equiv = len(ends) == 1 and rule.strategy.can_be_equivalent()\n        if is_equiv:\n            
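# register the pair in the EquivalenceDB union-find before deciding how the rule is stored\n            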
self.set_equivalent(start, ends[0])\n if len(ends) != 1 or is_equiv or not self.are_equivalent(start, ends[0]):\n # to avoid overwriting an equivalence rule with a non-equivalence\n # rule, we only save if an equivalence rule, or does not have the\n # same start -> ends as some equivalence rule.\n self.rule_to_strategy[(start, ends)] = rule.strategy\n\n def is_verified(self, label: int) -> bool:\n \"\"\"Return True if label has been verified.\"\"\"\n return bool(self.equivdb.is_verified(label))\n\n def set_verified(self, label: int) -> None:\n \"\"\"Mark label as verified.\"\"\"\n self.equivdb.set_verified(label)\n\n def are_equivalent(self, label: int, other: int) -> bool:\n \"\"\"Return true if label and other are equivalent.\"\"\"\n return bool(self.equivdb.equivalent(label, other))\n\n def set_equivalent(self, label: int, other: int) -> None:\n \"\"\"Mark label and other as equivalent.\"\"\"\n self.equivdb.union(label, other)\n\n def rules_up_to_equivalence(self) -> Dict[int, Set[Tuple[int, ...]]]:\n \"\"\"Return a defaultdict containing all rules up to the equivalence.\"\"\"\n rules_dict: Dict[int, Set[Tuple[int, ...]]] = defaultdict(set)\n for start, ends in self:\n rules_dict[self.equivdb[start]].add(\n tuple(sorted(self.equivdb[e] for e in ends))\n )\n return rules_dict\n\n def all_rules(self) -> Iterator[Tuple[int, Tuple[int, ...], AbstractStrategy]]:\n \"\"\"Yield all the rules found so far.\"\"\"\n for start, ends in self:\n yield start, ends, self.rule_to_strategy[(start, ends)]\n\n def __iter__(self) -> Iterator[Tuple[int, Tuple[int, ...]]]:\n \"\"\"Iterate through rules as the pairs (start, end).\"\"\"\n for start, ends in self.rule_to_strategy:\n if len(ends) != 1 or not self.are_equivalent(start, ends[0]):\n yield start, ends\n\n def contains(self, start: int, ends: Tuple[int, ...]) -> bool:\n \"\"\"Return true if the rule start -> ends is in the database.\"\"\"\n ends = tuple(sorted(ends))\n return (start, ends) in self.rule_to_strategy\n\n def status(self) -> str:\n \"\"\"Return a string describing the status of the rule database.\"\"\"\n status = \"RuleDB status:\\n\"\n status += \"\\tTotal number of combinatorial rules is {}\".format(\n len(self.rule_to_strategy)\n )\n # TODO: strategy verified, verified, equivalence sets?\n return status\n\n ################################################################\n # Below are methods for finding a combinatorial specification. 
#\n ################################################################\n\n def has_specification(self, label: int) -> bool:\n \"\"\"Return True if a specification has been found, false otherwise.\"\"\"\n return self.is_verified(label)\n\n def rule_from_equivalence_rule(\n self, eqv_start: int, eqv_ends: Iterable[int]\n ) -> Optional[Tuple[int, Tuple[int, ...]]]:\n \"\"\"\n Return a rule that satisfies the equivalence rule.\n\n Returns None if no such rule exists.\n \"\"\"\n eqv_start = self.equivdb[eqv_start]\n eqv_ends = tuple(sorted(map(self.equivdb.__getitem__, eqv_ends)))\n for rule in self:\n start, ends = rule\n temp_start = self.equivdb[start]\n temp_ends = tuple(sorted(map(self.equivdb.__getitem__, ends)))\n if eqv_start == temp_start and eqv_ends == temp_ends:\n return start, ends\n return None\n\n def find_specification(\n self, label: int, minimization_time_limit: float, iterative: bool = False\n ) -> Node:\n \"\"\"Search for a specification based on current data found.\"\"\"\n rules_dict = self.rules_up_to_equivalence()\n # Prune all unverified labels (recursively)\n if iterative:\n rules_dict = iterative_prune(rules_dict, root=label)\n else:\n prune(rules_dict) # this function removes rules not in a specification.\n\n # only verified labels in rules_dict, in particular, there is a\n # specification if a label is in the rules_dict\n for ver_label in rules_dict.keys():\n self.set_verified(ver_label)\n\n if self.equivdb[label] in rules_dict:\n if iterative:\n specification = iterative_proof_tree_finder(\n rules_dict, root=self.equivdb[label]\n )\n else:\n specification = smallish_random_proof_tree(\n rules_dict, self.equivdb[label], minimization_time_limit\n )\n else:\n raise SpecificationNotFound(\"No specification for label {}\".format(label))\n return specification\n\n def get_specification_rules(\n self, label: int, minimization_time_limit: float, iterative: bool = False\n ) -> Specification:\n \"\"\"\n Return a list of pairs (label, rule) which form a specification.\n The specification returned is random, so two calls to the function\n may result in a a different output.\n \"\"\"\n proof_tree_node = self.find_specification(\n label=label,\n iterative=iterative,\n minimization_time_limit=minimization_time_limit,\n )\n return self._get_specification_rules(label, proof_tree_node)\n\n def _get_specification_rules(\n self, label: int, proof_tree_node: Node\n ) -> Specification:\n children: Dict[int, Tuple[int, ...]] = dict()\n internal_nodes = set([label])\n for node in proof_tree_node.nodes():\n eqv_start, eqv_ends = (\n node.label,\n tuple(child.label for child in node.children),\n )\n rule = self.rule_from_equivalence_rule(eqv_start, eqv_ends)\n if rule is not None:\n start, ends = rule\n children[start] = ends\n internal_nodes.update(ends)\n res = []\n eqv_paths = []\n for start, ends in children.items():\n for eqv_label in internal_nodes:\n if self.are_equivalent(start, eqv_label):\n path = self.equivdb.find_path(eqv_label, start)\n for a, b in zip(path[:-1], path[1:]):\n try:\n strategy = self.rule_to_strategy[(a, (b,))]\n res.append((a, strategy))\n except KeyError:\n strategy = self.rule_to_strategy[(b, (a,))]\n res.append((b, strategy))\n if len(path) > 1:\n eqv_paths.append(path)\n strategy = self.rule_to_strategy[(start, ends)]\n res.append((start, strategy))\n return res, eqv_paths\n\n def all_specifications(\n self, label: int, iterative: bool = False\n ) -> Iterator[Specification]:\n \"\"\"\n A generator that yields all specifications in the universe for\n the given 
label.\n \"\"\"\n if iterative:\n raise NotImplementedError(\n \"There is no method for yielding all iterative proof trees.\"\n )\n rules_dict = self.rules_up_to_equivalence()\n # Prune all unverified labels (recursively)\n prune(rules_dict)\n\n if self.equivdb[label] in rules_dict:\n proof_trees = proof_tree_generator_dfs(rules_dict, root=self.equivdb[label])\n for proof_tree_node in proof_trees:\n yield self._get_specification_rules(label, proof_tree_node)\n\n def get_smallest_specification(\n self, label: int, iterative: bool = False\n ) -> Specification:\n \"\"\"\n Return the smallest specification in the universe for label. It uses\n exponential search to find it.\n This doesn't consider the length of the equivalence paths.\n \"\"\"\n if iterative:\n raise NotImplementedError(\n \"There is no method for finding smallest iterative proof trees.\"\n )\n rules_dict = self.rules_up_to_equivalence()\n prune(rules_dict)\n\n if not self.equivdb[label] in rules_dict:\n raise SpecificationNotFound(\"No specification for label {}\".format(label))\n tree = random_proof_tree(rules_dict, root=self.equivdb[label])\n minimum = 1\n maximum = len(tree)\n logger.info(\n \"Found a specification of size %s. Looking for the smallest.\", len(tree)\n )\n # Binary search to find a smallest proof tree.\n while minimum < maximum:\n middle = (minimum + maximum) // 2\n logger.info(\"Looking for specification of size %s\", middle)\n try:\n tree = next(\n proof_tree_generator_dfs(\n rules_dict, root=self.equivdb[label], maximum=middle\n )\n )\n maximum = min(middle, len(tree))\n except StopIteration:\n minimum = middle + 1\n logger.info(\"The smallest specification is of size %s.\", len(tree))\n return self._get_specification_rules(label, tree)\n\n\nclass RuleDB(RuleDBBase):\n def __init__(self) -> None:\n super().__init__()\n self._rule_to_strategy: Dict[Tuple[int, Tuple[int, ...]], AbstractStrategy] = {}\n\n @property\n def rule_to_strategy(self) -> Dict[Tuple[int, Tuple[int, ...]], AbstractStrategy]:\n return self._rule_to_strategy\n","sub_path":"comb_spec_searcher/rule_db/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":11406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428889068","text":"from bot.conversation_thread import ConversationThread\nfrom bot.corpus import Corpus\n\nclass GreetingThread(ConversationThread):\n def __init__(self, conversation):\n super().__init__(\"greetings\", conversation)\n\n def compute_current_step(self):\n c = 0\n if self.completed(0):\n c = 1\n if self.completed(1):\n c = 2\n return c\n\n def accept_message(self, analysis):\n intent = analysis.get('intent')\n slug = intent.get('slug') if intent is not None else None\n if slug == \"greetings\":\n return True\n if len(self.completed_steps) == 0 and slug in [None, \"greetings\", \"naming\"]:\n return True\n if self.current_step == 1:\n return True\n return False\n\n def process_message(self, analysis):\n super().process_message(analysis)\n intent = analysis.get('intent')\n slug = intent.get('slug') if intent is not None else None\n if not self.completed(0):\n name = \"\"\n entities = analysis.get('entities')\n person = entities.get(\"person\")\n pronoun = entities.get(\"pronoun\")\n if person is not None and pronoun is not None:\n person = person.pop(0)\n pronoun = pronoun.pop(0)\n if pronoun.get('person') == 1:\n name = person.get('fullname')\n self.conversation.save_data('client_name', name)\n\n 
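# greet the user; name stays empty when no first-person entity was extracted above\n            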
self.reply_text(Corpus.get('root_greetings01').format(name))\n self.reply_text(Corpus.get('root_greetings02'))\n self.complete_step(0)\n return True\n\n if not self.completed(1):\n if slug not in [\"yes\", \"no\"]:\n self.reply_text(Corpus.get('common_yesno'))\n self.reply_text(Corpus.get('root_greetings02'))\n return True\n\n self.push_new_threads([\"thread1\", \"thread2\"])\n if slug == \"yes\":\n self.reply_text(Corpus.get('root_greetings03'))\n if slug == \"no\":\n self.reply_text(Corpus.get('root_greetings03_2'))\n self.push_new_threads(['informations'])\n\n self.reply_text(Corpus.get('root_greetings04'))\n self.reply_text(Corpus.get('root_start01'))\n self.complete_step(1)\n self.complete_thread()\n # Push new threads\n return None # Process immediately the next topic\n return True\n","sub_path":"bot/threads/greeting_thread.py","file_name":"greeting_thread.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147493194","text":"#!python\nfrom math import sin, cos, sqrt, log, exp, isnan, isinf\nimport pandas\nimport csv\nimport random\nimport time\nfrom grammar import Grammar\nfrom genetic import Cromossome, fight\n\n# Open .csv file and import it in a dictionary structure\n#data = pandas.read_csv(\"csv-files/reduced.csv\")\n#data = pandas.read_csv(\"csv-files/training.csv\")\n\nwith open('csv-files/training.csv', 'r') as f:\n data = list(csv.reader(f, delimiter=\",\"))\ndata.pop(0)\n\nfor element in data:\n if element == []:\n data.remove(element)\n\n# Variable names (hardcoded)\nID = 0\nCement = 1\nBlasr = 2\nFlyAsh = 3\nWater = 4\nSuperplasticizer = 5\nCoarseAggregate = 6\nFineAggregate = 7\nAge = 8\nstrenght = 9\n\n\n# Genetic parameters\nC_SIZE = 30\nPOPULATION = 1000 # Must be an even number\nC_RANGE = 20\nMIX_PROB = 0.8\nMUT_PROB = 0.1\nNUM_GENERATIONS = 30\n\n# Initialize grammar\ngrammar = Grammar()\ngrammar.setVariables(['ID', 'Cement', 'Blasr', 'FlyAsh', 'Water', 'Superplasticizer', 'CoarseAggregate', 'FineAggregate', 'Age'])\n\n# Initial cromossomes\ncromossomes = []\nfor n in range(POPULATION):\n C = Cromossome()\n C.setRandomList(C_SIZE, C_RANGE)\n cromossomes.append(C)\n\nfor generation in range(NUM_GENERATIONS):\n message = \"\\n---- Generation: \" + str(generation) + \" ----\"\n print(message)\n\n # Convert cromossomes to expressions\n start = time.time()\n\n for C in cromossomes:\n if not C.isEvaluated():\n C.setExpression(grammar.cromossomeToExpression(C))\n\n # Evaluate cromossomes scores\n\n # Evaluate expression\n for C in cromossomes:\n mathErrorFlag = False\n avgScore = 0\n count = 0\n v = {'ID': 0, 'Cement': 0, 'Blasr': 0, 'FlyAsh': 0, 'Water': 0, 'Superplasticizer': 0, 'CoarseAggregate': 0, 'FineAggregate': 0, 'Age': 0}\n compiled = compile(C.getExpression(), '', 'eval')\n\n if not C.isErroneous():\n for row in data:\n v['ID'] = float(data[count][ID])\n v['Cement'] = float(data[count][Cement])\n v['Blasr'] = float(data[count][Blasr])\n v['FlyAsh'] = float(data[count][FlyAsh])\n v['Water'] = float(data[count][Water])\n v['Superplasticizer'] = float(data[count][Superplasticizer])\n v['CoarseAggregate'] = float(data[count][CoarseAggregate])\n v['FineAggregate'] = float(data[count][FineAggregate])\n v['Age'] = float(data[count][Age])\n\n try:\n calculated = eval(compiled)\n except:\n mathErrorFlag = True\n calculated = 0\n score = (float(data[count][strenght]) - calculated) ** 2\n \n avgScore = avgScore + score\n count = count + 1\n \n avgScore = avgScore / count\n if 
if isnan(avgScore) or mathErrorFlag:\n avgScore = 999999999\n C.setScore(avgScore)\n\n min_score = cromossomes[0].getScore()\n min_index = 0\n index = 0\n for C in cromossomes:\n if C.getScore() < min_score:\n min_index = index\n min_score = C.getScore()\n index = index + 1\n\n print(cromossomes[min_index].getExpression())\n print(\"Score: \" + str(cromossomes[min_index].getScore()))\n\n\n\n # Tournament\n winners = []\n for n in range(POPULATION):\n randomC1 = random.randint(0, POPULATION - 1)\n randomC2 = random.randint(0, POPULATION - 1)\n\n winner = fight(cromossomes[randomC1], cromossomes[randomC2])\n winners.append(winner)\n\n # Genetic combination: recombine the tournament winners in pairs\n i = 0\n newGeneration = []\n while i < POPULATION:\n randomNumber = random.randint(0, 100)\n randomNumber = randomNumber / 100.0\n if randomNumber < MIX_PROB:\n C1 = winners[i].getList()\n C2 = winners[i+1].getList()\n newC1 = []\n newC2 = []\n randomPosition = random.randint(0, C_SIZE - 1)\n for position in range(C_SIZE):\n if position < randomPosition:\n newC1.append(C1[position])\n newC2.append(C2[position])\n else:\n newC1.append(C2[position])\n newC2.append(C1[position])\n \n newCromossome1 = Cromossome()\n newCromossome1.setCromossomeList(newC1)\n newCromossome2 = Cromossome()\n newCromossome2.setCromossomeList(newC2)\n newGeneration.append(newCromossome1)\n newGeneration.append(newCromossome2)\n else:\n newGeneration.append(winners[i])\n newGeneration.append(winners[i+1])\n \n i = i + 2\n\n # Mutation\n for element in newGeneration:\n randomNumber = random.randint(0, 100)\n randomNumber = randomNumber / 100.0\n if randomNumber < MUT_PROB:\n randomPosition = random.randint(0, C_SIZE - 1)\n newValue = random.randint(0, C_RANGE - 1)\n element.changeCromossomeAtPosition(randomPosition, newValue)\n\n cromossomes = newGeneration\n\n\n# Last generation selection\n\nfor C in cromossomes:\n C.setExpression(grammar.cromossomeToExpression(C))\n\n# Evaluate cromossomes scores\nfor C in cromossomes:\n mathErrorFlag = False\n avgScore = 0\n count = 0\n v = {'ID': 0, 'Cement': 0, 'Blasr': 0, 'FlyAsh': 0, 'Water': 0, 'Superplasticizer': 0, 'CoarseAggregate': 0, 'FineAggregate': 0, 'Age': 0}\n\n for row in data:\n v['ID'] = float(data[count][ID])\n v['Cement'] = float(data[count][Cement])\n v['Blasr'] = float(data[count][Blasr])\n v['FlyAsh'] = float(data[count][FlyAsh])\n v['Water'] = float(data[count][Water])\n v['Superplasticizer'] = float(data[count][Superplasticizer])\n v['CoarseAggregate'] = float(data[count][CoarseAggregate])\n v['FineAggregate'] = float(data[count][FineAggregate])\n v['Age'] = float(data[count][Age])\n\n try:\n calculated = eval(C.getExpression())\n except Exception:\n mathErrorFlag = True\n calculated = 0\n score = (float(data[count][strenght]) - calculated) ** 2\n \n avgScore = avgScore + score\n count = count + 1\n \n avgScore = avgScore / count\n if isnan(avgScore) or mathErrorFlag:\n avgScore = 999999999\n C.setScore(avgScore)\n\nmin_score = cromossomes[0].getScore()\nmin_index = 0\nindex = 0\nfor C in cromossomes:\n if C.getScore() < min_score:\n min_index = index\n min_score = C.getScore()\n index = index + 1\n\nprint(\"\\n---- LAST GEN ----\")\nprint(cromossomes[min_index].getExpression())\nprint(\"Score: \" + str(cromossomes[min_index].getScore()))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"243138930","text":"#Exercise 2.1. 
Applying the mathematical rules of associativity, decide which of the following\n#expressions are equal to one another:\nimport FuncionesNumeros02\n\nNumero10 = 10.0\nNumero100 = 100.0\nNumero1000 = 1000.0\n\nresultado1 = FuncionesNumeros02.CalcularReto0201PuntoA(Numero10, Numero100, Numero1000)\nresultado2 = FuncionesNumeros02.CalcularReto0201PuntoB(Numero10, Numero100, Numero1000)\nresultado3 = FuncionesNumeros02.CalcularReto0201PuntoC(Numero10, Numero100, Numero1000)\nresultado4 = FuncionesNumeros02.CalcularReto0201PuntoD(Numero10, Numero100, Numero1000)\nresultado5 = FuncionesNumeros02.CalcularReto0201PuntoE(Numero100)\nresultado6 = FuncionesNumeros02.CalcularReto0201PuntoF(Numero100)\n\nprint (\"Resultado1=> \", resultado1)\nprint (\"Resultado2=> \", resultado2)\nprint (\"Resultado3=> \", resultado3)\nprint (\"Resultado4=> \", resultado4)\nprint (\"Resultado5=> \", resultado5)\nprint (\"Resultado6=> \", resultado6)","sub_path":"capitulo02/Reto0203.py","file_name":"Reto0203.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"422774944","text":"#===============================================================================\n# Copyright (c) 2013 by Qualcomm Technologies, Inc. All Rights Reserved.\n# QUALCOMM Proprietary/GTDR\n#===============================================================================\nimport os\nimport fnmatch\nimport sys\nimport re\nimport qurtutils\nimport SCons\nImport('qurtenv')\n\nenv = qurtenv.Clone()\nenv.Replace(QURT_TOP = os.getcwd())\nqurtenv['QURT_BUILDENV'] = env\n\n#setup target\nenv.Tool('target_tools',toolpath=['.'])\nif not env.subst('$TRGT'):\n print(\"error TRGT not defined\")\n Exit(1)\n\n#setup builders\nenv.Tool('qurt_builders',toolpath=['.'])\n\n#Initialize utilities\nqurtutils.Init(env)\n\n#Set Hacks\n#Open for better solutions\nenv.SetHacks()\n\nif env.subst('$USE_LLVM') == 'True':\n env['USE_LLVM_OPT'] = '--use_llvm'\n env.Replace(HEXAGON_WARN = env.subst('$HEXAGON_WARN') + \" -Wno-error=unused-variable -Wno-error=unused-parameter \"+\n \"-Wno-error=missing-declarations -Wno-error=cast-align -Wno-error=sizeof-pointer-memaccess -Wno-error=enum-conversion \"+\n \"-Wno-error=tautological-compare\")\nelif env.subst('$Q6VERSION') == 'v55':\n env.Replace(HEXAGON_WARN = env.subst('$HEXAGON_WARN') + \" -Wno-error=unused-but-set-variable -Wno-error=unused-but-set-parameter -Wno-error=unused-variable\")\n\n#Set up warning flags differently depending on value of EXTRA_WARNINGS\nif env['EXTRA_WARNINGS']:\n ''' No -Werror here; when we enable extra warnings, we consider them advisory only '''\n env.Replace(HEXAGON_WARN = env.subst('$HEXAGON_WARN -Wmissing-declarations -Wstrict-prototypes -Wredundant-decls -Wnested-externs').replace('-Werror',''))\n\n#CPPFLAGS hack for Assembly\nenv['ASFLAGS'] = env['CCFLAGS']\n\n#Creating installation directories\nenv['INSTALL_LIB'] = env.subst('$INSTALL_DIR') + '/lib'\nenv['INSTALL_DBG'] = env.subst('$INSTALL_DIR') + '/debugger'\nenv['INSTALL_SCRIPTS'] = env.subst('$INSTALL_DIR') + '/scripts'\nenv['INSTALL_INCLUDE'] = env.subst('$INSTALL_DIR') + '/include'\n\nclass QurtTargets_class:\n def __init__(self, env):\n self._env = env\n self._targetlist = []\n def add(self, tgt):\n self._targetlist += self._env.Flatten([tgt])\n def getlist(self):\n return self._targetlist\n\n# Subsidiary SConscripts can add their own targets into the top level build\n# by using env.QurtTargets.add(targetlist...)\n# For instance, the qurtos 
SConscript uses this to request that its global\n# symbol analysis report be built.\n\nenv.QurtTargets = QurtTargets_class(env)\nqlibs = []\n#Installing Scripts\nScripts = env.RecursiveInstall(env.subst('$INSTALL_SCRIPTS'), env.subst('$QURT_TOP')+\"/scripts\")\nScripts.extend(env.Install('${INSTALL_SCRIPTS}', \"target_tools.py\"))\nScripts.extend(env.Install('${INSTALL_SCRIPTS}', \"qurt_builders.py\"))\nenv.QurtTargets.add(Scripts)\n\n#libqurtcfs.a\nlib_c_fs_objs = SConscript('libs/c/fs/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR') + '/libs/c/fs', duplicate=0)\nlibqurtcfs = env.Library (\"$INSTALL_LIB/libqurtcfs.a\", [lib_c_fs_objs])\nqlibs.append(libqurtcfs)\nenv.QurtTargets.add(libqurtcfs)\n\n#libposix.a\nlib_posix_objs = SConscript('libs/posix/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/libs/posix', duplicate=0)\nlibposix = env.Library (\"$INSTALL_LIB/libposix.a\", [lib_posix_objs])\nqlibs.append(libposix)\nenv.QurtTargets.add(libposix)\n\n#libqube_compat.a\nlib_qube_objs = SConscript('libs/qube/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/libs/qube', duplicate=0)\nlibqube_compat = env.Library (\"$INSTALL_LIB/libqube_compat.a\", [lib_qube_objs])\nqlibs.append(libqube_compat)\nenv.QurtTargets.add(libqube_compat)\n\n#libqurt.a\nlib_c_sys_objs = SConscript('libs/c/sys/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/libs/c/sys', duplicate=0)\nlib_qurt_objs = SConscript('libs/qurt/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/libs/qurt', duplicate=0)\nlibqurt = env.Library (\"$INSTALL_LIB/libqurt.a\", [lib_c_sys_objs, lib_qurt_objs])\nqlibs.append(libqurt)\nenv.QurtTargets.add(libqurt)\n\n#libtimer.a\nlibtimer_objs = SConscript('libs/timer/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/libs/timer', duplicate=0)\nlibtimer = env.Library (\"$INSTALL_LIB/libtimer.a\", [libtimer_objs])\nqlibs.append(libtimer)\nenv.QurtTargets.add(libtimer)\n\n#libqurtkernel.a\nkernel_objs, crt0_obj = SConscript('kernel/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/kernel', duplicate=0)\nqurtos_obj = SConscript('qurtos/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/qurtos', duplicate=0)\nlibqurtkernel = env.Library (\"$INSTALL_LIB/libqurtkernel.a\", [kernel_objs, qurtos_obj])\nqlibs.append(libqurtkernel)\nqlibs.append(crt0_obj)\nenv.QurtTargets.add(libqurtkernel)\n\n#crt1.o\ncrt1env = env.Clone()\ncrt1env.Append(HEXAGON_LANIND = \"-fno-builtin\")\ncrt1env.Replace(HEXAGONCC_WARN = \"-Wall\")\ncrt1env.VariantDir(crt1env.subst('$BUILD_DIR'), '.', duplicate=0)\ncrt1_obj = crt1env.Object(crt1env.subst('$BUILD_DIR')+'/libs/c/sys/asm/crt1.S')\ncrt1_installed = crt1env.Install('${INSTALL_LIB}', crt1_obj)\nqlibs.append(crt1_installed)\nenv.QurtTargets.add(crt1_installed)\n\n#build_params\nbuildparam = env.build_param_builder(env.subst('$INSTALL_SCRIPTS')+\"/Input/build_params.txt\", Scripts)\n\n#Constant auto-generation subsidiary script\nconsts_autogen = SConscript('const/SConscript', exports='env', variant_dir=env.subst('$BUILD_DIR')+'/const', duplicate=0)\n\n#Installing Debugger files\nDbg1 = env.RecursiveInstall(env.subst('$INSTALL_DBG')+\"/T32\", env.subst('$QURT_TOP')+\"/osam/bin/T32/\"+env.subst('$Q6VERSION')) \nDbg2 = env.RecursiveInstall(env.subst('$INSTALL_DBG')+\"/cygwin\", env.subst('$QURT_TOP')+\"/osam/bin/cygwin/\"+env.subst('$Q6VERSION')) \nDbg3 = env.RecursiveInstall(env.subst('$INSTALL_DBG')+\"/lnx32\", 
env.subst('$QURT_TOP')+\"/osam/bin/lnx32/\"+env.subst('$Q6VERSION'))\nDbg4 = env.RecursiveInstall(env.subst('$INSTALL_DBG')+\"/lnx64\", env.subst('$QURT_TOP')+\"/osam/bin/lnx64/\"+env.subst('$Q6VERSION')) \nDebugger = Dbg1 + Dbg2 + Dbg3 + Dbg4\nenv.QurtTargets.add(Debugger)\n\n#Installing include files\nInclude = env.RecursiveInstall(env.subst('$INSTALL_INCLUDE'), env.subst('$QURT_TOP')+\"/api\")\nenv.QurtTargets.add(Include)\n\n#Pushing build environment information\nappendConfig = env.cust_config_builder(env.subst('$INSTALL_SCRIPTS')+\"/Input/cust_config.c\", \n [env.subst('$INSTALL_SCRIPTS')+'/Input/cust_config_template.c',consts_autogen])\nenv.QurtTargets.add(appendConfig)\n\nenv.Depends(qlibs, buildparam)\nenv.Depends(qlibs, Include)\nenv.Depends(qlibs, appendConfig)\nenv.Depends(qlibs, Debugger)\n\n''' Could just be Default(env.QurtTargets.getlist()) here, but that's a bit too verbose '''\nenv.Alias('qurt', env.QurtTargets.getlist())\nDefault('qurt')\n\nqlibs.append(env)\nReturn('qlibs')\n","sub_path":"modem_proc/core/kernel/qurt/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":6839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"597555116","text":"# -*- coding: utf-8 -*-\n\nfrom jobparser.items import JobparserItem\nfrom typing import Dict\nimport re\nfrom pymongo import MongoClient\n\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass JobparserPipeline(object):\n def __init__(self):\n client = MongoClient('localhost', 27017)\n self.mongo_db = client['scrapyDB']\n\n\n def process_item(self, item, spider):\n salary = self.parse_compensation(comp=item['salary'])\n result = {}\n result.update(item)\n del result['salary']\n result.update(salary)\n result['source'] = spider.name\n\n collection = self.mongo_db[spider.name]\n collection.insert_one(result)\n\n return item\n\n\n def parse_compensation(self, comp: str) -> Dict:\n salary_from = salary_to = units = ''\n 
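# The pattern below captures an optional 'от' (from) marker, an optional\n # run of digits and spaces, an optional 'до' (to) or '-' separator, a\n # second optional digit run, and finally the currency/units tail.\n 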
comp_search = re.search('(от)?([0-9 ]+)?(до|-)?([0-9 ]+)? (.*)$', comp.lower().replace('\xa0', ''))\n if comp_search:\n if comp_search.group(2):\n salary_from = int(comp_search.group(2).replace(' ', ''))\n if comp_search.group(3) and '-' in comp_search.group(3) or comp_search.group(\n 3) and 'до' in comp_search.group(3):\n salary_to = int(comp_search.group(4).replace(' ', ''))\n elif comp_search.group(3) and 'до' in comp_search.group(3):\n # \"до N\" with no lower bound: group(2) is empty, the upper bound is group(4)\n salary_to = int(comp_search.group(4).replace(' ', ''))\n if salary_to or salary_from:\n units = comp_search.group(5)\n\n return {'salary_from': salary_from, 'salary_to': salary_to, 'units': units}\n","sub_path":"jobparser/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"475490807","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom SERVER.models.db.db_profil import Profil, Post, Commentary, Like\nfrom SERVER.models.forms.forms_profil import PostForm, CommentaryForm\nfrom django.http import HttpResponse\n\n\ndef profil(request, userId):\n\n # form post\n Postform = PostForm()\n\n # form commentary\n Commentaryform = CommentaryForm()\n\n # page user\n user = User.objects.get(id=userId)\n profil = Profil.objects.get(user_id=userId)\n\n # list users\n users = User.objects.all()\n\n # list friends\n friends = profil.friends.all()\n\n # list posts\n posts = profil.post_set.all()\n\n # list of my likes on the profil user\n likesRequest = request.user.profil.like_set.all()\n postsLikedRequest = []\n for likes in likesRequest:\n if likes.post in profil.post_set.all():\n postsLikedRequest.append(likes.post)\n\n # count the user's file attachments\n countFiles = 0\n for post in posts:\n if post.file != \"False\":\n countFiles += 1\n\n\n return render(request, \"profil/profil_index.html\",\n {\"user\": user,\n \"users\": users,\n \"friends\": friends,\n \"posts\": posts,\n \"Postform\": Postform,\n \"Commentaryform\": Commentaryform,\n \"postsLikedRequest\": postsLikedRequest,\n \"countFiles\" : countFiles\n })\n\n\ndef invitation(request, userId):\n if request.method == 'POST':\n profil = Profil.objects.get(user_id=request.user.id)\n friend = User.objects.get(id=userId)\n profil.friends.add(friend)\n return redirect('profil',userId)\n\n\ndef commentary(request, postId, userId):\n if request.method == 'POST':\n Commentaryform = CommentaryForm(request.POST)\n if Commentaryform.is_valid():\n text = Commentaryform.cleaned_data.get('text')\n author = Profil.objects.get(user=request.user)\n commentary = Commentary.objects.create(text=text, author_id=author.id, post_id=postId)\n commentary.save()\n\n return redirect('profil',userId)\n\n\ndef post(request, userId):\n if request.method == 'POST':\n Postform = PostForm(request.POST, request.FILES)\n if Postform.is_valid():\n title = Postform.cleaned_data.get('title')\n text = Postform.cleaned_data.get('text')\n file = Postform.cleaned_data.get('file')\n author = Profil.objects.get(user=request.user)\n post = Post.objects.create(title=title, text=text, author_id=author.id, file=file)\n post.save()\n return redirect('profil', userId)\n\n\ndef like(request, postId, userId):\n if request.method == 'POST':\n author = Profil.objects.get(user_id=request.user.id)\n like = Like.objects.create(author_id=author.id, post_id=postId)\n like.save()\n return redirect('profil',userId)","sub_path":"SERVER/views/profil.py","file_name":"profil.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"543611129","text":"from tensorflow import keras\nfrom tensorflow.keras import backend as K\n\nfrom ..generic import FunctionApproximator\n\n\n__all__ = (\n 'ConnectFourFunctionApproximator',\n)\n\n\nclass ConnectFourFunctionApproximator(FunctionApproximator):\n def body(self, S, variable_scope):\n assert variable_scope in ('primary', 'target')\n\n def v(name):\n return '{}/{}'.format(variable_scope, name)\n\n def extract_state(S):\n return K.cast(S[:, 1:, :, :], 'float')\n\n def extract_available_actions_mask(S):\n return K.cast(S[:, 0, :, 0], 'bool')\n\n # extract the mask over available actions from the state observation\n self.available_actions_mask = keras.layers.Lambda(\n extract_available_actions_mask,\n name='extract_available_actions_mask')(S)\n\n layers = [\n keras.layers.Lambda(extract_state, name='extract_state'),\n keras.layers.Conv2D(\n name=v('conv1'), filters=20, kernel_size=4, strides=1,\n activation='relu'),\n keras.layers.Conv2D(\n name=v('conv2'), filters=40, kernel_size=2, strides=1,\n activation='relu'),\n keras.layers.Flatten(name=v('flatten')),\n keras.layers.Dense(\n name=v('dense1'), units=64, activation='linear'),\n ]\n\n # forward pass\n X = S\n for layer in layers:\n X = layer(X)\n\n return X\n","sub_path":"keras_gym/function_approximators/predefined/connect_four.py","file_name":"connect_four.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"455475807","text":"#!/usr/bin/env python3\nfrom pid import PID\nimport os\nimport time\n\nclass Rotation:\n def __init__(self, motor_left, motor_right, gyro):\n self.left_motor = motor_left\n self.right_motor = motor_right\n self.gyro = gyro\n\n self.pid = PID(1,1,0, max_val=self.left_motor.max_speed/2, min_val=-self.left_motor.max_speed/2, debug=True)\n os.system(\"cat debug_rotation.log >> debug_rotation.log.old; rm debug_rotation.log\")\n # Average of 3 measurements of 10x360° (CW): 35+15+27,5 / 3 = 28,83° of error\n # CCW is approximately accurate\n # so for CW:\n # 3600+28,83° = 3627.83° is the real angle it actually turned\n # so per full turn that is 2.883° of error\n # so for 1 measured degree the real rotation is 1+2.883/360=1+0,0080083°=1,008008 degrees\n self.drift = 0 \n self.drift_koef_cw = 0.005\n self.drift_koef_ccw = 0\n\n def print_to_file(self, string):\n with open(\"debug_rotation.log\",'a') as f:\n f.write(string)\n\n def calc_angle(self, time_delta):\n rate = 0\n for i in range(10):\n rate += self.gyro.rate\n\n return rate/10*time_delta\n\n def __call__(self, abs_degrees):\n self.print_to_file(\"---start-rotation---\\n\")\n num_of_end = 0\n self.left_motor.ramp_up_sp = 500\n self.right_motor.ramp_up_sp = 500\n deg_current = 0\n start_time = time.time()\n\n #self.gyro.get\n\n \n #old_meas = self.gyro.angle\n while 1:\n '''\n deg_measurement = self.gyro.angle\n if deg_measurement > old_meas:\n self.drift += self.drift_koef_cw*(deg_measurement-old_meas)\n elif deg_measurement < old_meas:\n self.drift += self.drift_koef_ccw*(old_meas-deg_measurement)\n old_meas=deg_measurement\n '''\n\n if abs(deg_current-abs_degrees)<0.2:\n \n self.left_motor.command=\"stop\"\n self.right_motor.command=\"stop\"\n self.left_motor.speed_sp = 0\n self.right_motor.speed_sp = 0\n \n time.sleep(0.5)\n if abs(deg_current-abs_degrees)<0.2:\n self.pid.reset()\n self.print_to_file(\"...end-rotation...\\n\")\n break\n\n err = (abs_degrees-deg_current)\n reg, true_reg = self.pid(err)\n 
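# reg is the value driven to the motors below; true_reg is assumed to be\n # the raw PID output before min/max clamping (the PID above was built with\n # min_val/max_val), so logging both makes controller saturation visible.\n 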
self.print_to_file(str(abs_degrees)+\",\"+str(deg_current)+\",\"+str(err)+\",\"+ str(reg)+\",\"+ str(true_reg)+\"\\n\")\n self.left_motor.speed_sp = reg\n self.right_motor.speed_sp = -reg\n self.left_motor.command='run-forever'\n self.right_motor.command='run-forever'\n end_time = time.time()\n time_delta = end_time-start_time\n start_time=time.time()\n deg_current+=self.calc_angle(time_delta)\n\n","sub_path":"rotation_by_rot_speed.py","file_name":"rotation_by_rot_speed.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520654922","text":"\"\"\"\nApproach: 1) BFS 2) DFS\nfor both\nTC: O(n)\nSC: O(n)\n\"\"\"\n\nfrom collections import deque\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n # Approach : BFS\n \"\"\"\n def largestValues(self, root: TreeNode) -> List[int]:\n result = []\n if not root:\n return result\n\n\n q = deque()\n q.append(root)\n\n while q:\n size = len(q)\n level_max = float('-inf')\n for i in range(size):\n curr = q.popleft()\n if curr.val > level_max:\n level_max = curr.val\n if curr.left: q.append(curr.left)\n if curr.right: q.append(curr.right)\n result.append(level_max)\n return result\n \"\"\"\n\n # Approach : DFS\n def largestValues(self, root: TreeNode) -> List[int]:\n result = []\n if not root:\n return result\n\n self.dfs(root, 0, result)\n return result\n\n def dfs(self, root, level, result):\n # base\n if not root: return\n\n # logic\n if level == len(result):\n result.append(root.val)\n else:\n result[level] = max(root.val, result[level])\n\n self.dfs(root.left, level + 1, result)\n self.dfs(root.right, level + 1, result)\n\n\n\n","sub_path":"Problem-1.py","file_name":"Problem-1.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529430667","text":"from flask import Flask, request\nimport os \n\n\napp = Flask(__name__)\n@app.route('/')\n\ndef testSH():\n ueID = request.headers.get(\"Ueid\")\n os.system('./test.sh {} {}' .format('TestRegistration', ueID)) \n return 'Test Started!'\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\",port=8080)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217419345","text":"H, W = map(int, input().split())\n\nblack = [[0] * W for _ in range(H)]\nwhite = [[0] * W for _ in range(H)]\n\nfor h in range(H):\n for w, c in enumerate(map(int, input().split())):\n if (h + w) % 2 == 0:\n black[h][w] = c\n else:\n white[h][w] = c\n\nblackSum = [[0] * (W + 1) for _ in range(H + 1)]\nwhiteSum = [[0] * (W + 1) for _ in range(H + 1)]\n\nfor h in range(1, H + 1):\n for w in range(1, W + 1):\n blackSum[h][w] = blackSum[h - 1][w] + blackSum[h][w - 1] - blackSum[h - 1][w - 1] + black[h - 1][w - 1]\n whiteSum[h][w] = whiteSum[h - 1][w] + whiteSum[h][w - 1] - whiteSum[h - 1][w - 1] + white[h - 1][w - 1]\n\nans = 0\nfor top in range(H):\n for left in range(W):\n for bottom in range(top + 1, H + 1):\n for right in range(left + 1, W + 1):\n blackC = blackSum[bottom][right] - blackSum[bottom][left] - blackSum[top][right] + blackSum[top][left]\n whiteC = whiteSum[bottom][right] - whiteSum[bottom][left] - whiteSum[top][right] + whiteSum[top][left]\n if blackC == whiteC:\n ans 
= max(ans, (bottom - top) * (right - left))\n\nprint(ans)","sub_path":"AtCoder/arc/025b.py","file_name":"025b.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"285064613","text":"import os\nimport argparse\nfrom src.bigdata.api import data_get\n\nAPP_KEY = os.environ['APP_KEY']\n# connect to our source\nif __name__ == '__main__':\n\t# define parser to pass arguments for our function\n\tparser = argparse.ArgumentParser(description='Retrieving OCPV Data')\n\tparser.add_argument('--page_size',type = int, help = \"Size of a page\" )\n\tparser.add_argument('--num_pages', type = int, default= -1, help = 'Number of Pages')\n\tparser.add_argument('--output', type = str, default= 'print', help = 'Output file name; leave out to print to stdout instead')\n\targs = parser.parse_args()\n\n\tdata_get(APP_KEY,args.page_size,args.num_pages,args.output)\n\n\t","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"584627787","text":"def ascii_prime(s):\r\n\r\n s_num = \"\"\r\n for i in s:\r\n s_num += str(ord(i))\r\n print(s_num)\r\n\r\n divided_by = 0\r\n i = 1\r\n while divided_by <= 2 and i <=int(s_num):\r\n if int(s_num)%i == 0:\r\n divided_by += 1\r\n i += 1\r\n\r\n if divided_by == 2:\r\n print(\"Is a prime number!\")\r\n else:\r\n print(\"Is NOT a prime number!\")\r\n\r\n\r\nuser_string = input(\"Type a (preferably short) word or phrase: \")\r\nascii_prime(user_string)\r\n\r\ninput()\r\n","sub_path":"Ergasia4.py","file_name":"Ergasia4.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"178061237","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 5 20:55:24 2019\n\n@author: Erdo\n\"\"\"\n# %% libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n# %% \n\"\"\" Data Import \"\"\"\ndata = pd.read_excel('Iris.xls') \ndata.head()\n\n#%%\nx_data = data.iloc[:,0:4].values\ny_data = data.iloc[:,-1:].values\n\n#%%\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nx = sc.fit_transform(x_data)\n\n#%%\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test = train_test_split(x,y_data, test_size = 0.33 , random_state = 0)\nscores =[]\nmethods =[]\n\n#%%\n#------------------------------Logistic Regression-----------------------------\n\nfrom sklearn.linear_model import LogisticRegression\nlr = LogisticRegression()\nlr.fit(x_train,y_train)\ny_lr_pred = lr.predict(x_test)\nprint(\"Logistic regression score:\",lr.score(x_test,y_test))\ncm0 = confusion_matrix(y_test,y_lr_pred)\nprint(cm0)\nscores.append(lr.score(x_test,y_test))\nmethods.append(\"lr\")\n\n#%%\n#------------------------------KNN Classifier----------------------------------\n\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=4, metric='minkowski')\nknn.fit(x_train,y_train)\ny_knn_pred = knn.predict(x_test)\nprint(\"KNN score :\",knn.score(x_test,y_test))\n\ncm1 =confusion_matrix(y_test,y_knn_pred)\nprint(cm1)\nscores.append(knn.score(x_test,y_test))\nmethods.append(\"knn\")\n#%%\n# ------------------------------Decision tree classifier-----------------------\n\nfrom sklearn.tree import DecisionTreeClassifier\n
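# 'gini' impurity is scikit-learn's default split criterion; 'entropy'\n# (information gain) is the usual alternative.\n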
dt = DecisionTreeClassifier(criterion='gini')\ndt.fit(x_train,y_train)\ny_dt_pred = dt.predict(x_test)\nscore = dt.score(x_test,y_test)\n\nprint(\"Decision tree score:\",score)\n\ncm2 = confusion_matrix(y_test,y_dt_pred)\nprint(cm2) \nscores.append(dt.score(x_test,y_test))\nmethods.append(\"Dt\")\n\n#%%\n#---------------------------- Random Forest Classifier-------------------------\n\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier(n_estimators=5, criterion = 'gini')\nrfc.fit(x_train,y_train)\ny_rfc_pred = rfc.predict(x_test)\ncm3 =confusion_matrix(y_test,y_rfc_pred)\n\nprint(\"Random forest score:\",rfc.score(x_test,y_test))\nprint(cm3)\nscores.append(rfc.score(x_test,y_test))\nmethods.append(\"rfc\")\n\n#%%\n#---------------------------SVM Classification---------------------------------\nfrom sklearn.svm import SVC\nsvc = SVC(kernel='rbf')\nsvc.fit(x_train,y_train)\ny_svc_pred = svc.predict(x_test)\nprint(\"SVM score:\",svc.score(x_test,y_test))\ncm4 = confusion_matrix(y_test,y_svc_pred)\nprint(cm4)\nscores.append(svc.score(x_test,y_test))\nmethods.append(\"SVM\")\n\n#%%\n#------------------------Naive Bayes Classification----------------------------\n\nfrom sklearn.naive_bayes import GaussianNB\ngnb = GaussianNB()\ngnb.fit(x_train,y_train)\ny_gnb_pred = gnb.predict(x_test)\nprint(\"Gaussian Naive bayes:\",gnb.score(x_test,y_test))\ncm5 = confusion_matrix(y_test,y_gnb_pred)\nprint(cm5)\nscores.append(gnb.score(x_test,y_test))\nmethods.append(\"Gnb\")\n\n#%%\n#------------------------------------------------------------------------------\n# Visualization of Scores (keep scores aligned with their method labels)\n\nplt.figure(figsize =(12,6))\nplt.plot(methods,scores)\nplt.xlabel(\"Classification Methods\")\nplt.ylabel(\"Accuracy Scores\")\nplt.show()","sub_path":"16. Classification Bölüm Sonu/hw2-fis.py","file_name":"hw2-fis.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"653419752","text":"# -*- coding: UTF-8 -*-\n__Author__ = \"Sky Huang\"\nimport logging\nimport os\nfrom datetime import datetime\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nROOT = os.path.dirname(BASE_DIR)\n\nclass MyLogger(object):\n logger = logging.getLogger('my_color_logger')\n resultPath = os.path.join(ROOT, \"result\")\n\n # Create the result folder if it does not exist\n if not os.path.exists(resultPath):\n os.makedirs(resultPath)\n # Name the run-result directory after the current date\n logPath = os.path.join(resultPath, str(datetime.now().strftime(\"%Y%m%d\")))\n # Create the folder that keeps the logs\n if not os.path.exists(logPath):\n os.makedirs(logPath)\n\n # Define the log level\n if not logger.handlers:\n logger.setLevel(logging.INFO)\n logger_handler = logging.FileHandler(os.path.join(logPath, str(datetime.now().strftime(\"%Y%m%d\")) + \".log\"))\n formatter = logging.Formatter('[%(asctime)s] - %(name)s - %(levelname)s - %(message)s')\n logger_handler.setFormatter(formatter)\n logger.addHandler(logger_handler)\n\n @classmethod\n def debug(cls, msg):\n cls.logger.debug(str(msg))\n\n @classmethod\n def info(cls, msg):\n cls.logger.info(str(msg))\n\n @classmethod\n def error(cls, msg):\n cls.logger.error(str(msg))\n\n @classmethod\n def warn(cls, msg):\n cls.logger.warning(str(msg))\n\n# logs = MyLogger()\n# logs.info(\"huangwenliang\")","sub_path":"utils/log/mylog.py","file_name":"mylog.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"333193129","text":"import os\nfrom argparse import Namespace\n\nfrom 
luscious_dl.logger import logger_file_handler, logger\nfrom luscious_dl.parser import is_a_valid_integer\nfrom luscious_dl.start import start\nfrom luscious_dl.utils import cls, create_default_files, open_config_menu, get_config_setting, read_list, info, \\\n ListOrganizer\n\n\ndef list_txt_organizer(items: list[str], prefix: str) -> None:\n \"\"\"\n :param items: List of urls or ids\n :param prefix: album/user\n \"\"\"\n for item in items:\n ListOrganizer.remove(item)\n ListOrganizer.add(f'{prefix}-{int(item)}' if is_a_valid_integer(item) else item)\n\n\ndef create_namespace(album_inputs=None, user_inputs=None, keyword=None, search_download=False, page=1, max_pages=1,\n directory=None, threads=os.cpu_count(), retries=5, timeout=30, delay=0) -> Namespace:\n return Namespace(album_inputs=album_inputs, user_inputs=user_inputs, keyword=keyword, search_download=search_download,\n page=page, max_pages=max_pages, directory=directory, threads=threads, retries=retries,\n timeout=timeout, delay=delay)\n\n\ndef menu() -> None:\n \"\"\"Menu\"\"\"\n info()\n create_default_files()\n logger_file_handler()\n output_dir = os.path.abspath(os.path.normcase(get_config_setting('directory')))\n pool_size = get_config_setting('pool')\n retries = get_config_setting('retries')\n timeout = get_config_setting('timeout')\n delay = get_config_setting('delay')\n\n while True:\n option = input('Options:\\n'\n '1 - Download albums by URL or ID.\\n'\n '2 - Download all user albums\\n'\n '3 - Download albums from list.txt.\\n'\n '4 - Search albums by keyword.\\n'\n '5 - Settings.\\n'\n '0 - Exit.\\n'\n '> ')\n cls()\n\n if option in ['1', '2']:\n inputs = input('0 - Back.\\n'\n f'Enter {\"album\" if option == \"1\" else \"user\"} URL or ID.\\n> ')\n cls()\n if inputs != '0':\n args = create_namespace(album_inputs=inputs if option == '1' else None,\n user_inputs=inputs if option == '2' else None,\n directory=output_dir, threads=pool_size, retries=retries, timeout=timeout, delay=delay)\n start(args)\n list_txt_organizer([input_.strip() for input_ in inputs.split(',')], 'album' if option == '1' else 'user')\n logger.log(5, 'URLs/IDs added to completed list.')\n\n elif option == '3':\n list_txt = read_list()\n args = create_namespace(album_inputs=','.join(list_txt),\n directory=output_dir, threads=pool_size, retries=retries, timeout=timeout, delay=delay)\n start(args)\n list_txt_organizer(list_txt, 'album')\n logger.log(5, 'URLs/IDs added to completed list.')\n\n elif option == '4':\n keyword = input('Enter keyword\\n> ')\n if not keyword:\n print('Please enter a keyword.\\n')\n return\n page = input('Enter starting page number or leave blank\\n> ')\n page = int(page) if is_a_valid_integer(page) else 1\n max_pages = input('Enter max page or leave blank\\n> ')\n max_pages = int(max_pages) if is_a_valid_integer(max_pages) else 1\n search_download = True if input('Download search results? 
(\"Y/N\") ').strip() in 'yY' else False\n args = create_namespace(keyword=keyword, search_download=search_download, page=page, max_pages=max_pages)\n start(args)\n\n elif option == '5':\n open_config_menu()\n\n elif option == '0':\n exit()\n\n else:\n print('Invalid Option.\\n')\n\n\nif __name__ == '__main__':\n menu()\n","sub_path":"luscious_dl/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246899696","text":"S = input()\nx, y = map( int, input().split())\nc = 0\nver = []\nrow = []\nl = 0\nfor s in S:\n if s == \"F\":\n c += 1\n else:\n if l%2 == 0:\n row.append(c)\n else:\n ver.append(c)\n l += 1\n c = 0\nif l%2 == 0:\n row.append(c)\nelse:\n ver.append(c)\n\nx -= row.pop(0)\nrow.sort( key = None, reverse = True)\nver.sort( key = None, reverse = True)\nnx, ny = 0, 0\nfor a in row:\n if a >= 0:\n if nx <= x:\n nx += a\n else:\n nx -= a\n else:\n if nx > x:\n nx += a\n else:\n nx -= a\nfor a in ver:\n if a >= 0:\n if ny <= y:\n ny += a\n else:\n ny -= a\n else:\n if ny > y:\n ny += a\n else:\n ny -= a\n\nif x == nx and y == ny:\n print(\"Yes\")\nelse:\n print(\"No\")\n\n#dpっぽい\n","sub_path":"beginner/082/D3.py","file_name":"D3.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548512951","text":"#!-*-coding:utf-8-*-\nimport pandas as pd\n\npd.options.mode.chained_assignment = None # default='warn'\n\n\ndef compare(df_wg_origin, df_zy_origin):\n \"\"\"对比网管和资源的数据,返回对比结果\n\n :df_wg_origin: 网管数据,DataFrame\n :df_zy_origin: 资源数据,DataFrame\n :returns: 对比结果数据,DataFrame\n\n \"\"\"\n # '''读取网管和资源系统原始数据csv'''\n # df_wg_origin = pd.read_csv('wg_device.csv', encoding='utf-8-sig')\n # df_zy_origin = pd.read_csv('device.csv', encoding='utf-8-sig')\n # ''' 对网管原始数据做处理,加一列标注地市'''\n df_wg_origin['wg_city'] = df_wg_origin.apply(\n lambda r: '广州'\n if\n r['wg_device_name'].startswith('GZ') or\n r['wg_device_name'].startswith('SCC-GZ') or\n r['wg_device_name'].startswith('GD-GZ') else '深圳'\n if r['wg_device_name'].startswith('SZ') or\n r['wg_device_name'].startswith('GD-SZ') else '东莞'\n if r['wg_device_name'].startswith('DG') or\n r['wg_device_name'].startswith('GD-DG') else '佛山'\n if r['wg_device_name'].startswith('FS') or\n r['wg_device_name'].startswith('GD-FS') else '中山'\n if r['wg_device_name'].startswith('ZS') or\n r['wg_device_name'].startswith('GD-ZS') else '江门'\n if r['wg_device_name'].startswith('JM') or\n r['wg_device_name'].startswith('GD-JM') else '珠海'\n if r['wg_device_name'].startswith('ZH') or\n r['wg_device_name'].startswith('GD-ZH') else '惠州'\n if r['wg_device_name'].startswith('HZ') or\n r['wg_device_name'].startswith('GD-HZ') else '肇庆'\n if r['wg_device_name'].startswith('ZQ') or\n r['wg_device_name'].startswith('GD-ZQ') else '汕尾'\n if r['wg_device_name'].startswith('SW') or\n r['wg_device_name'].startswith('GD-SW') else '清远'\n if r['wg_device_name'].startswith('QY') or\n r['wg_device_name'].startswith('GD-QY') else '河源'\n if r['wg_device_name'].startswith('HY') or\n r['wg_device_name'].startswith('GD-HY') else '韶关'\n if r['wg_device_name'].startswith('SG') or\n r['wg_device_name'].startswith('GD-SG') else '汕头'\n if r['wg_device_name'].startswith('ST') or\n r['wg_device_name'].startswith('GD-ST') else '揭阳'\n if r['wg_device_name'].startswith('JY') or\n r['wg_device_name'].startswith('GD-JY') else '潮州'\n if r['wg_device_name'].startswith('CZ') or\n 
r['wg_device_name'].startswith('GD-CZ') else '梅州'\n if r['wg_device_name'].startswith('MZ') or\n r['wg_device_name'].startswith('GD-MZ') else '湛江'\n if r['wg_device_name'].startswith('ZJ') or\n r['wg_device_name'].startswith('GD-ZJ') else '茂名'\n if r['wg_device_name'].startswith('MM') or\n r['wg_device_name'].startswith('GD-MM') else '阳江'\n if r['wg_device_name'].startswith('YJ') or\n r['wg_device_name'].startswith('GD-YJ') else '云浮'\n if r['wg_device_name'].startswith('YF') or\n r['wg_device_name'].startswith('GD-YF') else '', axis=1)\n # ''' 对网管原始数据做处理,挑选有新综数据的地市'''\n df_wg_origin = df_wg_origin[\n (df_wg_origin['wg_city'] == '深圳') | (\n df_wg_origin['wg_city'] == '珠海') | (\n df_wg_origin['wg_city'] == '江门') | (\n df_wg_origin['wg_city'] == '东莞') | (\n df_wg_origin['wg_city'] == '湛江') | (\n df_wg_origin['wg_city'] == '肇庆') | (\n df_wg_origin['wg_city'] == '阳江') | (\n df_wg_origin['wg_city'] == '茂名') | (\n df_wg_origin['wg_city'] == '云浮')]\n # 提取资源的分公司信息\n df_zy_origin['tl_city'] = df_zy_origin.apply(\n lambda r: '广州'\n if\n r['tl_ems_name'].startswith('GZ') or\n r['tl_ems_name'].startswith('SCC-GZ') or\n r['tl_ems_name'].startswith('GD-GZ') else '深圳'\n if r['tl_ems_name'].startswith('SZ') or\n r['tl_ems_name'].startswith('GD-SZ') else '东莞'\n if r['tl_ems_name'].startswith('DG') or\n r['tl_ems_name'].startswith('GD-DG') else '佛山'\n if r['tl_ems_name'].startswith('FS') or\n r['tl_ems_name'].startswith('GD-FS') else '中山'\n if r['tl_ems_name'].startswith('ZS') or\n r['tl_ems_name'].startswith('GD-ZS') else '江门'\n if r['tl_ems_name'].startswith('JM') or\n r['tl_ems_name'].startswith('GD-JM') else '珠海'\n if r['tl_ems_name'].startswith('ZH') or\n r['tl_ems_name'].startswith('GD-ZH') else '惠州'\n if r['tl_ems_name'].startswith('HZ') or\n r['tl_ems_name'].startswith('GD-HZ') else '肇庆'\n if r['tl_ems_name'].startswith('ZQ') or\n r['tl_ems_name'].startswith('GD-ZQ') else '汕尾'\n if r['tl_ems_name'].startswith('SW') or\n r['tl_ems_name'].startswith('GD-SW') else '清远'\n if r['tl_ems_name'].startswith('QY') or\n r['tl_ems_name'].startswith('GD-QY') else '河源'\n if r['tl_ems_name'].startswith('HY') or\n r['tl_ems_name'].startswith('GD-HY') else '韶关'\n if r['tl_ems_name'].startswith('SG') or\n r['tl_ems_name'].startswith('GD-SG') else '汕头'\n if r['tl_ems_name'].startswith('ST') or\n r['tl_ems_name'].startswith('GD-ST') else '揭阳'\n if r['tl_ems_name'].startswith('JY') or\n r['tl_ems_name'].startswith('GD-JY') else '潮州'\n if r['tl_ems_name'].startswith('CZ') or\n r['tl_ems_name'].startswith('GD-CZ') else '梅州'\n if r['tl_ems_name'].startswith('MZ') or\n r['tl_ems_name'].startswith('GD-MZ') else '湛江'\n if r['tl_ems_name'].startswith('ZJ') or\n r['tl_ems_name'].startswith('GD-ZJ') else '茂名'\n if r['tl_ems_name'].startswith('MM') or\n r['tl_ems_name'].startswith('GD-MM') else '阳江'\n if r['tl_ems_name'].startswith('YJ') or\n r['tl_ems_name'].startswith('GD-YJ') else '云浮'\n if r['tl_ems_name'].startswith('YF') or\n r['tl_ems_name'].startswith('GD-YF') else '', axis=1)\n # ''' 对网管原始数据做处理,挑选有新综数据的地市'''\n # 不比对BAS和IDC\n df_zy_origin.drop(\n df_zy_origin\n [df_zy_origin['tl_ems_name'].str.contains('BAS|IDC|\\.I\\.gd')].index,\n inplace=True)\n df_wg_origin.drop(\n df_wg_origin\n [df_wg_origin['wg_device_name'].str.contains('BAS|IDC|\\.I\\.gd')].index,\n inplace=True)\n # ''' 对资源原始数据做处理,翻译字段'''\n df_zy_origin['tl_life_status'] = df_zy_origin.apply(\n lambda r: '已用'\n if r['tl_life_status'] == 80204847 else '在建'\n if r['tl_life_status'] == 80204848 else '退网'\n if r['tl_life_status'] == 100375 else '闲置'\n if 
r['tl_life_status'] == 100376 else '报废'\n if r['tl_life_status'] == 102383 else '调拨'\n if r['tl_life_status'] == 80204684 else '计划退网'\n if r['tl_life_status'] == 80206209 else '', axis=1)\n df_zy_origin['tl_project_status'] = df_zy_origin.apply(\n lambda r: '预录入'\n if r['tl_project_status'] == 80204666 else '待验收'\n if r['tl_project_status'] == 80204667 else '已验收'\n if r['tl_project_status'] == 80204668 else '', axis=1)\n df_zy_origin['tl_network_layer'] = df_zy_origin.apply(\n lambda r: 'MCE层'\n if r['tl_network_layer'] == 80206934 else '城域核心层'\n if r['tl_network_layer'] == 80206932 else '汇聚层'\n if r['tl_network_layer'] == 100655 else '接入层'\n if r['tl_network_layer'] == 100656 else '省核心层'\n if r['tl_network_layer'] == 80206933 else '', axis=1)\n df_zy_origin['tl_owner_net'] = df_zy_origin.apply(\n lambda r: 'IDC'\n if r['tl_owner_net'] == 102566 else 'IPTV'\n if r['tl_owner_net'] == 102567 else '视频网络'\n if r['tl_owner_net'] == 80204680 else 'CN2'\n if r['tl_owner_net'] == 102562 else '宽带接入网'\n if r['tl_owner_net'] == 80204678 else 'CHINANET'\n if r['tl_owner_net'] == 102561 else 'IPRAN'\n if r['tl_owner_net'] == 101397 else 'DCN'\n if r['tl_owner_net'] == 100741 else '金融专网'\n if r['tl_owner_net'] == 80204679 else '客户网络'\n if r['tl_owner_net'] == 102587 else '城域网'\n if r['tl_owner_net'] == 102578 else '基础数据网'\n if r['tl_owner_net'] == 102585 else 'CDMA'\n if r['tl_owner_net'] == 101387 else '软交换'\n if r['tl_owner_net'] == 101413 else 'WIFI'\n if r['tl_owner_net'] == 102576 else '', axis=1)\n df_zy_origin['tl_role'] = df_zy_origin.apply(\n lambda r: 'PE'\n if r['tl_role'] == 100661 else 'CE'\n if r['tl_role'] == 100662 else 'PI路由器'\n if r['tl_role'] == 101442 else 'ASBR'\n if r['tl_role'] == 102279 else 'BR'\n if r['tl_role'] == 102280 else 'CR'\n if r['tl_role'] == 102282 else 'P'\n if r['tl_role'] == 102283 else 'RR'\n if r['tl_role'] == 102285 else '出口路由器'\n if r['tl_role'] == 102286 else '软交换专用数据路由器(ROT)'\n if r['tl_role'] == 102287 else 'A'\n if r['tl_role'] == 80206747 else 'B'\n if r['tl_role'] == 80206748 else 'D'\n if r['tl_role'] == 80206749 else 'M'\n if r['tl_role'] == 80206750 else 'X'\n if r['tl_role'] == 80206751 else 'EPC CE'\n if r['tl_role'] == 80206752 else 'BSC CE'\n if r['tl_role'] == 80206753 else 'MCE'\n if r['tl_role'] == 80206754 else '其它'\n if r['tl_role'] == 80206969 else 'SR'\n if r['tl_role'] == 80206931 else 'BAS'\n if r['tl_role'] == 80206930 else '', axis=1)\n df_zy_origin['tl_speciality'] = df_zy_origin.apply(\n lambda r: '数据'\n if r['tl_speciality'] == 80204670 else '光缆'\n if r['tl_speciality'] == 80204675 else '电缆'\n if r['tl_speciality'] == 80204676 else '公共'\n if r['tl_speciality'] == 80204674 else '传输'\n if r['tl_speciality'] == 80204669 else '支撑'\n if r['tl_speciality'] == 80204677 else '动力'\n if r['tl_speciality'] == 80204673 else '交换'\n if r['tl_speciality'] == 80204671 else '无线'\n if r['tl_speciality'] == 80204672 else '', axis=1)\n # ''' 筛选资源数据,已用/在建的设备才需要比对'''\n df_zy_origin = df_zy_origin[\n (df_zy_origin['tl_life_status'] == '已用') |\n (df_zy_origin['tl_life_status'] == '在建')]\n\n # '''资源和网管都有'''\n # '''资源多条记录、网管多条记录'''\n # '''资源有网管无'''\n # '''网管有资源无'''\n df_wg = df_wg_origin.copy(deep=True)\n df_zy = df_zy_origin.copy(deep=True)\n df_wg.loc[:, 'duplicated_wg'] = df_wg.duplicated(\n ['wg_device_telnet_ip', 'wg_device_name'])\n df_zy.loc[:, 'duplicated_zy'] = df_zy.duplicated(\n ['tl_telnet_ip', 'tl_ems_name'])\n df_merge_1 = pd.merge(\n df_wg, df_zy, how='outer', left_on=[\n 'wg_device_telnet_ip', 'wg_device_name'], right_on=[\n 'tl_telnet_ip', 
'tl_ems_name'])\n df_merge_1['result'] = df_merge_1.apply(\n lambda r: '资源和网管都有'\n if r['duplicated_zy'] == False and r['duplicated_wg'] == False else\n '网管多条记录'\n if r['duplicated_wg'] == True and r['duplicated_zy'] == False else\n '资源多条记录'\n if r['duplicated_zy'] == True and r['duplicated_wg'] == False else\n '资源有网管无'\n if pd.isnull(r['wg_device_telnet_ip']) | pd.isnull(\n r['wg_device_name']) else '网管有资源无'\n if pd.isnull(r['tl_telnet_ip']) | pd.isnull(r['tl_ems_name']) else\n '资源和网管多条记录', axis=1)\n df_merge_1 = df_merge_1.drop(['duplicated_wg', 'duplicated_zy'], axis=1)\n df_merge_1 = df_merge_1.drop_duplicates()\n\n # '''IP一致设备名称不一致'''\n df_wg = df_wg_origin.copy(deep=True)\n df_zy = df_zy_origin.copy(deep=True)\n df_merge_2 = pd.merge(\n df_wg,\n df_zy,\n how='inner',\n left_on=['wg_device_telnet_ip'],\n right_on=['tl_telnet_ip'])\n df_merge_2['result'] = df_merge_2.apply(\n lambda r: 'IP一致设备名称不一致'\n if r['wg_device_name'] != r['tl_ems_name'] else '', axis=1)\n df_merge_2 = df_merge_2[df_merge_2['result'] == 'IP一致设备名称不一致']\n\n # '''IP不一致设备名称一致'''\n df_wg = df_wg_origin.copy(deep=True)\n df_zy = df_zy_origin.copy(deep=True)\n df_merge_3 = pd.merge(\n df_wg,\n df_zy,\n how='inner',\n left_on=['wg_device_name'],\n right_on=['tl_ems_name'])\n df_merge_3['result'] = df_merge_3.apply(\n lambda r: 'IP不一致设备名称一致'\n if r['wg_device_telnet_ip'] != r['tl_telnet_ip'] else '', axis=1)\n df_merge_3 = df_merge_3[df_merge_3['result'] == 'IP不一致设备名称一致']\n\n frames = [df_merge_1, df_merge_2, df_merge_3]\n result = pd.concat(frames, ignore_index=True)\n\n result_tmp_1 = result.copy(deep=True)\n result_tmp_2 = result.copy(deep=True)\n result_tmp_3 = result.copy(deep=True)\n result_tmp_1 = result_tmp_1[result_tmp_1['result'].str.contains(\n '资源和网管都有|网管多条记录|资源多条记录|资源和网管多条记录')]\n result_tmp_2 = result_tmp_2[result_tmp_2[\n 'result'].str.contains('网管有资源无|资源有网管无')]\n result_tmp_3 = result_tmp_3[result_tmp_3['result'].str.contains(\n 'IP不一致设备名称一致|IP一致设备名称不一致')]\n\n name_list = result_tmp_3['tl_ems_name'].values\n ip_list = result_tmp_3['wg_device_telnet_ip'].values\n\n for name in name_list:\n result_tmp_2.drop(\n result_tmp_2[\n result_tmp_2['wg_device_name'] == name].index,\n inplace=True)\n result_tmp_2.drop(\n result_tmp_2[\n result_tmp_2['tl_ems_name'] == name].index,\n inplace=True)\n for ip in ip_list:\n result_tmp_2.drop(\n result_tmp_2[\n result_tmp_2['wg_device_telnet_ip'] == ip].index,\n inplace=True)\n result_tmp_2.drop(\n result_tmp_2[\n result_tmp_2['tl_telnet_ip'] == ip].index,\n inplace=True)\n\n frames = [result_tmp_1, result_tmp_2, result_tmp_3]\n result = pd.concat(frames, ignore_index=True)\n result = result.drop_duplicates()\n\n # count = result['result'].value_counts()\n # print(count)\n\n # 完善责任分公司信息\n result['city'] = result['wg_city'].combine_first(result['tl_city'])\n\n result.rename(columns={'wg_device_type': '网管设备类型',\n 'wg_device_manufactory': '网管设备厂家',\n 'wg_device_name': '网管设备名称',\n 'wg_device_telnet_ip': '网管设备IP',\n 'wg_device_accessserver_ip': '网管设备接入IP',\n 'wg_device_version': '网管设备版本号',\n 'wg_device_model': '网管设备型号',\n 'wg_device_attribute': '网管设备属性',\n 'wg_city': '网管分公司',\n 'tl_assemblename': '资源拼装名称',\n 'tl_create_date': '资源创建时间',\n 'tl_ems_name': '资源设备网管名称',\n 'tl_life_status': '资源生命状态',\n 'tl_marketing_area': '资源营销中心',\n 'tl_meid': '资源设备meid',\n 'tl_model': '资源设备类型',\n 'tl_modify_date': '资源修改时间',\n 'tl_name': '资源设备名称',\n 'tl_network_layer': '资源设备网络层次',\n 'tl_owner_net': '资源设备所属网络',\n 'tl_project_status': '资源验收状态',\n 'tl_reg_area': '资源区域',\n 'tl_role': '资源设备角色',\n 'tl_room': 
'资源设备机框',\n 'tl_small_country': '资源设备区域',\n 'tl_speciality': '资源类型',\n 'tl_specification': '资源所属分类',\n 'tl_standard_name': '资源设备标准名称',\n 'tl_telnet_ip': '资源设备IP',\n 'tl_tml_area': '资源设备机楼',\n 'tl_vendor': '资源设备厂家',\n 'tl_cityname': '资源分公司全称',\n 'tl_city': '资源分公司',\n 'city': '责任分公司',\n 'result': '对比结果'}, inplace=True)\n return result\n\nif __name__ == \"__main__\":\n import sys\n sys.path.insert(0, '../')\n from db import mysql2df, mongo2df\n import pandas as pd\n import conf\n engine = mysql2df.get_engine(\n drivername=conf.DB_DRIVER,\n username=conf.DB_USER,\n password=conf.DB_PASSWD,\n database=conf.DB_DB,\n host=conf.DB_HOST,\n port=conf.DB_PORT\n )\n\n # '''读取原始数据'''\n df_wg_origin = pd.read_sql_table('wg_man_device', engine)\n df_zy_origin = mongo2df.read_mongo(\n db=conf.MONGO_DATABASE,\n host=conf.MONGO_HOST,\n port=conf.MONGO_PORT,\n collection='device',\n query={})\n result = compare(df_wg_origin, df_zy_origin)\n result.to_csv('result_device.csv', index=False)\n","sub_path":"mancompare/compare/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":17853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481912947","text":"#!/usr/bin/env python\n\n\"\"\"\nThis script executes a single slot of an array job on an HPC compute node.\nIt is intended to be used with Sun Grid Engine or Torque job schedulers.\nIt assumes every instance of the job array runs the same command but with\ndifferent arguments. This script performs the work of looking-up the\narguments in a text file and substituting those arguments into the command\nto be executed.\n\nParameters\n----------\nThe script takes 3 arguments:\n\n1. Name of the environment variable that contains the sub-task number.\n You should use SGE_TASK_ID for grid engine.\n You should use PBS_ARRAYID for torque.\n\n2. Name of the file containing the arguments for each sub-task with one line\n per sub-task. This script will extract the arguments for this sub-task\n at the line number identified by the environment variable above. The\n line is parsed and substituted into the command, replacing the parameter placeholders\n with the actual arguments.\n\n3. 
The remainder of the command line is the command to be executed with parameter\n placeholders of the form {1}, {2}, {3} ...\n\nExamples\n--------\n# Sort some txt files, writing the sorted output to new files\nls *.txt > files.txt\necho 'qarrayrun.py SGE_TASK_ID files.txt sort -o sorted.{1} {1}' | qsub -t 1-$(cat files.txt | wc -l) -cwd -j y -V -o log\n\n# Your input file might have multiple columns, use {2} for the 2nd column\n# Sort the largest files first\nls *.txt | xargs -n 1 wc -c | sort -n -r > files.txt\necho 'qarrayrun.py SGE_TASK_ID files.txt sort -o sorted.{2} {2}' | qsub -t 1-$(cat files.txt | wc -l) -cwd -j y -V -o log\n\n# Use the --shell option and quote your pipeline when you need shell redirection\n# Remove blanks before sorting files\nls *.txt > files.txt\necho 'qarrayrun.py --shell SGE_TASK_ID files.txt \"cat {1} | tr -d [:blank:] | sort > sorted.{1}\"' | qsub -t 1-$(cat files.txt | wc -l) -cwd -j y -V -o log\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport textwrap\n\n\ndef get_file_line(file_name, line_num):\n \"\"\"Returns the line at the specified line number from the specified file.\n\n Parameters\n ----------\n file_name : str\n Path to the file\n line_num : int\n Line number to extract from the file\n\n Returns\n -------\n line : str\n Line at line_num in file or None if the line_num is larger than the number of lines in the file\n\n Raises\n ------\n ValueError if line_num is less than 1\n \"\"\"\n if line_num <= 0:\n raise ValueError(\"line_num must be greater than zero\")\n\n line = \"\"\n line_counter = 0\n with open(file_name) as f:\n for line in f:\n line_counter += 1\n if line_counter == line_num:\n return line\n return None\n\n\ndef substitute_arguments(command_line, arguments):\n \"\"\"Replace the parameter placeholders in a command line with actual arguments.\n\n Parameters\n ----------\n command_line : str\n Command line with parameter placeholders like {1}, {2}, {3}\n arguments : list of str\n List of arguments numbered 0,1,2 ....\n arguments[0] corresponds to placeholder {1}.\n\n Returns\n -------\n command_line : str\n Command line with actual arguments ready for execution\n\n Examples\n --------\n >>> substitute_arguments(\"cmd {0}/{1}/{2} -- {3}{4}\", [\"aa\", \"bb\", \"cc\"])\n 'cmd /aa/bb -- cc'\n \"\"\"\n args = [\"\"] # put an empty string at index 0\n args.extend(arguments)\n\n # Get a list of all the parameter numbers appearing in the command line\n param_nums = re.findall(\"{([0-9]+)}\", command_line)\n param_nums = [int(param_num) for param_num in param_nums]\n\n # Replace the parameters with actual arguments\n for param_num in param_nums:\n placeholder = \"{%s}\" % param_num\n if param_num == 0 or param_num >= len(args):\n command_line = command_line.replace(placeholder, \"\")\n else:\n command_line = command_line.replace(placeholder, args[param_num])\n\n return command_line\n\n\ndef run(argv):\n \"\"\"Run the qarrayrun program with the passed command line arguments.\n\n Parameters\n ----------\n argv : list of str\n List of command line arguments. 
Usually sys.argv[1:].\n\n Returns\n -------\n The return code of the executed command is passed to sys.exit()\n\n Examples\n --------\n # Setup tests\n >>> os.environ[\"SGE_TASK_ID\"] = \"2\"\n >>> from tempfile import NamedTemporaryFile\n >>> f = NamedTemporaryFile(delete=False, mode='w')\n >>> arg_file = f.name\n >>> print(\"A B C\", file=f)\n >>> print(\"Argument1 Argument2 Argument3\", file=f)\n >>> f.close()\n >>> f = NamedTemporaryFile(delete=False, mode='w')\n >>> out_file = f.name\n\n # Write the arguments in reverse order to out_file\n >>> cmd = \"python -c '\"\n >>> cmd += 'f = open(\"%s\", \"w\");' % out_file\n >>> cmd += 'f.write(\"{3} {2} {1}\"); f.close()'\n >>> cmd += \"'\"\n >>> run([\"SGE_TASK_ID\", arg_file, cmd])\n 0\n\n # Read the file just created to verify reverse order\n >>> f = open(out_file)\n >>> s = f.read(); f.close();\n >>> s\n 'Argument3 Argument2 Argument1'\n\n # Verify non-zero exit code is returned\n >>> run([\"--shell\", \"SGE_TASK_ID\", arg_file, \"exit 100\"])\n 100\n\n # Clean up temp files\n >>> os.unlink(arg_file)\n >>> os.unlink(out_file)\n \"\"\"\n description = textwrap.dedent(\"\"\"\n Executes a single slot of an array job on an HPC computational node. This is\n intended to be used with Sun Grid Engine or Torque job schedulers when every\n instance of the job array runs the same command but with different arguments.\n This script performs the work of looking-up the arguments in a text file and\n substituting those arguments into the command to be executed.\"\"\")\n\n epilog = textwrap.dedent(\"\"\"\n Examples\n --------\n # Sort some txt files, writing the sorted output to new files\n ls *.txt > files.txt\n echo 'qarrayrun SGE_TASK_ID files.txt sort -o sorted.{1} {1}' | qsub -t 1-$(cat files.txt | wc -l) -cwd -j y -V -o log\n\n # Your input file might have multiple columns, use {2} for the 2nd column\n # Sort the largest files first\n ls *.txt | xargs -n 1 wc -c | sort -n -r > files.txt\n echo 'qarrayrun SGE_TASK_ID files.txt sort -o sorted.{2} {2}' | qsub -t 1-$(cat files.txt | wc -l) -cwd -j y -V -o log\n\n # Use the --shell option and quote your pipeline when you need shell redirection\n # Remove blanks before sorting files\n ls *.txt > files.txt\n echo 'qarrayrun --shell SGE_TASK_ID files.txt \"cat {1} | tr -d [:blank:] | sort > sorted.{1}\"' | qsub -t 1-$(cat files.txt | wc -l) -cwd -j y -V -o log\n \"\"\")\n\n formatter_class = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=formatter_class)\n\n parser.add_argument(dest=\"subtask_var\", type=str, metavar=\"NAME\", help=\"\"\"Name of the environment variable that contains the sub-task number.\n You should use SGE_TASK_ID with Grid Engine and PBS_ARRAYID with Torque.\"\"\")\n parser.add_argument(dest=\"array_file\", type=str, help=\"\"\"Name of the file containing the arguments for each sub-task with one line\n per sub-task. This script will extract the arguments for this sub-task\n at the line number identified by the sub-task environment variable\n (SGE_TASK_ID or PBS_ARRAYID). 
The line is parsed and substituted\n into the command, replacing the parameter placeholders with the actual\n arguments.\"\"\")\n parser.add_argument(dest=\"command\", help=\"\"\"The remainder of the command line is the command to be executed with parameter\n placeholders of the form {1}, {2}, {3} ...\"\"\")\n parser.add_argument(\"--shell\", dest=\"shell\", action=\"store_true\", help=\"Run the command through the shell.\")\n\n args, remainder = parser.parse_known_args(argv)\n command = [args.command] + remainder\n\n # Which sub-task number am I?\n subtask_num = os.environ.get(args.subtask_var)\n if not subtask_num:\n print(\"Error: the %s environment variable is not defined.\" % args.subtask_var, file=sys.stderr)\n exit(1)\n subtask_num = int(subtask_num)\n\n # Read and parse the substitution arguments from the input file\n line = get_file_line(args.array_file, subtask_num)\n if not line:\n exit(0) # Silently ignore attempts to process beyond the end of the file\n arguments = line.split()\n\n # Build the command with substituted arguments\n command_line = ' '.join(command)\n command_line = substitute_arguments(command_line, arguments)\n\n # Execute the command\n if args.shell:\n return_code = subprocess.call(command_line, shell=True)\n else:\n command_split = shlex.split(command_line)\n return_code = subprocess.call(command_split, shell=False)\n return return_code\n\n\ndef main():\n \"\"\"This is the main function which is magically turned into an executable\n qarrayrun script by the setuptools entry_points. See setup.py.\n\n To run this function as a script, first install the package:\n $ python setup.py develop\n or\n $ pip install --user snp-pipeline\n\n\n Parameters\n ----------\n This function must not take any parameters\n\n Returns\n -------\n The return code of the executed command is passed to sys.exit()\n \"\"\"\n return_code = run(sys.argv[1:])\n sys.exit(return_code)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"snppipeline/qarrayrun.py","file_name":"qarrayrun.py","file_ext":"py","file_size_in_byte":9940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87370147","text":"from base64 import standard_b64encode\n# from base64 import standard_b64decode as decode_base64 - Not used yet\nfrom Crypto.Hash import SHA512\n\n__author__ = 'bren1337'\n\n\nclass StringEditor:\n edit_string = ''\n\n def __init__(self, s):\n \"\"\" Simple init method. \"\"\"\n self.edit_string = s\n\n def display_changes(self):\n \"\"\" Simply prints out the results of the functions. 
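Each helper below returns a transformed copy of edit_string; the\n edit_string attribute itself is never modified. 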
\"\"\"\n t_reversed = self._reverse(self.edit_string)\n t_switched = self._case_switch(self.edit_string)\n t_random = self._string_mix(self.edit_string)\n t_both = self._reverse(self.edit_string)\n t_both = self._case_switch(t_both)\n t_encrypt = self._encryption(self.edit_string)\n\n print('Reversed: ' + t_reversed)\n print('Case switch: ' + t_switched)\n print('Reversed and case switched: ' + t_both)\n print('Input randomly jumbled: ' + t_random)\n print('Encrypted: ' + t_encrypt)\n\n @staticmethod\n def _reverse(string):\n \"\"\" This function reverses the string.\n :rtype : string\n \"\"\"\n\n temp_array = []\n for letter in string:\n temp_array.append(letter)\n\n # Now we have an array of chars, reverse it.\n temp_array = reversed(temp_array)\n\n # Construct message.\n msg = ''\n for element in temp_array:\n msg += element\n\n return msg\n\n @staticmethod\n def _string_mix(string):\n \"\"\" Randomly jumbles the characters.\n :rtype : string\n \"\"\"\n return '--WORK IN PROGRESS--'\n\n @staticmethod\n def _case_switch(string):\n \"\"\" Switches uppercase letters to lowercase letters, and visa-versa.\n :rtype : string\n \"\"\"\n undercase_alphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n # Construct message.\n msg = ''\n for letter in string:\n if letter in undercase_alphabet:\n j = letter.upper()\n msg += j\n else:\n j = letter.lower()\n msg += j\n\n return msg\n\n @staticmethod\n def _encryption(string):\n \"\"\" Run the string through multiple loops of base64 string encryption, other encryptions later.\n :rtype : string\n \"\"\"\n\n # Run it through base64 encryption 7 times.\n msg = string\n for i in range(7):\n msg = standard_b64encode(msg)\n\n # Run the base64*7 encoded message through SHA512*7 encryption.\n for i in range(7):\n msg = SHA512.new(msg).hexdigest()\n\n # base64 message one more time.\n msg = standard_b64encode(msg)\n\n return msg\n\n\ndef start_program():\n user_string = raw_input('Please enter your message to manipulate: ')\n\n se = StringEditor(user_string)\n se.display_changes()\n\nif __name__ == '__main__':\n start_program()\n","sub_path":"Scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359340635","text":"import requests\n\n\ndef qiwi_pay_check():\n qiwi_number = 'PHONE NUMBER'\n api_access_token = 'API TOKEN' #https://qiwi.com/api\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json', \n 'Authorization': 'Bearer ' + api_access_token}\n try:\n qiwi_autoriz = requests.get('https://edge.qiwi.com/person-profile/v1/profile/current?authInfoEnabled',\n headers=headers)\n qiwi_paymates = requests.get('https://edge.qiwi.com/payment-history/v1/persons/' + str(\n qiwi_number) + '/payments?rows=10&operation=IN', headers=headers) #rows = 10 - количество транзакций; operation=IN входящие транзакции\n last_upd = qiwi_paymates.json()\n for i in range(0, 10):\n try:\n print(last_upd)\n except:\n print('Index out of range')\n except:\n print('Ошибка в подключении или авторизации QIWI_API')\n","sub_path":"QIWI_API.py","file_name":"QIWI_API.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295078160","text":"import PyECLOUD.myfilemanager as mfm\nimport PyECLOUD.mystyle as ms\n\nimport numpy as np\nfrom scipy.constants import e as qe\n\n# fname = 'Pyecltest_highQmax_100us_3turns.mat'; n_slots_plot = 10500\n\n# fname = 
'Pyecltest_example_20eV_1e-13C_0.5us.mat'; n_slots_plot = 700\n# fname = 'Pyecltest_example_20eV_1e-13C_1us.mat'; n_slots_plot = 700\n# fname = 'Pyecltest_example_20eV_1e-13C_2us.mat'; n_slots_plot = 700\n# fname = 'Pyecltest_example_20eV_1e-13C.mat'; n_slots_plot = 700\n# fname = 'Pyecltest_example_0.01eV_1e-13C.mat'; n_slots_plot = 700\nfname = 'Pyecltest.mat'; n_slots_plot = 700\n\n\nTrev = 88.9e-6\n\nob = mfm.myloadmat_to_obj(fname)\n\ncompare_against_charge_in_chamber = False\nplot_charge_from_post_processing = False\n\nimport matplotlib.pyplot as plt\nplt.close('all')\nms.mystyle_arial(fontsz=16)\n\nfig2 = plt.figure(2, figsize=(8,6*1.5))\nfig2.set_facecolor('w')\nsp1 = plt.subplot(3,1,1)\n\nif compare_against_charge_in_chamber:\n sub = ob.Nel_timep[0]\nelse:\n sub = 0.\n\nsp1.plot(ob.t/ob.b_spac, ob.Nel_timep - sub, linewidth=2)\nsp1.set_ylabel('Number of e-\\n[1/m]')\nsp2 = plt.subplot(3,1,2, sharex=sp1)\nsp2.semilogy(ob.t/ob.b_spac, ob.Qpatch_ave, linewidth=2)\nsp2.set_ylabel('Q on the patch\\n[C/m^2]')\nsp3 = plt.subplot(3,1,3, sharex=sp1)\nsp3.plot(ob.t/ob.b_spac, ob.sey_at_emax_patch, linewidth=2)\nsp3.set_ylabel('SEY at Emax\\n(patch)')\n# sp4 = plt.subplot(4,1,4, sharex=sp1)\n# sp4.plot(ob.t/ob.b_spac, ob.lam_t_array)\n\nfrom matplotlib.ticker import MaxNLocator\nfor sp in [sp1, sp2, sp3]:\n if sp is not sp2:\n sp.yaxis.set_major_locator(MaxNLocator(4))\n sp.grid(True)\nsp3.set_xlabel('Time/(25 ns)')\nsp3.set_ylim(1.0, 2.0)\n\nmask_patch = ob.flag_charging>0\nQ_max_patch = np.mean(ob.Q_max_segments[mask_patch])\nsp2.axhline(y = Q_max_patch,\n linestyle='--', color='r', linewidth=2)\nsp2.set_ylim(1e-3*Q_max_patch, 2*Q_max_patch)\n\nsp1.set_ylim(0, 1.4e9)\nfig2.subplots_adjust(\n top=0.95,\n bottom=0.09,\n left=0.15,\n right=0.94,\n hspace=0.38,\n wspace=0.2)\n\nfig1 = plt.figure(1, figsize=(8*2., 6))\nfig1.set_facecolor('w')\nspprof = plt.subplot(1,2,1)\nspprof.plot(ob.xg_hist*1e3, \n (qe/Trev*1000)*np.mean(ob.energ_eV_impact_hist[:n_slots_plot], \n axis=0),\n linewidth=2)\nspprof.set_ylabel('Heat load [a.u.]')\nspprof.set_xlabel('x [mm]')\nspprof.grid(True)\nspprof.set_ylim(0,.10)\n\nhl_left = np.sum((qe/Trev*1000)*ob.energ_eV_impact_hist[:n_slots_plot, ob.xg_hist<0],\n axis=1)\nhl_right = np.sum((qe/Trev*1000)*ob.energ_eV_impact_hist[:n_slots_plot, ob.xg_hist>0],\n axis=1)\nsplr = plt.subplot(1,2,2, sharex=sp1) \nsplr.plot(hl_right, 'r-', linewidth=2)\nsplr.plot(hl_left, 'b-', linewidth=2)\nsplr.set_xlabel('Bunch passage')\nsplr.set_ylabel('Heat load [W/bunch]')\nsplr.grid(True)\nsplr.set_ylim(0., 0.43)\nsp3.set_xlim(0., n_slots_plot)\n\nfig1.subplots_adjust(\n bottom=.14,\n top=.86)\n\n# crosscheck current on patch\nnel_impact_on_patch = np.sum(ob.nel_hist_impact_seg[:, mask_patch], axis=1)\nnel_emit_on_patch = np.sum(ob.nel_hist_emit_seg[:, mask_patch], axis=1)\n\npatch_area = np.sum(ob.L_edg[mask_patch])\n\naccumulated_charge_m2 = -qe*np.cumsum(nel_impact_on_patch - nel_emit_on_patch)/patch_area\nif plot_charge_from_post_processing:\n sp2.plot(accumulated_charge_m2, 'g', linewidth=2)\n\n\nif compare_against_charge_in_chamber:\n set_patch_Vx = set(list(ob.Vx[1:][mask_patch]) + list(ob.Vx[:-1][mask_patch]))\n \n min_x_patch = np.min(list(set_patch_Vx))\n max_x_patch = np.max(list(set_patch_Vx))\n \n mask_xg_patch = np.logical_and(ob.xg_hist>min_x_patch, ob.xg_hist<max_x_patch)\n\ndef index(request):\n return render(request, 'index.html')\n\ndef mainpage(request):\n return render(request,'mainpage.html')\n\n# def addhosts(request):\n# groups = HostGroup.objects.all()\n# return render(request, 
'addhosts.html', {'groups': groups})\n\ndef addhosts(request):\n if request.method == 'POST':\n hostname = request.POST.get('hostname')\n ip = request.POST.get('ip')\n group = request.POST.get('group')\n if group:\n # get_or_create returns a tuple of (instance, created flag)\n hostgroup = HostGroup.objects.get_or_create(groupname=group)[0]\n if hostname and ip:\n hostgroup.host_set.get_or_create(hostname=hostname, ipaddr=ip)\n\n groups = HostGroup.objects.all()\n return render(request, 'addhosts.html', {'groups': groups})\n\ndef addmodules(request):\n if request.method == 'POST':\n module = request.POST.get('module')\n argument = request.POST.get('argument')\n if module:\n mod = Module.objects.get_or_create(module_name=module)[0]\n if argument:\n mod.argument_set.get_or_create(argument_text=argument)\n\n modules = Module.objects.all()\n return render(request, 'addmodules.html', {'modules': modules})\n#\ndef tasks(request):\n hosts = Host.objects.all()\n groups = HostGroup.objects.all()\n modules = Module.objects.all()\n return render(request, 'tasks.html', {'hosts': hosts, 'groups': groups, 'modules': modules})","sub_path":"n5_projects/project2/ktzansible/ktzansi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526468449","text":"# File containing client classes\n# Imports of external packages\nimport socket\n\n# Class for handling everything on the client side\nclass Client():\n def __init__(self,serverAddress, serverPort):\n # Set how many bytes to accept from a socket\n self.bufferSize = 4096\n # Initiate a socket\n self.s = socket.socket()\n # Connect to a socket with a given address and port\n # For now it connects to localhost by default (serverAddress is not used yet)\n self.s.connect((socket.gethostname(),serverPort))\n # Receive a file that is sent instantly from the server\n self.receiveFile(self.s,'testReceive.txt')\n\n return\n\n # Function for receiving a file from a socket\n # This function assumes that all data is sent in one transmission, i.e. the file isn't bigger than bufferSize\n def receiveFile(self, socket, filePath):\n # Open or create a file at the given address\n with open(filePath, \"wb\") as f:\n # Receive data from the socket\n bytesRead = socket.recv(self.bufferSize)\n # Write the data to the file\n f.write(bytesRead)\n return True\n\ndef main():\n client = Client('127.0.0.1', 1234)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Bin/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243060375","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.style.use('ggplot')\n\n# read from the excel file\ndata = pd.read_excel('../data/se0102.xls')\n\nstr_prefectures = data.iloc[9:56,0]\nprefectures = []\nfor str1 in str_prefectures:\n s_list = str1.split()\n str_pref = ''.join(s_list[1:])\n prefectures.append(str_pref)\n # print str_pref\n\ndata_list = []\nfor i in range(1,120,7):\n data_list.append(data.iloc[9:56,i:i+7])\n\nsurvey_year = range(1920,1960,10)\nfor year in range(1955,2017,5):\n survey_year.append( year )\n\n\nfor i, data in enumerate(data_list):\n s1 =data.iloc[:,1]\n s1.index = prefectures\n s1.to_csv('../data/population'+str(survey_year[i])+'.csv',encoding='utf-8')\n\nfor i, data in enumerate(data_list):\n s1 =data.iloc[:,4]\n s1.index = prefectures\n 
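# Note: column 4 of each 7-column block presumably holds the average family size per prefecture; one CSV per census year is written for use in QGIS.\n 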
s1.to_csv('../data/ave_family'+str(survey_year[i])+'.csv',encoding='utf-8')\n","sub_path":"essay1_number_of_people_in_a_family/script/make_csv_for_qgis.py","file_name":"make_csv_for_qgis.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183912327","text":"'''\r\nMain File for retrieving Wikipedia Articles for Reviews\r\n\r\n'''\r\n'''\r\nimports\r\n'''\r\n#import sys\r\n#sys.path.append('C:\\\\Users\\\\Erfaneh\\\\Google Drive\\\\Cochrane\\\\Codes\\\\Cochrane-Examples2\\\\')\r\n#%%\r\nimport numpy as np\r\nimport re\r\nfrom Reads import ReadData\r\nfrom nltk.corpus import stopwords\r\nfrom SimilarityMeasures import Similarity\r\nfrom Text2Vector import TextRepresentaion\r\nfrom Preprocessing import Preprocessing\r\nfrom document_Retrieval import calculateSim_W2R\r\n\r\n\r\n#%%#########################################\r\n\r\n'''\r\nDefine parameters\r\n'''\r\npath_main = \"/home/erfaneh/googledrive/Projects/Cochrane/\"\r\npath_CochraneReviews = path_main+'DataSet/citedCochrane.db'\r\npath_WikipediaArticles = path_main+\"DataSet/cleaned_wikitext.db\"\r\npath_dependency_file = path_main + \"DataSet/dependency.csv\"\r\n\r\ndim = 2500\r\nn_top = 50\r\n\r\n#%%#########################################\r\n\r\n'''\r\nDefine Objects\r\n'''\r\nFileReading = ReadData()\r\nSimilarityMeasures = Similarity()\r\ntext2Vec = TextRepresentaion()\r\nprepFuncs = Preprocessing()\r\n\r\n#%%#########################################\r\n'''\r\nRead Data\r\n'''\r\nstop = set(stopwords.words('english'))\r\ndb_reviews = FileReading.readReviews(path_CochraneReviews)\r\ndb_wikis = FileReading.readWikipedia(path_WikipediaArticles)\r\ndb_wikis = db_wikis.rename(index=str, columns={\"field1\": \"title\", \"field2\": \"id\", \"field3\": \"text\"})\r\ndependencies = FileReading.readDependencyW2R(path_dependency_file)\r\nWV = FileReading.readWV(\"/home/erfaneh/Desktop/Drives/Datasets/WV/glove.6B.\"+str(dim)+\"d.txt\", stop)\r\n\r\n#%%#########################################\r\nPreprocessingFunctions = [prepFuncs.ExtractNouns, prepFuncs.identity] \r\nText2VecFunctions = [text2Vec.tf_idf] #, text2Vec.LDA, text2Vec.doc2Vec] #text2Vec.tf_idf, \r\nSimilarityFunctions = [SimilarityMeasures.cosine_similarity, SimilarityMeasures.manhattan_distance, SimilarityMeasures.euclidean_distance, SimilarityMeasures.kldivergence_distance, SimilarityMeasures.hellinger_distance]\r\n \r\nfor textRep in Text2VecFunctions:\r\n for prepFunc in PreprocessingFunctions:\r\n for n_gram in (1, 2, 3):\r\n print (dim)\r\n matrix_train, matrix_test = textRep(db_reviews, db_wikis, stop, dim, WV, prepFunc, n_gram)\r\n for simMeasure in SimilarityFunctions:\r\n resultFile = path_main + \"Results/W2R_\" + re.split('\\s|\\.', str(textRep))[3] + \"_\" + re.split('\\s|\\.', str(prepFunc))[3] + \"_\" + re.split('\\s|\\.', str(simMeasure))[3] +\"_ngram\"+ str(n_gram) + \"_dim\" + str(dim) + \"_k\" + str(n_top) + \".csv\"\r\n print(resultFile)\r\n if 'distance' not in str(simMeasure):\r\n calculateSim_W2R(db_reviews, db_wikis, matrix_train, matrix_test, stop, dim, simMeasure, np.argmax, dependencies, WV, n_top, resultFile)\r\n else:\r\n calculateSim_W2R(db_reviews, db_wikis, matrix_train, matrix_test, stop, dim, simMeasure, np.argmin, dependencies, WV, n_top, resultFile)\r\n 
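# A more robust way to recover the function names used in resultFile (a sketch; assumes plain functions or bound methods):\r\n# func.__name__ yields the same token that re.split('\s|\.', str(func))[3] extracts from the bound-method repr above, e.g.:\r\n# name_parts = (textRep.__name__, prepFunc.__name__, simMeasure.__name__)\r\n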
\r\n","sub_path":"Code/Main_W2R.py","file_name":"Main_W2R.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152566817","text":"import turtle\r\nimport pandas\r\n\r\n\r\nscreen = turtle.Screen()\r\nscreen.title(\"US states game\")\r\nimage = \"blank_states_img.gif\"\r\nscreen.addshape(image)\r\nturtle.shape(image)\r\nstates_data = pandas.read_csv(\"50_states.csv\")\r\nall_states = states_data.state.to_list()\r\nguessed_states = []\r\nscore = 0\r\nwrong_guess = turtle.Turtle()\r\nwrong_guess.hideturtle()\r\nwrong_guess.penup()\r\n\r\nwhile len(guessed_states) < 50:\r\n pen = turtle.Turtle()\r\n pen.up()\r\n pen.hideturtle()\r\n user_input = screen.textinput(title=f\"{len(guessed_states)}/50 State_name\", prompt=\"Type the state name\")\r\n user_input = user_input.title()\r\n\r\n if user_input == \"Exit\":\r\n break\r\n\r\n if user_input in all_states:\r\n wrong_guess.clear()\r\n\r\n # If the state was already guessed\r\n if user_input in guessed_states:\r\n pass\r\n else:\r\n guessed_states.append(user_input)\r\n state_row = states_data[states_data.state == user_input]\r\n x = float(state_row.x)\r\n y = float(state_row.y)\r\n pen.goto(x, y)\r\n pen.down()\r\n pen.write(user_input)\r\n # If the guess is not among the list of states\r\n else:\r\n # Removing text written by a turtle - https://stackoverflow.com/questions/34823206/turtle-delete-writing-on-screen-and-rewrite\r\n wrong_guess.goto(-40,0)\r\n wrong_guess.pendown()\r\n wrong_guess.write(\"Please try again\",font=(\"Arial\",16,\"normal\"))\r\n\r\nfor state in guessed_states:\r\n all_states.remove(state)\r\n\r\nmissed_states = all_states\r\n\r\n# Converting list to a Series since only series and dataframe has to_csv attribute\r\nLearn = pandas.Series(missed_states)\r\nLearn.to_csv(\"States_to_learn.csv\")\r\n\r\nscreen.exitonclick()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"558163293","text":"from itertools import cycle\nimport requests\n\nwith open(\"proxies.txt\", \"r+\") as f:\n\tproxies = [x.strip() for x in f.readlines()]\nproxy_pool = cycle(proxies)\n\n\ndef getWorkingProxy():\n\tproxy = next(proxy_pool)\n\ttry:\n\t\tr = requests.get(\"http://ipinfo.io/json\", proxies={\"http\": \"http://\"+proxy}, timeout=5)\n\t\treturn proxy\n\texcept Exception as e:\n\t\t#proxies.remove(proxy)\n\t\t#proxy_pool.__setattr__(proxy, None)\n\t\treturn getWorkingProxy()\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125159120","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom . 
forms import UserFormPlus\n\n''' message types\nmessages.debug\nmessages.info\nmessages.success\nmessages.warning\nmessages.error\n'''\n\n# Create your views here.\n\ndef register(request):\n\tif request.method == 'POST':\n\t\tform = UserFormPlus(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tmessages.success(request, f'Account created for {username}') #send a message to say account was created successfully\n\n\t\t\treturn redirect('blog-home')\n\telse:\t\n\t\tform = UserFormPlus()\n\n\treturn render(request, 'users/register.html', {'form': form})\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"394432077","text":"import copy\n\nfrom pathlib import Path\n\nfrom aiohttp import web\nfrom aiohttp.hdrs import METH_ANY, METH_ALL\nfrom apispec import APISpec\nfrom apispec.ext.marshmallow import MarshmallowPlugin\nfrom jinja2 import Template\n\nfrom .utils import get_path, get_path_keys, issubclass_py37fix\n\nPATHS = {'get', 'put', 'post', 'delete', 'patch'}\nVALID_RESPONSE_FIELDS = {'description', 'headers', 'examples'}\nVALID_RESPONSE_FULL_FIELDS = VALID_RESPONSE_FIELDS.union({'schema'})\n\n\nclass AIOHTTP_APISpec:\n def __init__(self, app, title, version, url='/api_docs/swagger.json',\n swagger_path='/api_docs', static_path='/static/swagger',\n request_data_prefix='validated_body', **kwargs):\n self.plugin = MarshmallowPlugin()\n self.spec = APISpec(plugins=[self.plugin], openapi_version='2.0',\n title=title, version=version, **kwargs)\n\n self.url = url\n self.swagger_path = swagger_path\n self.static_path = static_path\n self._registered = False\n self._request_data_prefix = request_data_prefix\n\n if app is not None:\n self.register(app)\n\n def swagger_dict(self):\n return self.spec.to_dict()\n\n def register(self, app):\n if self._registered is True:\n return\n\n app['_apispec_request_data_prefix'] = self._request_data_prefix\n\n async def doc_routes(app_):\n self._register(app_)\n\n app.on_startup.append(doc_routes)\n self._registered = True\n\n async def swagger_handler(request):\n return web.json_response(request.app['swagger_dict'])\n\n app.router.add_routes([web.get(self.url, swagger_handler)])\n\n if self.swagger_path is not None:\n self.add_swagger_web_page(app, self.static_path, self.swagger_path)\n\n def add_swagger_web_page(self, app, static_path, view_path):\n static_files = Path(__file__).parent / 'static'\n app.router.add_static(static_path, static_files)\n\n with open(str(static_files / 'index.html')) as swagger_template:\n template = Template(swagger_template.read())\n template = template.render(path=self.url, static=static_path)\n\n async def swagger_view(_):\n return web.Response(\n text=template, content_type='text/html'\n )\n\n app.router.add_route('GET', view_path, swagger_view)\n\n def _register(self, app):\n for route in app.router.routes():\n if issubclass_py37fix(route.handler, web.View) and route.method == METH_ANY:\n for attr in dir(route.handler):\n if attr.upper() in METH_ALL:\n view = getattr(route.handler, attr)\n method = attr\n self._register_route(route, method, view)\n\n else:\n method = route.method.lower()\n view = route.handler\n self._register_route(route, method, view)\n\n app['swagger_dict'] = self.swagger_dict()\n\n def _register_route(self, route, method, view):\n if not hasattr(view, '__apispec__'):\n return None\n\n url_path = 
get_path(route)\n if not url_path:\n return None\n\n self._update_paths(view.__apispec__, method, url_path)\n\n def _update_paths(self, data, method, url_path):\n if method not in PATHS:\n return\n\n # Requests\n if 'requests' in data:\n for location, params in data['requests'].items():\n parameters = self.plugin.openapi.schema2parameters(\n params['schema'], **params['options']\n )\n data['parameters'].extend(parameters)\n\n del data['requests']\n\n # Parameters\n existing = [p['name'] for p in data['parameters'] if p['in'] == 'path']\n data['parameters'].extend(\n {'in': 'path', 'name': path_key, 'required': True, 'type': 'string'}\n for path_key in get_path_keys(url_path)\n if path_key not in existing\n )\n\n # Responses\n if 'responses' in data:\n responses = {}\n for code, params in data['responses'].items():\n if 'schema' in params:\n raw_parameters = self.plugin.openapi.schema2parameters(\n params['schema'], required=params.get('required', False)\n )[0]\n # https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#responseObject\n parameters = {\n k: v\n for k, v in raw_parameters.items()\n if k in VALID_RESPONSE_FULL_FIELDS\n }\n for extra_info in VALID_RESPONSE_FIELDS:\n if extra_info in params:\n parameters[extra_info] = params[extra_info]\n responses[code] = parameters\n\n else:\n responses[code] = params\n\n data['responses'] = responses\n\n operations = copy.deepcopy(data)\n self.spec.path(path=url_path, operations={method: operations})\n\n\ndef setup_apispec(\n app, title='API documentation', version='0.0.1',\n url='/api_docs/swagger.json', swagger_path='/api_docs',\n static_path='/static/swagger', request_data_prefix='validated_',\n **kwargs):\n AIOHTTP_APISpec(app, title, version, url, swagger_path, static_path, request_data_prefix,\n **kwargs)\n","sub_path":"aiohttp_apispec/apispec.py","file_name":"apispec.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57671198","text":"#!/usr/bin/env python\n\nimport sys, json, os, os.path, datetime\n\nfrom splunklib.modularinput import *\nfrom apiclient.discovery import build\nfrom apiclient.errors import HttpError\n\ndeveloper_key = \"\"\nyoutube_api_service = \"youtube\"\nyoutube_api_version = \"v3\"\n\nclass MyScript(Script):\n \n def get_scheme(self):\n scheme = Scheme(\"YouTube - Channel Videos\")\n\n scheme.description = \"Get videos of a channel\"\n scheme.use_external_validation = False\n scheme.use_single_instance = True\n\n channelid_argument = Argument(\"channelid\")\n channelid_argument.title = \"channel id\"\n channelid_argument.description = \"Enter a channel id\"\n channelid_argument.data_type = Argument.data_type_string\n channelid_argument.required_on_edit = True\n channelid_argument.required_on_create = True\n scheme.add_argument(channelid_argument)\n\n return scheme\n\n def stream_events(self, inputs, ew):\n youtube = build(youtube_api_service, youtube_api_version, developerKey=developer_key)\n\n checkpoint_dir = os.path.join(os.environ['SPLUNK_HOME'], \"etc\", \"apps\", \"youtube_videos\", \"checkpoint_dir\")\n\n for input_name, input_item in inputs.inputs.iteritems():\n channelid = input_item[\"channelid\"]\n checkpoint_file_path = os.path.join(checkpoint_dir, channelid + \".txt\")\n\n file = touchopen(checkpoint_file_path, \"r+\")\n since = file.read().replace('\\n', '')\n file.seek(0)\n\n output = youtube.search().list(part=\"id,snippet\", channelId=channelid, publishedAfter=since).execute()\n\n d = 
datetime.datetime.utcnow()\n since = str(d.isoformat(\"T\") + \"Z\")\n file.write(since)\n file.truncate()\n file.close()\n\n if len(output[\"items\"]) != 0:\n for video in output[\"items\"]:\n if \"publishedAt\" in video[\"snippet\"]:\n video[\"_time\"] = video[\"snippet\"][\"publishedAt\"]\n \n event = Event()\n event.stanza = input_name\n event.data = str(json.dumps(video))\n ew.write_event(event)\n\n\ndef touchopen(filename, *args, **kwargs):\n fd = os.open(filename, os.O_RDWR | os.O_CREAT)\n return os.fdopen(fd, *args, **kwargs)\n\nif __name__ == \"__main__\":\n sys.exit(MyScript().run(sys.argv))\n","sub_path":"apps/youtube_channel_videos/bin/youtube_channel_videos.py","file_name":"youtube_channel_videos.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328572396","text":"import os\nimport re\nimport time\n\ninput_path = '/home/dutir/xieyuning/music_crawl/quiet'\ninput_path = '/home/aaa/data_quiet'\noutput_path = '/home/aaa/output/quiet'\n\ndef movefile(file_path):\n filename = file_path+'.txt'\n command = 'cp {} {}'.format(os.path.join(input_path,filename),os.path.join(output_path,file_path))\n print(command)\n os.system(command)\n \n\n\ndef divide(input_path,output_path):\n \n lis = os.listdir(input_path)\n lis.sort()\n #print(lis)\n \n wav_files = [i for i in lis if i.split('.')[-1] == 'wav']\n total_num = len(wav_files)\n time_init = time.time()\n num = 0\n \n olist = os.listdir(output_path)\n for file in os.listdir(input_path): \n\n try:\n fname = file.split('.')[0]\n if file.split('.')[-1] == 'wav' and fname not in olist:\n time_start=time.time()\n command = 'spleeter separate -i {}/{} -p spleeter:4stems -o {}'.format(input_path,file,output_path)\n print(command)\n \n os.system(command)\n \n movefile(fname)\n num+=1\n time_end=time.time()\n \n print(\"time_consume:\\t\",time_end-time_start)\n print(\"total_time:\\t\",time_end-time_init)\n print(\"{}/{}\".format(num,total_num))\n \n \n except:\n continue\ndivide(input_path,output_path)\n","sub_path":"quiet.py","file_name":"quiet.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232670708","text":"r,s=input().split()\r\nr=int(r)\r\ns=int(s)\r\nl=[]\r\nfor num1 in range(r+1, s):\r\n sum = 0\r\n\r\n temp = num1\r\n while temp > 0:\r\n digit = temp % 10\r\n sum += digit ** 3\r\n temp //= 10\r\n \r\n if num1 == sum:\r\n l.append(sum)\r\nprint(*l)\r\n","sub_path":"printing amstrong numbers between two integers.py","file_name":"printing amstrong numbers between two integers.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155331474","text":"\"\"\" from https://github.com/keithito/tacotron \"\"\"\n\n\"\"\"\nDefines the set of symbols used in text input to the model.\n\nThe default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. \"\"\"\n\nfrom text import cmudict, pinyin\n\n_pad = \"_\"\n_punctuation = \"!'(),.:;? 
\"\n_special = \"-\"\n_letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n_silences = [\"@sp\", \"@spn\", \"@sil\"]\n_japanese = ['ky','sp', 'sh', 'ch', 'ts','ty', 'ry', 'ny', 'by', 'hy', 'gy', 'kw', 'gw', 'kj', 'gj', 'my', 'py','dy']\n# Prepend \"@\" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):\n_arpabet = [\"@\" + s for s in cmudict.valid_symbols]\n_pinyin = [\"@\" + s for s in pinyin.valid_symbols]\n\n# Export all symbols:\nsymbols = (\n [_pad]\n + list(_special)\n + list(_punctuation)\n + list(_letters)\n + _arpabet\n + _pinyin\n + _silences\n + _japanese\n)\n","sub_path":"text/symbols.py","file_name":"symbols.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244334012","text":"domain_size = 2 \n\nSolverType = \"monolithic_solver_eulerian\"\n#SolverType2 = \"FractionalStep\"\n\nclass FluidSolverConfiguration:\n solver_type = \"SurfaceTension_monolithic_solver\"\n domain_size = 2 \n TurbulenceModel = \"None\"\n\n # Monolithic solver\n class linear_solver_config:\n solver_type = \"Super LU\"\n scaling = False\n \n #convergence criteria settings\n velocity_relative_tolerance = 1E-4\n velocity_absolute_tolerance = 1E-6\n pressure_relative_tolerance = 1E-4\n pressure_absolute_tolerance = 1E-6\n divergence_cleareance_step = 1\n \n #other solver settings\n oss_switch = 0\n compute_reactions = True\n time_order = 2\n predictor_corrector = False\n dynamic_tau = 0.1 \n max_iteration = 20\n laplacian_form = 2\n \n eulerian_model_part = 0\n\n# Monolithic solver\nMonolithic_Linear_Solver =\"MixedUP\"#\"BiConjugate gradient stabilized\"#\nMonolithic_Iterative_Tolerance = 1E-4 # \nMonolithic_Solver_Max_Iteration = 5000\nMonolithic_Preconditioner_type = \"ILU0\"#\"Diagonal\"\n\nVelocity_Linear_Solver=\"BiConjugate gradient stabilized\"\nPressure_Linear_Solver=\"Conjugate gradient\"\nVelocity_Preconditioner_type=\"ILU0\"\nPressure_Preconditioner_type=\"ILU0\"\nVelocity_Iterative_Tolerance=1E-6 \nPressure_Iterative_Tolerance=1E-3 \nVelocity_Solver_Max_Iteration = 5000\nPressure_Solver_Max_Iteration = 1000\n\nTurbulenceModel = \"None\"\n\nvelocity_relative_tolerance = 1E-4 \nvelocity_absolute_tolerance = 1E-6\npressure_relative_tolerance = 1E-4 \npressure_absolute_tolerance = 1E-6\n\ntime_order = 2\npredictor_corrector = False\nmax_iterations = 10\nlaplacian_form = 2 \n\nAutomaticDeltaTime = \"Fixed\"\ndivergence_cleareance_step = 10\nDt = 0.001\nStart_time = 0.0\nmax_time = 1.00\nnsteps = 100 \n\nuse_dt_in_stabilization = 0.10\nuse_orthogonal_subscales = 0\nCalculate_reactions = True\n\ngroups_dictionary = {\n \"Fluid\" : 1,\n }\n\noutput_time = 0.01\noutput_step = 100\nVolumeOutput = True\n\nnodal_results=[\"VELOCITY\",\"PRESSURE\"]\ngauss_points_results=[]\nGiDPostMode = \"Binary\"\nGiDWriteMeshFlag = True\nGiDWriteConditionsFlag = True\n# Add the following line if using vms_monolithic_solver for lagrangian_model_part\nGiDWriteParticlesFlag = False\nGiDMultiFileFlag = \"Multiples\"\n\nproblem_name=\"GDL-51\"\nproblem_path=\"/home/alex/Examples_kratos/ALEX/channel30mm4_vms.gid\"\n\nkratos_path=\"home/alex/kratos\"\n","sub_path":"applications/ULFapplication/test_examples/lagrangian_sessile_droplet/ProjectParameters.py","file_name":"ProjectParameters.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200163693","text":"import os\nimport re\nimport 
json\nimport csv\n\n\"\"\" Description\n Module for obtaining the data from the text documents.\n It extracts the name, number of services, number of SAPs and number of ports of each 7210.\n It receives a .txt and returns an array of data ready to be saved to a .csv\n\"\"\"\n\ndef extract(file_path):\n\n # Dictionary of patterns, each associated with a keyword for the columns of the final CSV.\n patterns = {\n 'Name': 'Target:[A-Za-z\\t_0-9 .]+',\n 'Services': 'show service service-using | match \"Matching\"',\n 'Saps': 'show service sap-using | match \"Number\"',\n 'Ports': 'show port description | match 10/100/G',\n }\n\n # Dictionary where the data of each 7210 is staged before being appended to the \"out\" variable later on.\n temp_data = {\n 'Name':\"\",\n 'Services':0,\n 'Saps':0,\n 'Ports':0,\n }\n\n # List of keywords in the order in which they must appear in the .csv\n keys = ['Name', 'Services', 'Saps', 'Ports']\n\n # Open the .txt file to extract the data\n with open(file_path, 'r') as file:\n # Split the file into lines and keep it in memory as a list to make indexing easier.\n # This is necessary because the index has to be moved up and down independently of the main loop;\n # the usual way of reading the file ( for line in file: ) does not offer enough freedom for that.\n data = file.read().splitlines()\n size = len(data)\n\n # Initialization of the variable that is returned at the end of the process\n out = []\n\n # Start reading the file\n for index in range(size):\n\n # Case in which the 7210 name is found.\n # Looks for the pattern 'Target:[any number of characters]'\n if re.search(patterns['Name'], data[index]):\n name = re.search(patterns['Name'], data[index]).group().replace('Target:', '')\n # Store the name before adding it to 'out'\n temp_data['Name'] = name\n # Case in which the number of services is found.\n # Looks for the command 'show service service-using | match \"Matching\"'\n # and reads the value from the next line\n elif re.search(patterns['Services'], data[index]):\n # If there are no associated services, the pattern 'Matching Services : [0-9]+' does not appear on the next line 'index+1',\n # so the default value must be kept.\n # After finding the command, the next line is searched to extract the number.\n # If there are no related services the command shows nothing.\n if re.search(r'Matching Services : [0-9]+', data[index+1]):\n # Extract the number from the identified line\n num = re.search(r'[0-9]+', data[index+1]).group()\n # bump the index since that line has already been checked (note: rebinding the loop variable does not affect the next iteration of a for loop)\n index = index + 1\n # Store the value before adding it to 'out'\n temp_data['Services'] = num\n # Case in which the number of SAPs is found.\n # Looks for the command 'show service sap-using | match \"Number\"'\n # and reads the value from the next line\n elif re.search(patterns['Saps'], data[index]):\n # If there are no associated SAPs, the pattern 'Number of SAPs : [0-9]+' does not appear on the next line 'index+1',\n # so the default value must be kept.\n # After finding the command, the next line is searched to extract the number.\n # If there are no related SAPs the command shows nothing.\n if re.search(r'Number of SAPs : [0-9]+', data[index+1]):\n # Extract the number from the identified line\n num = re.search(r'[0-9]+', data[index+1]).group()\n # bump the index since that line has already been checked\n index = index + 1\n # Store the value before adding it to 'out'\n temp_data['Saps'] = num\n # Case that counts the ports:\n # first the command 'show port description | match 10/100/G' is found,\n # then lines are counted until one shorter than 4 characters is found\n elif re.search(patterns['Ports'], data[index]):\n # Variable that will hold the number of ports\n acc = 0\n # control variables for the while loop\n sw = True\n j=1\n\n # count the ports\n while sw:\n if index+j < size and len(data[index+j])>4:\n acc = acc + 1\n else:\n sw = False\n j = j+1\n\n # Store the obtained value\n temp_data['Ports'] = acc\n\n # adapt to the .csv row format and append to the out variable\n temp = [temp_data[i] for i in keys]\n out.append(temp)\n\n # Reload the staging dictionary with the default values\n temp_data = {\n 'Name':\"\",\n 'Services':0,\n 'Saps':0,\n 'Ports':0,\n }\n\n return out\n\n\nif __name__ == \"__main__\":\n\n extract('data/131.txt')\n\n\n\n\n\"\"\" Requirements\n\n Format the file must have in order to be valid:\n\n #\n #Script Name:SERVICIOS_Y_SAP_CUC-VVC\tScript Version:3\tTarget:CUC_CNT_7210_01\n #Status:Successful\tDate:2018/07/16 09:30:38 821\n #Saved Result File Name:script-SERVICIOS_Y_SAP_CUC-VVC.target-CUC_CNT_7210_01.2018-07-16_09-30-38_821.txt.gz\n #Parameters:\n #\n show service service-using | match \"Matching\"\n Matching Services : 80\n show service sap-using | match \"Number\"\n Number of SAPs : 80\n show port description | match 10/100/G -> loop\n 1/1/4 10/100/Gig Ethernet SFP\n 1/1/4 10/100/Gig Ethernet SFP\n 1/1/4 10/100/Gig Ethernet SFP\n ...\n\n #\n\"\"\"\n","sub_path":"data_extractor.py","file_name":"data_extractor.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"117310924","text":"#!/usr/bin/python3\n\nimport filer\n\n\"\"\"Reading and writing from files\nusing standard python features\n\"\"\"\nfs = filer.opener('testfile.txt')\nfs.write('This is the first line\\n')\nfs.write('This is the second line\\n')\nfs.write('This is the third line\\n')\nfs.close()\n\nfiler.print_file('testfile.txt')\n\ntry:\n\tfs.close()\n\tprint(\"File is now closed\")\nexcept IOError:\n\tprint(\"File could not be closed\")\n\n","sub_path":"python3/fileio/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277875913","text":"'''\nPROBLEM 1\n\nTIME COMPLEXITY: O(N)\nSPACE COMPLEXITY: O(N)\n\n- traverse the nodes in the tree checking the values of their children against the values of x and y\n- if either the left or right child is equal to x, update X_found=True and X_parent with the value of the node\n- at the end of processing the nodes at each level, if one of x and y is found and the other is not, return False; if both are found, compare the parents of x and y\n'''\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import deque\nclass Solution:\n def isCousins(self, root: TreeNode, x: int, y: int) -> bool:\n X_parent=None\n Y_parent=None\n X_found=False\n Y_found=False\n \n queue=deque()\n queue.append(root)\n \n while(len(queue)!=0):\n size=len(queue)\n \n\n \n for i in range(size):\n popped=queue.popleft()\n \n\n if popped.left!=None:\n 
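# BFS visit: enqueue the left child and, if its value is x or y, record this node's value as that child's parent\n 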
queue.append(popped.left)\n if popped.left.val==x:\n X_parent=popped.val\n X_found=True\n elif popped.left.val==y:\n Y_parent=popped.val\n Y_found=True\n if popped.right!=None:\n queue.append(popped.right)\n if popped.right.val==y:\n Y_parent=popped.val\n Y_found=True\n elif popped.right.val==x:\n X_parent=popped.val\n X_found=True\n \n if (X_found==True and Y_found==False) or (X_found==False and Y_found==True):\n return False\n \n if X_found==True and Y_found==True:\n if X_parent==Y_parent:\n return False\n else:\n return True\n return False # x or y may sit at the root and never be seen as a child; report non-cousins explicitly\n\n\n'''\nPROBLEM 2\n\nTIME COMPLEXITY: O(N)\nSPACE COMPLEXITY: O(N)\n\n- traverse the matrix and add all the rotten orange coordinates in the queue\n- process all the elements in the queue at a time and add their neighbours in queue if they are fresh oranges\n- at the end of each iteration, if queue is not empty, then increment the time variable\n'''\n\nfrom collections import deque\nclass Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n if len(grid)==0:\n return 0\n self.time=0\n queue=deque()\n dirs=[[0,1],[0,-1],[1,0],[-1,0]]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j]==2:\n queue.append([i,j])\n \n while(len(queue)!=0):\n size=len(queue)\n \n for k in range(size):\n popped=queue.popleft()\n\n for d in dirs:\n row=popped[0]+d[0]\n col=popped[1]+d[1]\n\n if row>=0 and row<len(grid) and col>=0 and col<len(grid[0]) and grid[row][col]==1:\n grid[row][col]=2\n queue.append([row,col])\n \n if len(queue)!=0:\n self.time+=1\n \n for r in grid:\n if 1 in r:\n return -1\n return self.time\n\n\n'''\nPROBLEM 3\n\nTIME COMPLEXITY: O(N)\nSPACE COMPLEXITY: O(N)\n\n- build a hashmap from employee id to employee object\n- BFS from the given id, adding each popped employee's importance and pushing the ids of their subordinates into the queue\n'''\n\nfrom collections import deque\nclass Solution:\n def getImportance(self, employees: List['Employee'], id: int) -> int:\n importance=0\n hashmap={}\n for i in employees:\n hashmap[i.id]=i\n # print(hashmap)\n \n queue=deque()\n queue.append(id)\n \n while(len(queue)!=0):\n popped=queue.popleft()\n importance+=hashmap[popped].importance\n for i in hashmap[popped].subordinates:\n queue.append(i)\n return importance","sub_path":"Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"578299741","text":"#!/usr/bin/env python\n\n##############################################################################\n##\n## This file is part of Sardana\n##\n## http://www.sardana-controls.org/\n##\n## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain\n##\n## Sardana is free software: you can redistribute it and/or modify\n## it under the terms of the GNU Lesser General Public License as published by\n## the Free Software Foundation, either version 3 of the License, or\n## (at your option) any later version.\n##\n## Sardana is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU Lesser General Public License for more details.\n##\n## You should have received a copy of the GNU Lesser General Public License\n## along with Sardana. 
If not, see <http://www.gnu.org/licenses/>.\n##\n##############################################################################\n\n__docformat__ = 'restructuredtext'\n\nimport sys\n\nimport sardana\nfrom taurus.external.qt import Qt\nfrom taurus.qt.qtgui.container import TaurusWidget\nfrom reflectionslist import ReflectionsList\nfrom reflectionseditor import ReflectionsEditor\nfrom taurus.qt.qtgui.base import TaurusBaseWidget\n\nfrom taurus.external.qt import QtCore, QtGui\n\nfrom taurus.qt.qtgui.container import TaurusWidget\nfrom taurus.qt.qtgui.input import TaurusValueLineEdit\nimport taurus.core\n\nimport taurus.core.util.argparse\nimport taurus.qt.qtgui.application\nfrom taurus.qt.qtgui.util.ui import UILoadable\n\nglobal flag_update\nflag_update = 0\n\nclass PrivateComboBox(Qt.QComboBox, TaurusBaseWidget):\n \"\"\"ComboBox\"\"\"\n\n def __init__(self, parent=None):\n name = self.__class__.__name__\n self.call__init__wo_kw(Qt.QComboBox, parent)\n self.call__init__(TaurusBaseWidget, name)\n self.setSizeAdjustPolicy(Qt.QComboBox.AdjustToContentsOnFirstShow)\n self.setToolTip(\"Choose an item ...\")\n QtCore.QMetaObject.connectSlotsByName(self)\n\n def loadItems(self, items):\n all_items = [self.itemText(i) for i in range(self.count())]\n for crys in items:\n if crys not in all_items:\n self.addItem(crys)\n\n@UILoadable(with_ui=\"_ui\")\nclass UBMatrixBase(TaurusWidget):\n\n def __init__(self, parent=None, designMode=False):\n TaurusWidget.__init__(self, parent, designMode=designMode)\n\n self.loadUi(filename=\"ubmatrix.ui\")\n\n self.connect(self._ui.UpdateButton, Qt.SIGNAL(\n \"clicked()\"), self.update_values)\n self.connect(self._ui.ComputeUButton,\n Qt.SIGNAL(\"clicked()\"), self.compute_ub)\n self.connect(self._ui.ReflectionsListButton, Qt.SIGNAL(\n \"clicked()\"), self.reflections_list_window)\n self.connect(self._ui.EditReflectionsButton, Qt.SIGNAL(\n \"clicked()\"), self.edit_reflections_window)\n self.connect(self._ui.AffineButton,\n Qt.SIGNAL(\"clicked()\"), self.affine)\n self.connect(self._ui.AddCrystalButton, Qt.SIGNAL(\n \"clicked()\"), self.add_select_crystal)\n# self.connect(self._ui.alattice_value, Qt.SIGNAL(\"textEdited()\"), self.on_alattice_value_textEdited)\n# Works with a plain QEditValue but not with TaurusQEdit ...\n\n @classmethod\n def getQtDesignerPluginInfo(cls):\n ret = TaurusWidget.getQtDesignerPluginInfo()\n ret['module'] = 'ubmatrix'\n ret['group'] = 'Taurus Containers'\n ret['icon'] = ':/designer/frame.png'\n ret['container'] = False\n return ret\n\n def setModel(self, model):\n\n self.model = model\n\n if model != None:\n self.device = taurus.Device(model)\n\n self.update_values()\n \n uxmodel = model + \"/ux\"\n self._ui.taurusuxvalue.setModel(uxmodel)\n self._ui.taurusuxeditvalue.setModel(uxmodel)\n uymodel = model + \"/uy\"\n self._ui.taurusuyvalue.setModel(uymodel)\n self._ui.taurusuyeditvalue.setModel(uymodel)\n uzmodel = model + \"/uz\"\n self._ui.taurusuzvalue.setModel(uzmodel)\n self._ui.taurusuzeditvalue.setModel(uzmodel)\n amodel = model + \"/a\"\n self._ui.taurusalatticevalue.setModel(amodel)\n self._ui.taurusalatticeeditvalue.setModel(amodel)\n bmodel = model + \"/b\"\n self._ui.taurusblatticevalue.setModel(bmodel)\n self._ui.taurusblatticeeditvalue.setModel(bmodel)\n cmodel = model + \"/c\"\n self._ui.taurusclatticevalue.setModel(cmodel)\n self._ui.taurusclatticeeditvalue.setModel(cmodel)\n alphamodel = model + \"/alpha\"\n self._ui.taurusalphalatticevalue.setModel(alphamodel)\n self._ui.taurusalphalatticeeditvalue.setModel(alphamodel)\n betamodel = model + 
\"/beta\"\n self._ui.taurusbetalatticevalue.setModel(betamodel)\n self._ui.taurusbetalatticeeditvalue.setModel(betamodel)\n gammamodel = model + \"/gamma\"\n self._ui.taurusgammalatticevalue.setModel(gammamodel)\n self._ui.taurusgammalatticeeditvalue.setModel(gammamodel)\n psirefhmodel = model + \"/psirefh\"\n self._ui.taurusvalue_psirefh.setModel(psirefhmodel)\n self._ui.tauruseditvalue_psirefh.setModel(psirefhmodel)\n psirefkmodel = model + \"/psirefk\"\n self._ui.taurusvalue_psirefk.setModel(psirefkmodel)\n self._ui.tauruseditvalue_psirefk.setModel(psirefkmodel)\n psireflmodel = model + \"/psirefl\"\n self._ui.taurusvalue_psirefl.setModel(psireflmodel)\n self._ui.tauruseditvalue_psirefl.setModel(psireflmodel)\n wavelengthmodel = model + \"/wavelength\"\n self._ui.tauruswavelengthvalue.setModel(wavelengthmodel)\n\n # Set model to engine and modes\n\n enginemodel = model + '/engine'\n self._ui.taurusLabelEngine.setModel(enginemodel)\n\n self.enginescombobox = PrivateComboBox(self)\n self.enginescombobox.setGeometry(QtCore.QRect(130, 460, 221, 27))\n self.enginescombobox.setObjectName(\"engineslist\")\n\n self.enginescombobox.loadItems(self.device.enginelist)\n\n self.connect(self.enginescombobox, Qt.SIGNAL(\n \"currentIndexChanged(QString)\"), self.onEngineChanged)\n\n enginemodemodel = model + '/enginemode'\n self._ui.taurusLabelEngineMode.setModel(enginemodemodel)\n\n self.enginemodescombobox = PrivateComboBox(self)\n self.enginemodescombobox.setGeometry(QtCore.QRect(130, 500, 221, 27))\n self.enginemodescombobox.setObjectName(\"enginemodeslist\")\n\n self.enginemodescombobox.loadItems(self.device.enginemodelist)\n\n self.connect(self.enginemodescombobox, Qt.SIGNAL(\n \"currentIndexChanged(QString)\"), self.onModeChanged)\n\n # Set model to crystal\n\n crystalmodel = model + '/crystal'\n self._ui.taurusLabelCrystal.setModel(crystalmodel)\n\n self.crystalscombobox = PrivateComboBox(self)\n self.crystalscombobox.setGeometry(QtCore.QRect(130, 540, 221, 27))\n self.crystalscombobox.setObjectName(\"crystallist\")\n\n self.crystalscombobox.loadItems(self.device.crystallist)\n\n self.connect(self.crystalscombobox, Qt.SIGNAL(\n \"currentIndexChanged(QString)\"), self.onCrystalChanged)\n \n \n def onEngineChanged(self, enginename):\n self.device.write_attribute(\"engine\", str(enginename))\n\n def onModeChanged(self, modename):\n self.device.write_attribute(\"enginemode\", str(modename))\n\n def onCrystalChanged(self, crystalname):\n if str(crystalname) != \"\":\n self.device.write_attribute(\"crystal\", str(crystalname))\n\n\n def update_values(self):\n ub_values = self.device.ubmatrix\n self._ui.taurusub11value.setValue(ub_values[0][0])\n self._ui.taurusub12value.setValue(ub_values[0][1])\n self._ui.taurusub13value.setValue(ub_values[0][2])\n self._ui.taurusub21value.setValue(ub_values[1][0])\n self._ui.taurusub22value.setValue(ub_values[1][1])\n self._ui.taurusub23value.setValue(ub_values[1][2])\n self._ui.taurusub31value.setValue(ub_values[2][0])\n self._ui.taurusub32value.setValue(ub_values[2][1])\n self._ui.taurusub33value.setValue(ub_values[2][2])\n\n global flag_update\n if flag_update:\n all_items = [self.crystalscombobox.itemText(i) for i in range(self.crystalscombobox.count())]\n for crys in self.device.crystallist:\n if crys not in all_items:\n self.crystalscombobox.addItem(crys)\n for i in range(self.crystalscombobox.count()):\n if self.crystalscombobox.itemText(i) not in self.device.crystallist:\n self.crystalscombobox.removeItem(i)\n flag_update = 1\n \n def compute_ub(self):\n 
index = [0, 1]\n\n self.device.write_attribute(\"computeub\", index)\n self.update_values()\n \n def reflections_list_window(self):\n\n reflections = self.device.reflectionlist\n\n nb_ref = 0\n xindex = 20\n xh = 70\n xk = 150\n xl = 230\n xrelevance = 330\n xaffinement = 380\n xangle1 = 430\n xangle2 = 510\n xangle3 = 590\n xangle4 = 670\n xangle5 = 750\n xangle6 = 830\n yhkl = 100\n w = ReflectionsList()\n\n self.taurusValueIndex = []\n self.taurusValueH = []\n self.taurusValueK = []\n self.taurusValueL = []\n self.taurusValueRelevance = []\n self.taurusValueAffinement = []\n self.taurusValueAngle1 = []\n self.taurusValueAngle2 = []\n self.taurusValueAngle3 = []\n self.taurusValueAngle4 = []\n self.taurusValueAngle5 = []\n self.taurusValueAngle6 = []\n\n if reflections != None:\n for ref in reflections:\n if nb_ref == 0:\n self.rl_label1_7 = QtGui.QLabel(w)\n self.rl_label1_7.setGeometry(\n QtCore.QRect(xangle1 + 20, 70, 51, 20))\n self.rl_label1_7.setObjectName(\"rl_label1_7\")\n # self.testlabel.setLayoutDirection(QtCore.Qt.RightToLeft)\n self.rl_label1_8 = QtGui.QLabel(w)\n self.rl_label1_8.setGeometry(\n QtCore.QRect(xangle2 + 20, 70, 71, 20))\n self.rl_label1_8.setObjectName(\"rl_label1_8\")\n # self.testlabel.setLayoutDirection(QtCore.Qt.RightToLeft)\n self.rl_label1_9 = QtGui.QLabel(w)\n self.rl_label1_9.setGeometry(\n QtCore.QRect(xangle3 + 20, 70, 41, 20))\n self.rl_label1_9.setObjectName(\"rl_label1_9\")\n self.rl_label1_10 = QtGui.QLabel(w)\n self.rl_label1_10.setGeometry(\n QtCore.QRect(xangle4 + 20, 70, 41, 20))\n self.rl_label1_10.setObjectName(\"rl_label1_10\")\n try:\n self.angle_names = self.device.motorroles\n except: # Only for compatibility with older device servers; derive the motor count from the reflection length\n self.angle_names = []\n nb_motors = len(ref) - 6\n if nb_motors == 4:\n self.angle_names.extend([\"omega\", \"chi\", \"phi\", \"theta\"])\n elif nb_motors == 6:\n self.angle_names.extend([\"mu\", \"th\", \"chi\", \"phi\", \"gamma\", \"delta\"])\n # 4circles diffractometer\n if len(ref) == 10:\n self.rl_label1_7.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[0], None, QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_8.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[1], None, QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_9.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[2], None, QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_10.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[3], None, QtGui.QApplication.UnicodeUTF8))\n # 6 circles diffractometer\n elif len(ref) == 12:\n self.rl_label1_11 = QtGui.QLabel(w)\n self.rl_label1_11.setGeometry(\n QtCore.QRect(xangle5 + 20, 70, 71, 20))\n self.rl_label1_11.setObjectName(\"rl_label1_11\")\n self.rl_label1_12 = QtGui.QLabel(w)\n self.rl_label1_12.setGeometry(\n QtCore.QRect(xangle6 + 20, 70, 41, 20))\n self.rl_label1_12.setObjectName(\"rl_label1_12\")\n self.rl_label1_7.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[0], None, QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_8.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[1], None, QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_9.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[2], None, QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_10.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[3], None, 
QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_11.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[4], None, QtGui.QApplication.UnicodeUTF8))\n self.rl_label1_12.setText(QtGui.QApplication.translate(\n \"Form\", self.angle_names[5], None, QtGui.QApplication.UnicodeUTF8))\n\n self.taurusValueIndex.append(TaurusValueLineEdit(w))\n self.taurusValueIndex[nb_ref].setGeometry(\n QtCore.QRect(xindex, 100 + 30 * (nb_ref), 41, 27))\n self.taurusValueIndex[nb_ref].setReadOnly(True)\n indexname = \"taurusValueIndex\" + str(nb_ref + 2)\n self.taurusValueIndex[nb_ref].setObjectName(indexname)\n self.taurusValueIndex[nb_ref].setValue(int(ref[0]))\n\n self.taurusValueH.append(TaurusValueLineEdit(w))\n self.taurusValueH[nb_ref].setGeometry(\n QtCore.QRect(xh, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueH[nb_ref].setReadOnly(True)\n hname = \"taurusValueH\" + str(nb_ref + 2)\n self.taurusValueH[nb_ref].setObjectName(hname)\n self.taurusValueH[nb_ref].setValue(\"%10.4f\" % ref[1])\n\n self.taurusValueK.append(TaurusValueLineEdit(w))\n self.taurusValueK[nb_ref].setGeometry(\n QtCore.QRect(xk, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueK[nb_ref].setReadOnly(True)\n kname = \"taurusValueK\" + str(nb_ref + 2)\n self.taurusValueK[nb_ref].setObjectName(kname)\n self.taurusValueK[nb_ref].setValue(\"%10.4f\" % ref[2])\n\n self.taurusValueL.append(TaurusValueLineEdit(w))\n self.taurusValueL[nb_ref].setGeometry(\n QtCore.QRect(xl, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueL[nb_ref].setReadOnly(True)\n lname = \"taurusValueL\" + str(nb_ref + 2)\n self.taurusValueL[nb_ref].setObjectName(lname)\n self.taurusValueL[nb_ref].setValue(\"%10.4f\" % ref[3])\n\n self.taurusValueRelevance.append(TaurusValueLineEdit(w))\n self.taurusValueRelevance[nb_ref].setGeometry(\n QtCore.QRect(xrelevance, 100 + 30 * (nb_ref), 41, 27))\n self.taurusValueRelevance[nb_ref].setReadOnly(True)\n relevancename = \"taurusValueRelevance\" + str(nb_ref + 2)\n self.taurusValueRelevance[nb_ref].setObjectName(relevancename)\n self.taurusValueRelevance[nb_ref].setValue(int(ref[4]))\n\n self.taurusValueAffinement.append(TaurusValueLineEdit(w))\n self.taurusValueAffinement[nb_ref].setGeometry(\n QtCore.QRect(xaffinement, 100 + 30 * (nb_ref), 41, 27))\n self.taurusValueAffinement[nb_ref].setReadOnly(True)\n affinementname = \"taurusValueAffinement\" + str(nb_ref + 2)\n self.taurusValueAffinement[\n nb_ref].setObjectName(affinementname)\n self.taurusValueAffinement[nb_ref].setValue(int(ref[5]))\n\n self.taurusValueAngle1.append(TaurusValueLineEdit(w))\n self.taurusValueAngle1[nb_ref].setGeometry(\n QtCore.QRect(xangle1, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueAngle1[nb_ref].setReadOnly(True)\n angle1name = \"taurusValueAngle1\" + str(nb_ref + 2)\n self.taurusValueAngle1[nb_ref].setObjectName(angle1name)\n self.taurusValueAngle1[nb_ref].setValue(\"%10.4f\" % ref[6])\n\n self.taurusValueAngle2.append(TaurusValueLineEdit(w))\n self.taurusValueAngle2[nb_ref].setGeometry(\n QtCore.QRect(xangle2, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueAngle2[nb_ref].setReadOnly(True)\n angle2name = \"taurusValueAngle2\" + str(nb_ref + 2)\n self.taurusValueAngle2[nb_ref].setObjectName(angle2name)\n self.taurusValueAngle2[nb_ref].setValue(\"%10.4f\" % ref[7])\n\n self.taurusValueAngle3.append(TaurusValueLineEdit(w))\n self.taurusValueAngle3[nb_ref].setGeometry(\n QtCore.QRect(xangle3, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueAngle3[nb_ref].setReadOnly(True)\n angle3name = \"taurusValueAngle3\" + str(nb_ref + 2)\n 
self.taurusValueAngle3[nb_ref].setObjectName(angle3name)\n self.taurusValueAngle3[nb_ref].setValue(\"%10.4f\" % ref[8])\n\n self.taurusValueAngle4.append(TaurusValueLineEdit(w))\n self.taurusValueAngle4[nb_ref].setGeometry(\n QtCore.QRect(xangle4, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueAngle4[nb_ref].setReadOnly(True)\n angle4name = \"taurusValueAngle4\" + str(nb_ref + 2)\n self.taurusValueAngle4[nb_ref].setObjectName(angle4name)\n self.taurusValueAngle4[nb_ref].setValue(\"%10.4f\" % ref[9])\n\n if len(ref) == 12:\n self.taurusValueAngle5.append(TaurusValueLineEdit(w))\n self.taurusValueAngle5[nb_ref].setGeometry(\n QtCore.QRect(xangle5, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueAngle5[nb_ref].setReadOnly(True)\n angle5name = \"taurusValueAngle5\" + str(nb_ref + 2)\n self.taurusValueAngle5[nb_ref].setObjectName(angle5name)\n self.taurusValueAngle5[nb_ref].setValue(\"%10.4f\" % ref[10])\n\n self.taurusValueAngle6.append(TaurusValueLineEdit(w))\n self.taurusValueAngle6[nb_ref].setGeometry(\n QtCore.QRect(xangle6, 100 + 30 * (nb_ref), 81, 27))\n self.taurusValueAngle6[nb_ref].setReadOnly(True)\n angle6name = \"taurusValueAngle6\" + str(nb_ref + 2)\n self.taurusValueAngle6[nb_ref].setObjectName(angle6name)\n self.taurusValueAngle6[nb_ref].setValue(\"%10.4f\" % ref[11])\n\n nb_ref = nb_ref + 1\n\n w.resize(930, 100 + nb_ref * 50)\n\n else:\n self.rl_label_nor = QtGui.QLabel(w)\n self.rl_label_nor.setGeometry(\n QtCore.QRect(xangle1 - 50, 110, 300, 20))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(75)\n font.setBold(True)\n self.rl_label_nor.setFont(font)\n self.rl_label_nor.setObjectName(\"rl_label_nor\")\n self.rl_label_nor.setText(QtGui.QApplication.translate(\n \"Form\", \"NO REFLECTIONS\", None, QtGui.QApplication.UnicodeUTF8))\n\n w.show()\n\n def edit_reflections_window(self):\n\n w = ReflectionsEditor()\n w.setModel(self.model)\n\n w.show()\n\n def add_select_crystal(self):\n new_crystal = str(self._ui.NewCrystalLineEdit.text())\n self.device.write_attribute(\"AddCrystal\", new_crystal)\n self.crystalscombobox.loadItems(self.device.crystallist)\n self.device.write_attribute(\"Crystal\", new_crystal)\n\n def affine(self):\n self.device.write_attribute(\"affinecrystal\", 1)\n\ndef main():\n\n parser = taurus.core.util.argparse.get_taurus_parser()\n parser.usage = \"%prog <model>\"\n parser.set_description(\n \"a taurus application for setting diffractometer parameters: ubmatrix, lattice, reflections, ...\")\n\n app = taurus.qt.qtgui.application.TaurusApplication(cmd_line_parser=parser,\n app_version=sardana.Release.version)\n app.setApplicationName(\"ubmatrix\")\n args = app.get_command_line_args()\n if len(args) < 1:\n msg = \"model not set (requires diffractometer controller)\"\n parser.error(msg)\n\n w = UBMatrixBase()\n w.model = args[0]\n w.setModel(w.model)\n w.show()\n\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/sardana/taurus/qt/qtgui/extra_hkl/ubmatrix.py","file_name":"ubmatrix.py","file_ext":"py","file_size_in_byte":21897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"270963694","text":"import pandas as pd\n\ndf = pd.read_excel('meddoor/procs.xlsx')\ndf = df.dropna()\n\n\nstring_builder = \"INSERT INTO `iba11`.`Procedures`(`procedureName`)VALUES(\"\nfor i, row in df.iterrows():\n str_ = row['Procs'].strip()\n print 
(string_builder,"'"+str_+"'",');')","sub_path":"meddoor/data/dummy_data.py","file_name":"dummy_data.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34791836","text":"import numpy as np\n# from PIL import Image, ImageDraw\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom utils import helpers\n\nFOLDER = '/home/filipkr/Documents/xjob/videos/test1/'\nCSV_FILE = FOLDER + 'knee-silent_coordinates-IV.csv'\nVIDEO = FOLDER + 'knee-silent.mp4'\nplot = True\nresolution = 600\n\n\ndef annotate(coords, vid):\n    fourcc = cv2.VideoWriter_fourcc(*'XVID')\n\n    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))  # or .get(3)\n    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))  # or .get(4)\n    fps = int(np.round(vid.get(cv2.CAP_PROP_FPS)))\n    vid_new = cv2.VideoWriter(FOLDER + 'annotated-adj.avi', fourcc, fps,\n                              (width, height))\n\n    success, im = vid.read()\n    count = 1\n\n    while success:\n        if count % 10 == 0:\n            print('frame:', count)\n            print('expected:', coords[-1, 0])\n\n        im = cv2.flip(im, 0)\n\n        for i in range(1, len(coords[1, :]), 2):  # len -1?\n            # print('x', coords[count, :])\n            # print('x', coords[count + 1, :])\n            # print('y', coords[count, i + 1] * height)\n            y = int(np.round((coords[count, i]) * height))\n            x = int(np.round((coords[count, i + 1] + 0.025) * width))\n\n            im = cv2.drawMarker(im, (x, y), (255, 0, 0),\n                                markerType=cv2.MARKER_CROSS,\n                                markerSize=10, thickness=5)\n\n        vid_new.write(im)\n        if plot and count < 3:\n            plt.imshow(im)\n            plt.show()\n        success, im = vid.read()\n        count += 1\n\n    vid_new.release()\n\n    # print(vid_new)\n\n\ndef main():\n    coords = np.genfromtxt(CSV_FILE, delimiter=',')\n    vid = cv2.VideoCapture(VIDEO)\n\n    annotate(coords, vid)\n\n    print(coords.shape)\n    print(coords[1, :])\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"fix-annotate.py","file_name":"fix-annotate.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"495755844","text":"'''\nThere are n oranges in the kitchen. Each day you choose one of the following ways to eat them:\nEat one orange.\nIf the number of remaining oranges n is divisible by 2, you may eat n/2 oranges.\nIf the number of remaining oranges n is divisible by 3, you may eat 2*(n/3) oranges.\nYou may only pick one of the three options each day.\nReturn the minimum number of days needed to eat all n oranges.\n\nExample 1:\nInput: n = 10\nOutput: 4\nExplanation: You have 10 oranges in total.\nDay 1: eat 1 orange, 10 - 1 = 9 remain.\nDay 2: eat 6 oranges, 9 - 2*(9/3) = 9 - 6 = 3 remain. (9 is divisible by 3)\nDay 3: eat 2 oranges, 3 - 2*(3/3) = 3 - 2 = 1 remains.\nDay 4: eat the last orange, 1 - 1 = 0 remain.\nYou need at least 4 days to eat the 10 oranges.\n\nExample 2:\nInput: n = 6\nOutput: 3\nExplanation: You have 6 oranges in total.\nDay 1: eat 3 oranges, 6 - 6/2 = 6 - 3 = 3 remain. (6 is divisible by 2)\nDay 2: eat 2 oranges, 3 - 2*(3/3) = 3 - 2 = 1 remains. (3 is divisible by 3)\nDay 3: eat the last orange, 1 - 1 = 0 remain.\nYou need at least 3 days to eat the 6 oranges.\n\nExample 3:\nInput: n = 1\nOutput: 1\n\nExample 4:\nInput: n = 56\nOutput: 6\n\nConstraints:\n1 <= n <= 2*10^9\n'''\n\n\nclass Solution:\n    # @lru_cache(None)\n    def minDays(self, n: int) -> int:\n        # Approach 1:\n        # Cache intermediate results in a dict; before computing, check whether the\n        # result is already there - use it directly if so, otherwise take the min.\n        # Compared with approach 2 this avoids some more repeated work.\n        # 1) with memoization, returning the result stored in the dict\n        # days = {'0': 0, '1': 1}\n        # self.getAns(n, days)\n        # print(days)\n        # return days[str(n)]\n        # 2) with memoization, returning the final result directly, not reading the dict\n        # days = {'0': 0, '1': 1}\n        # return self.getAns(n, days)\n        # Approach 2:\n        # Recurse directly, no helper function needed (gets TLE on the LeetCode judge).\n        # With the cache decorator it no longer TLEs, i.e. add @lru_cache(None) above the method.\n        # Some repeated work remains, e.g. for n == 18 the n = 3 case is computed twice.\n        if n<=1:\n            return n\n        return 1+min(self.minDays(n//2)+(n%2), self.minDays(n//3)+(n%3))\n\n    def getAns(self, n, dic):\n        mins = n\n        if str(n) in dic:\n            return dic[str(n)]\n        mins = min(mins, self.getAns(n//2, dic)+(n % 2))\n        mins = min(mins, self.getAns(n//3, dic)+(n % 3))\n        mins += 1\n        dic[str(n)] = 
mins\n        return mins\n    # My first idea was also dp, but that dp computes every single n bottom-up and easily TLEs,\n    # while in practice many of those n are never actually needed.\n    # E.g. for n=16 only n=1,2,3,5,8,16 have to be computed;\n    # for n=32 only n=1,2,3,4,5,8,10,16,32.\n    # Skipping the rest saves a lot of time (the answer set tests n=89084693).\n    # def minDays(self, n: int) -> int:\n    #     dp = [0, 1, 2, 2]\n    #     i = 4\n    #     while i <= n:\n    #         tmp = n\n    #         if i % 2 == 0 and i % 3 == 0:\n    #             tmp = min(tmp, dp[i-1]+1, dp[i//2]+1, dp[i//3]+1)\n    #         elif i % 2 == 0 and i % 3 != 0:\n    #             tmp = min(tmp, dp[i-1]+1, dp[i//2]+1)\n    #         elif i % 2 != 0 and i % 3 == 0:\n    #             tmp = min(tmp, dp[i-1]+1, dp[i//3]+1)\n    #         else:\n    #             tmp = min(tmp, dp[i-1]+1)\n    #         dp.append(tmp)\n    #         i+=1\n    #     return dp[n]\n\n\ns = Solution()\nn = 89084693\nprint(s.minDays(n))\n","sub_path":"周赛/202/5490.吃掉n个橘子最少天数.py","file_name":"5490.吃掉n个橘子最少天数.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80931206","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpider writing rules\n1 A spider must inherit from StructureSpider\n2 For category crawling, configure item_pattern = () and page_pattern = (), where:\n    1) each element of the item_pattern tuple is the xpath expression for an item link on a category page\n    2) each element of the page_pattern tuple may be the regular expression matching the next-page link, or another rule; for details see https://github.com/ShichaoMa/webWalker/wiki/%E9%80%9A%E7%94%A8%E6%8A%93%E5%8F%96%E9%85%8D%E7%BD%AE%E6%96%B9%E6%B3%95\n3 Provide a static method get_base_loader which takes a response and returns a CustomLoader for one Item\n4 Provide an enrich_data method, decorated with enrich_wrapper, where:\n    1) it receives item_loader and response, and registers the attribute names to scrape with their expressions or values through the item_loader methods (add_value, add_xpath, add_re).\n    2) if this render pass needs to spawn a new request, add (prop, item_loader, request_meta) to the\n    item_collector via the (add, extend) methods exposed by response.meta[\"item_collector\"], where:\n    prop: the field name the sub-item takes inside its parent item, when all fields fetched by the next request are returned as a sub-item.\n    item_loader: the item_loader used to extract the fields of the next request; if the next request returns a sub-item, this item_loader differs from the parent's\n    request_meta: the kwargs needed to build the request, type: dict\n5 The enrich callback for the next request is named enrich_`prop`\n\"\"\"\nfrom . 
import JaySpider\nfrom .utils import CustomLoader, enrich_wrapper\nfrom ..items.nixon_item import NixonItem\n\n\nclass NixonSpider(JaySpider):\n    name = \"nixon\"\n    # these two attributes are used when crawling by category\n    item_xpath = ('//ul/li/div/div/a/@href',)\n    page_xpath = ('start=0',)\n\n    custom_settings = {\n        \"ITEM_PIPELINES\": {\n            'crawling.pipelines.KafkaPipeline': None if JaySpider.debug else 100,\n        },\n    }\n\n    @staticmethod\n    def get_base_loader(response):\n        return CustomLoader(item=NixonItem())\n\n    @enrich_wrapper\n    def enrich_data(self, item_loader, response):\n        item_loader.add_xpath(\"product_id\", '//span[@itemprop=\"productID\"]/text()')\n        item_loader.add_xpath(\"part_number\", '//span[@itemprop=\"productID\"]/text()')\n        item_loader.add_xpath(\"price\", '//span[@id=\"price-sales\"]/text()')\n        item_loader.add_xpath(\"colors\", '//ul/li/a/@data-lgimg')\n        item_loader.add_xpath(\"feature\", '//dl[@class=\"pdp-features_list\"]')\n        item_loader.add_xpath(\"spec\", '//ul[@class=\"product-attributes-group_list\"]')\n        item_loader.add_xpath(\"image_urls\", '//img[@class=\"productthumbnail\"]/@data-lgimg')\n\n    def need_duplicate(self, url):\n        return url","sub_path":"crawling_back/spiders/nixon_spider.py","file_name":"nixon_spider.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59490237","text":"class Solution:\n    def exist(self, board, word):\n        def dfs(pos, coords):\n            coords.add(pos)\n            if len(coords) == len(word): return True\n            x, y = pos\n            next_letter = word[len(coords)]\n            found = False\n            if x > 0 and (x-1, y) not in coords and board[y][x-1] == next_letter: found = found or dfs((x-1, y), coords.copy())\n            if x < len(board[0]) - 1 and (x+1, y) not in coords and board[y][x+1] == next_letter: found = found or dfs((x+1, y), coords.copy())\n            if y > 0 and (x, y-1) not in coords and board[y-1][x] == next_letter: found = found or dfs((x, y-1), coords.copy())\n            if y < len(board) - 1 and (x, y+1) not in coords and board[y+1][x] == next_letter: found = found or dfs((x, y+1), coords.copy())\n            return found\n\n        return any([dfs((i, j), set()) for j, row in enumerate(board) for i, letter in enumerate(row) if letter == word[0]])\n    \ns = Solution()\n#print(s.exist([[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]], \"ABCB\"))\nprint(s.exist([[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"E\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]], \"ABCESEEEFS\"))\n","sub_path":"WordSearch.py","file_name":"WordSearch.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"233756462","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport shelter\nimport samplers\nimport kernels\nimport GP\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom shelter import xmid2p, xmid1p\n\nimport matplotlib\nmatplotlib.rcParams['text.usetex'] = True\nplt.rc('text.latex', preamble=r'\\usepackage{amsmath, amssymb}')\n\n\n\n# =============================================================================================\n# =============================================================================================\n\n\n\nclass shelterEmulator:\n    def __init__(self, nstakes, normalized=True):\n        '''\n        Constructs an M-dimensional emulator of trekking pole shelter quality metrics, \n        using Gaussian process regression to estimate the mean and covariance of prediction\n        sites, and Monte Carlo methods to obtain confidence intervals. 
This class is built\n        particularly for shelters which exhibit reflection symmetry in 2-dimensions, \n        called 'biradial' shelters elsewhere in this repository.\n\n        Parameters\n        ----------\n        nstakes : int\n            Number of stakes for the shelter models that will be emulated\n        normalized : bool\n            whether or not to expect all shelters to be normalized in the x-dimension\n            by util.normalize_footprint\n        '''\n\n        assert(nstakes % 4 == 0), 'number of stakes must be divisible by 4 for biradial models'\n        # compute number of dimensions for free-parameter space:\n        # +2 DOF per stake, plus one pole (remainder set by symmetry)\n        # -1 for the normalization condition on the footprint width\n        self.M = int(2 * (nstakes / 4 + 1) - 1)\n        \n        self.normalized = normalized\n        self.nstakes = nstakes\n        \n        # define data positions and quality metrics\n        # one data point in parameter space takes the form [sy, sx, sy, sx, sy,.... px, py]\n        # where sy,sx are cartesian positions of stakes, and px,py are those of the pole. Data\n        # shall be sorted such that the normalized position sx' is removed after obtaining from \n        # shelter.get_free_params \n        self.x = None\n        self.q = None\n        \n        # define GP grid, posterior, covariance\n        self.prediction_grid = None\n        self.gp_posterior = None\n        self.gp_cov = None\n\n    # ----------------------------------------------------------------------------------------\n\n    def sample_space(self, N, method = 'random'):\n        '''\n        Samples parameter space; each sample is a shelter with a particular geometry, and each\n        sample output is the shelter quality metric.\n\n        Parameters\n        ----------\n        N : int\n            The number of samples to draw\n        method : string\n            The sampling method to use\n        '''\n\n        if(method == 'random'):\n            all_x, self.q = samplers.random_sampling(self.nstakes, N, normed=True)\n            self.noise = np.zeros(len(self.q))\n            \n            # transform into format described in constructor\n            self.x = np.zeros((N, self.M))\n            for i in range(N):\n                this_x = all_x[i]\n                \n                # find the stake with largest x-value for each sample (this one is normalized,\n                # and thus needs to be removed from the free parameters)\n                norm_stake = np.argmax(all_x[i,:-1,0])\n                \n                # move stake with largest x-value to the front of the array\n                this_x[[0, norm_stake]] = this_x[[norm_stake, 0]]\n                \n                # clip normalized stake and add this point in parameter space to class attribute\n                self.x[i] = np.ravel(all_x[i])[1:]\n        \n        else:\n            pass\n    \n    # ----------------------------------------------------------------------------------------\n    \n    def build_gp_grid(self, n, dim_limits=None):\n        '''\n        Builds the M-dimensional prediction grid over which the emulator will return results\n\n        Parameters\n        ----------\n        n : int\n            number of grid points in each dimension; grid will be composed of (n^M) total points\n        dim_limits : (M,2) array, optional\n            grid limits on each dimension\n        '''\n        \n        \n        if(dim_limits is None and self.normalized == True):\n            dim_limits = np.vstack([np.zeros(self.M), np.ones(self.M)]).T * 50\n\n        numpoints = int(n ** self.M)\n        arrays = np.zeros((self.M, n))\n        \n        print('building GP grid with {} points'.format(numpoints))\n        for i in range(self.M):\n            arrays[i] = np.linspace(int(dim_limits[i][0]), int(dim_limits[i][1]), n)\n        \n        grid_dims = np.meshgrid(*arrays)\n        self.prediction_grid = np.array([dim.flatten() for dim in grid_dims]).T\n\n\n    def view_grid(self):\n        return\n\n\n    def run_gp_regression(self, tau=None, l=None, vis_result=False, plot_sfx=None):\n        '''\n        Runs a Gaussian Process regression in the free parameter space with data sampled from \n        self.sample_space()\n\n        Parameters\n        
----------\n        tau : (M,) float array\n            variance parameter for the Gaussian kernel\n        l : (M,) float array\n            scale parameter for the Gaussian kernel\n        vis_result : bool\n            whether to plot the regression result (passed through to GP.run_gp_regression)\n        plot_sfx : str\n            plot filename suffix (passed through to GP.run_gp_regression)\n        ''' \n        \n        assert(self.x is not None), 'Must first sample parameter space'\n        \n        # define kernel and prior functions\n        # use these values for the kernel params if not passed; informed by runs with \n        # 4 stakes, biradial symmetry, and normalized parameters\n        if(tau is None):\n            tau = np.ones(self.M) * 0.1\n        if(l is None):\n            l = np.ones(self.M) * 0.01\n        self.kernel = lambda x,xp: kernels.sqExpNd(x, xp, tau=tau, l=l)\n        self.flat_prior = lambda x: np.zeros((x.shape[0]))\n        \n        print('running GP regression with hyperparameters tau={}, l={}'.format(tau, l))\n        gp_result = GP.run_gp_regression(self.flat_prior, self.kernel, self.x, self.q,\n                                         self.prediction_grid, self.noise, plot=vis_result, plot_sfx=plot_sfx)\n        self.gp_posterior = gp_result[0]\n        self.gp_cov = gp_result[1]\n\n    \n    \n\n\n    def mc_confidence(self):\n        return\n\nif __name__ == '__main__':\n    ff = shelterEmulator(4)\n    ff.build_gp_grid(10)\n    ff.sample_space(100)\n    \n\n\n\n    \ndef random_sampling(nstakes, N, symmetry = 'biradial'):\n\n    if(symmetry == 'biradial'):\n        assert(nstakes % 4 == 0), 'nstakes must be divisible by 4'\n        free_stakes = int((nstakes/4))\n    if(symmetry == 'bilateral'): \n        assert(nstakes % 2 == 0), 'nstakes must be divisible by 2'\n        free_stakes = int((nstakes/2))\n    \n    g = shelter.shelter(nstakes, symmetry=symmetry)\n    sx = np.zeros((N, free_stakes))\n    sy = np.zeros((N, free_stakes))\n    px = np.zeros(N)\n    py = np.zeros(N)\n    ve = np.zeros(N)\n    wp = np.zeros(N)\n    pp = np.zeros(N)\n\n    for i in range(N):\n        if(i%100 == 0): print(i)\n        g.sample_footprint()\n        g.sample_poles()\n        g.pitch()\n        fp = g.get_free_params()\n        sx[i] = fp[0][:,0]\n        sy[i]= fp[0][:,1] / sx[i]\n        px[i]= fp[1][0] / sx[i]\n        py[i]= fp[1][1] / (sy[i] * sx[i])\n        ve[i] = shelter.compute_volumetric_efficiency(g)\n        wp[i] = shelter.compute_weather_performance(g)\n    pp = (ve/np.max(ve)) * (wp/np.max(wp))\n    \n    f = plt.figure(figsize=(10,8))\n    ax4 = f.add_subplot(221, projection='3d')\n    ax5 = f.add_subplot(222, projection='3d')\n    ax6 = f.add_subplot(223, projection='3d')\n    \n    sctr4 = ax4.scatter(px, py, sy[:,0], c=ve, marker='o', cmap=plt.cm.viridis, alpha=0.5)\n    sctr5 = ax5.scatter(px, py, sy[:,0], c=wp, marker='o', cmap=plt.cm.viridis, alpha=0.5)\n    sctr6 = ax6.scatter(px, py, sy[:,0], c=pp, marker='o', cmap=plt.cm.viridis, alpha=0.5)\n\n    scatters = [sctr4, sctr5, sctr6]\n    metrics = [r'$\\epsilon_V$', r'$P_W$', r'$\\epsilon_VP_W$']\n    axes = [ax4, ax5, ax6]\n    xmid_s = 60\n    xmid_lw = 2\n    \n    for i in range(len(axes)):\n        axes[i].scatter([xmid1p.norm_px], [xmid1p.norm_py], [xmid1p.norm_y], color='r', marker='x', s=xmid_s, lw=xmid_lw)\n        axes[i].scatter([xmid2p.norm_px], [xmid2p.norm_py], [xmid2p.norm_y], color='r', marker='^', s=xmid_s, lw=xmid_lw)\n        axes[i].set_xlabel(r'$p_x / s_x$', fontsize=14)\n        axes[i].set_ylabel(r'$p_y / s_y$', fontsize=14)\n        axes[i].set_zlabel(r'$s_y / s_x$', fontsize=14)\n        cbar = f.colorbar(scatters[i], ax=axes[i])\n        cbar.set_label(metrics[i], fontsize=14)\n    plt.tight_layout()\n    plt.show()\n    \nif __name__ == '__main__':\n    random_sampling(4, 1000)\n","sub_path":"final_project/emulator.py","file_name":"emulator.py","file_ext":"py","file_size_in_byte":8663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"240885723","text":"# update words one at a time against a dictionary, using an edit-distance threshold\nimport os\nfrom argparse import ArgumentParser\nfrom pathlib import 
Path\n\nimport editdistance\nimport numpy as np\nfrom tqdm.auto import tqdm\n\n\nDICT_PATH = os.path.join(os.path.dirname(__file__), 'dict-vi.txt')\nDICT_PATH2 = os.path.join(os.path.dirname(__file__), 'brand.txt')\n# DICT_PATH = f\"./dict.txt\"\nprint(DICT_PATH)\n\n\ndef update_dict(dict_paths):\n    \"\"\"\n    update dictionary\n    \"\"\"\n    word_dictionary = []\n    for dict_path in dict_paths:\n        with open(Path(dict_path), 'rt') as f:\n            for line in f.readlines():\n                words = line.split()\n                words = list(map(str.upper, words))\n                word_dictionary.extend(words)\n    return word_dictionary\n\n\ndef update_word_from_dictionary(word, dictionary, threshold=0.3):\n    word = word.upper()\n    distances = np.array([editdistance.eval(\n        word, dict_word) / len(dict_word) for dict_word in dictionary])\n    min_index = np.argmin(distances)\n    min_val = np.min(distances)\n    if min_val < threshold:  # smaller distance is better\n        return dictionary[min_index], min_val, True\n    return word, min_val, False\n\n\nif __name__ == \"__main__\":\n    # example usage:\n    # python update_dict.py ./data/* --threshold 0.3\n\n    word_dictionary = update_dict([DICT_PATH, DICT_PATH2])\n\n    parser = ArgumentParser()\n    parser.add_argument('--txt-paths', nargs='+', type=Path)\n    parser.add_argument('--threshold', type=float, default=0.113)\n    parser.add_argument('--output-dir', type=Path, default='outputs')\n    args = parser.parse_args()\n    txt_paths = args.txt_paths\n    output_dir = Path(args.output_dir)\n    output_dir.mkdir(exist_ok=True, parents=True)\n\n    cache = {}\n    print('read dict completely')\n    replace_word_counts = 0\n    for txt_path in tqdm(txt_paths):\n        output_path = output_dir / txt_path.name\n        with open(txt_path, 'rt', encoding='utf8') as fi, open(output_path, 'wt', encoding='utf8') as fo:\n            for line in fi.readlines():\n                splits = line.strip().split(',')\n                text = ''.join(splits[8:])\n                update_stat = False\n                if text in cache:\n                    new_text, distance, update_stat = cache[text]\n                else:\n                    new_text, distance, update_stat = update_word_from_dictionary(\n                        text, word_dictionary, args.threshold)\n                    cache[text] = (new_text, distance, update_stat)\n                if update_stat:\n                    replace_word_counts += 1\n                # if new_text != text:\n                #     print('[{}] \"{}\" to \"{}\". 
Distance = {}'.format(\n                #         txt_path.name, text, new_text, distance))\n\n                out_line = ','.join(splits[:8]) + f',{new_text}\\n'\n                fo.write(out_line)\n\n    print(f'Total replace words: {replace_word_counts}')\n","sub_path":"external/paddleocr/transformer/vietocr/postprocess/update_dict.py","file_name":"update_dict.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"563065192","text":"import math\nimport collections\nimport heapq\nimport bisect\nimport functools\nimport string\n\n\ndef solve(a, n):\n    a.sort()\n    i, j = 0, 1\n    m = 0\n    while j < n:\n        while a[j] - 3 * a[i] > 0:\n            i += 1\n        m = max(m, j - i + 1)\n        j += 1\n    return n - m\n\n\nif __name__ == '__main__':\n    n = int(input())\n    a = list(map(int, input().split()))\n    res = solve(a, n)\n    print(res)\n","sub_path":"hackerearth/2022/alg/dp/chang-and-the-mathematical-brainbuster.py","file_name":"chang-and-the-mathematical-brainbuster.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"284190463","text":"import sys\nsys.setrecursionlimit(1 << 20)\nINF = float('inf')\n\n\ndef read_int_list():\n    return list(map(int, input().split()))\n\n\ndef read_ints():\n    return map(int, input().split())\n\n\ndef main():\n    N = int(input())\n    A = read_int_list()\n    x = []\n    sign = 1\n    x0 = 0\n    for a in A:\n        x0 += sign * a\n        sign *= -1\n        x.append(x0)\n    for i, a in enumerate(A[:-1]):\n        x.append(2 * a - x[i])\n    print(*x)\n\n\nmain()\n","sub_path":"abc/abc133d.py","file_name":"abc133d.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627754543","text":"import socket  # To manage sockets\nimport select  # To manage many connections on any system\nimport errno\nimport sys\n\nHEADER_LENGTH = 10\n\nIP = \"\"\nPORT = \"\"\n\nwhile (len(IP) and len(PORT)) <= 0:\n    IP = input(\"IP: \")\n    PORT = input(\"PORT: \")\n\nprint('Please type your username below')\nwhile True:\n    my_username = input(\"Username: \")\n    if len(my_username) > 0:\n        break\n    else:\n        print('Invalid username')\n\naddressfamily = socket.AF_INET  # IPv4\nconnection = socket.SOCK_STREAM  # TCP\n\n# Socket created CLIENT\nclient_socket = socket.socket(addressfamily, connection)\nclient_socket.connect((IP, int(PORT)))  # the port was read as a string; connect needs an int\nclient_socket.setblocking(False)\n\n# Username set\nusername = my_username.encode('utf-8')\nusername_header = f\"{len(username):<{HEADER_LENGTH}}\".encode('utf-8')\nclient_socket.send(username_header + username)\n\nwhile True:\n\n    message = input('{} > '.format(my_username))\n\n    # Checks message is not empty\n    if len(message) != 0:\n\n        # Encode message to bytes, prepare header and convert to bytes, like for username above, then send\n        message = message.encode('utf-8')\n        message_header = f\"{len(message):<{HEADER_LENGTH}}\".encode('utf-8')\n        client_socket.send(message_header + message)\n\n    try:\n        # Get and print received messages\n        while True:\n\n            # Receive our \"header\" containing username length, its size is defined and constant\n            username_header = client_socket.recv(HEADER_LENGTH)\n\n            if not len(username_header):\n                print('Data not received')\n                print('Connection closed by the server')\n                sys.exit()\n\n            # Convert, decode, receive username\n            username_length = int(username_header.decode('utf-8').strip())\n            username = client_socket.recv(username_length).decode('utf-8')\n\n            # Decode, receive message\n            message_header = 
client_socket.recv(HEADER_LENGTH)\n message_length = int(message_header.decode('utf-8').strip())\n message = client_socket.recv(message_length).decode('utf-8')\n\n # Print username and respective message\n print('{} > {}'.format(username, message))\n\n\n #Exceptions\n except IOError as e:\n if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:\n print('Reading error: {}'.format(str(e)))\n sys.exit()\n\n # We just did not receive anything\n continue\n\n except Exception as e:\n # Exception, something happened, then exit\n print('Reading error: {}'.format(str(e)))\n sys.exit()\n\n except KeyboardInterrupt as e:\n print('Reading error: {}'.format(str(e)))\n sys.exit()\n","sub_path":"Laboratorio1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52024327","text":"import random\nfrom typing import Dict, List\nimport pandas as pd\n\nfrom football.players import Player, Goalkeeper, Defender, Midfielder, Attacker\nfrom football.names_lists import town_names\n\nteam_suffix = ['Town', 'Rovers', 'United', 'City', '', 'Wanderers',\n 'F.C.']\n\n\ndef team_dataframe(gks: List[Player], defs: List[Player], mids: List[Player], atts: List[Player]) -> pd.DataFrame:\n\n players = gks + defs + mids + atts\n cols = ['Name', 'DOB', 'Age', 'Nationality', 'Position',\n 'Foot', 'Rating', 'Value']\n df = pd.DataFrame(columns=cols)\n\n for i, player in enumerate(players):\n df.loc[i, 'Name'] = player.name\n df.loc[i, 'DOB'] = player.dob\n df.loc[i, 'Age'] = player.age\n df.loc[i, 'Nationality'] = player.nationality\n df.loc[i, 'Position'] = player.position\n df.loc[i, 'Foot'] = player.foot\n df.loc[i, 'Rating'] = player.rating\n df.loc[i, 'Value'] = player.value\n\n return df\n\n\nclass Team:\n\n def __init__(self,\n league=None):\n\n self.__name = random.choice(town_names) + ' ' + random.choice(team_suffix)\n self.__rating = random.randint(1, 10)/10.0\n self.__gks = [Goalkeeper(team=self.__name, team_rating=self.__rating)\n for i in range(3)]\n self.__defs = [Defender(team=self.__name, team_rating=self.__rating)\n for i in range(8)]\n self.__mids = [Midfielder(team=self.__name, team_rating=self.__rating)\n for i in range(7)]\n self.__atts = [Attacker(team=self.__name, team_rating=self.__rating)\n for i in range(5)]\n\n self.__whole_team = self.__gks + self.__defs + self.__mids + self.__atts\n self.__whole_team = {i.name:i for i in self.__whole_team}\n\n self.__equip = team_dataframe(self.__gks, self.__defs,\n self.__mids, self.__atts)\n\n self.__style = random.choice(['442', '343', '532', '433'])\n\n self.__league = league\n\n self.__wins: Dict = {}\n self.__loses: Dict = {}\n self.__draws: Dict = {}\n self.__gf: Dict = {}\n self.__ga: Dict = {}\n self.__matches: Dict = {}\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n\n @property\n def name(self):\n return self.__name\n\n @property\n def rating(self):\n return self.__rating\n\n @property\n def gks(self):\n return self.__gks\n\n @property\n def defs(self):\n return self.__defs\n\n @property\n def mids(self):\n return self.__mids\n\n @property\n def atts(self):\n return self.__atts\n\n @property\n def squadlist(self):\n return self.__equip\n\n @property\n def getteam(self):\n return self.__whole_team\n\n @property\n def style(self):\n return self.__style\n\n @property\n def league(self):\n return self.__league\n\n # @league.setter\n # def league(self, name: str):\n # self.__league = name\n\n 
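# The season-statistics getters below share one pattern: counts live in\n    # dicts keyed by season id, so passing a season returns that season's\n    # tally while omitting it sums over every season on record.\n    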
def played(self, season: str=None):\n if season:\n return self.__wins.get(season, 0) + self.__draws.get(season, 0) + self.__loses.get(season, 0)\n else:\n return sum(list(self.__wins.values())) + sum(list(self.__loses.values())) + sum(list(self.__draws.values()))\n\n def wins(self, season: str=None):\n if season:\n return self.__wins.get(season, 0)\n else:\n return sum(list(self.__wins.values()))\n\n def loses(self, season: str=None):\n if season:\n return self.__loses.get(season, 0)\n else:\n return sum(list(self.__loses.values()))\n\n def draws(self, season: str=None):\n if season:\n return self.__draws.get(season, 0)\n else:\n return sum(list(self.__draws.values()))\n\n def goals_for(self, season: str=None):\n if season:\n return self.__gf.get(season, 0)\n else:\n return sum(list(self.__gf.values()))\n \n def goals_against(self, season: str=None):\n if season:\n return self.__ga.get(season, 0)\n else:\n return sum(list(self.__ga.values()))\n\n def get_match(self, season: str, roundd: str):\n \n if not season or not roundd:\n raise Exception('Need season and roundd identifier')\n else:\n return self.__matches[season][roundd]\n\n def add_match(self, season: str, roundd: str, match):\n \n if season in self.__matches.keys():\n if str(round) in self.__matches[season]:\n self.__matches[season][roundd].append(match)\n else:\n self.__matches[season][roundd] = match\n else:\n self.__matches[season] = {roundd:match}\n\n def win(self, csi: str, goals_for: int, goals_against: int):\n\n if csi == 0:\n raise Exception\n\n if str(csi) in self.__wins.keys():\n self.__wins[str(csi)] = self.__wins[str(csi)] + 1\n else:\n self.__wins[str(csi)] = 1\n\n if str(csi) in self.__gf.keys():\n self.__gf[str(csi)] = self.__gf[str(csi)] + goals_for\n else:\n self.__gf[str(csi)] = goals_for\n\n if str(csi) in self.__ga.keys():\n self.__ga[str(csi)] = self.__ga[str(csi)] + goals_against\n else:\n self.__ga[str(csi)] = goals_against\n\n def lose(self, csi: str, goals_for: int, goals_against: int):\n\n if str(csi) in self.__loses.keys():\n self.__loses[str(csi)] = self.__loses[str(csi)] + 1\n else:\n self.__loses[str(csi)] = 1\n\n if str(csi) in self.__gf.keys():\n self.__gf[str(csi)] = self.__gf[str(csi)] + goals_for\n else:\n self.__gf[str(csi)] = goals_for\n\n if str(csi) in self.__ga.keys():\n self.__ga[str(csi)] = self.__ga[str(csi)] + goals_against\n else:\n self.__ga[str(csi)] = goals_against\n\n def draw(self, csi: str, goals_for: int, goals_against: int):\n\n if str(csi) in self.__draws.keys():\n self.__draws[str(csi)] = self.__draws[str(csi)] + 1\n else:\n self.__draws[str(csi)] = 1\n\n if str(csi) in self.__gf.keys():\n self.__gf[str(csi)] = self.__gf[str(csi)] + goals_for\n else:\n self.__gf[str(csi)] = goals_for\n\n if str(csi) in self.__ga.keys():\n self.__ga [str(csi)] = self.__ga[str(csi)] + goals_against\n else:\n self.__ga[str(csi)] = goals_against\n\n\n def get_player(self, player: str) -> Player:\n return self.__whole_team[player]\n\n \n def pick_team(self) -> List[Player]:\n\n team = self.squadlist\n formation = self.style\n\n gk = team.loc[team.loc[:, 'Position'] == 'gk', :].sort_values(\n ['Rating', 'Value'], ascending=[False, False]\n ).loc[:, 'Name'].values[0]\n gk = [i for i in self.__gks if i.name == gk]\n\n deff = team.loc[team.loc[:, 'Position'] == 'def', :].sort_values(\n ['Rating', 'Value'], ascending=[False, False]\n ).loc[:, 'Name'].values[:int(formation[0])]\n deff = [i for i in self.__defs if i.name in deff]\n\n midd = team.loc[team.loc[:, 'Position'] == 'mid', :].sort_values(\n 
['Rating', 'Value'], ascending=[False, False]\n ).loc[:, 'Name'].values[:int(formation[1])]\n midd = [i for i in self.__mids if i.name in midd]\n\n att = team.loc[team.loc[:, 'Position'] == 'att', :].sort_values(\n ['Rating', 'Value'], ascending=[False, False]\n ).loc[:, 'Name'].values[:int(formation[2])]\n att = [i for i in self.__atts if i.name in att]\n\n team = gk + deff + midd + att\n\n return team\n","sub_path":"src/football/teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"75782961","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.base_request_builder import BaseRequestBuilder\nfrom kiota_abstractions.get_path_parameters import get_path_parameters\nfrom kiota_abstractions.method import Method\nfrom kiota_abstractions.request_adapter import RequestAdapter\nfrom kiota_abstractions.request_information import RequestInformation\nfrom kiota_abstractions.request_option import RequestOption\nfrom kiota_abstractions.serialization import Parsable, ParsableFactory\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from ...........models.o_data_errors.o_data_error import ODataError\n from ...........models.workbook_table_sort import WorkbookTableSort\n from .apply.apply_request_builder import ApplyRequestBuilder\n from .clear.clear_request_builder import ClearRequestBuilder\n from .reapply.reapply_request_builder import ReapplyRequestBuilder\n\nclass SortRequestBuilder(BaseRequestBuilder):\n \"\"\"\n Provides operations to manage the sort property of the microsoft.graph.workbookTable entity.\n \"\"\"\n def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:\n \"\"\"\n Instantiates a new SortRequestBuilder and sets the default values.\n Args:\n path_parameters: The raw url or the Url template parameters for the request.\n request_adapter: The request adapter to use to execute the requests.\n \"\"\"\n super().__init__(request_adapter, \"{+baseurl}/drives/{drive%2Did}/items/{driveItem%2Did}/workbook/worksheets/{workbookWorksheet%2Did}/tables/{workbookTable%2Did}/sort{?%24select,%24expand}\", path_parameters)\n \n async def delete(self,request_configuration: Optional[SortRequestBuilderDeleteRequestConfiguration] = None) -> None:\n \"\"\"\n Delete navigation property sort for drives\n Args:\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n \"\"\"\n request_info = self.to_delete_request_information(\n request_configuration\n )\n from ...........models.o_data_errors.o_data_error import ODataError\n\n error_mapping: Dict[str, ParsableFactory] = {\n \"4XX\": ODataError,\n \"5XX\": ODataError,\n }\n if not self.request_adapter:\n raise Exception(\"Http core is null\") \n return await self.request_adapter.send_no_response_content_async(request_info, error_mapping)\n \n async def get(self,request_configuration: Optional[SortRequestBuilderGetRequestConfiguration] = None) -> Optional[WorkbookTableSort]:\n \"\"\"\n Retrieve the properties and relationships of tablesort object.\n Args:\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: Optional[WorkbookTableSort]\n \"\"\"\n request_info = self.to_get_request_information(\n request_configuration\n )\n from 
...........models.o_data_errors.o_data_error import ODataError\n\n error_mapping: Dict[str, ParsableFactory] = {\n \"4XX\": ODataError,\n \"5XX\": ODataError,\n }\n if not self.request_adapter:\n raise Exception(\"Http core is null\") \n from ...........models.workbook_table_sort import WorkbookTableSort\n\n return await self.request_adapter.send_async(request_info, WorkbookTableSort, error_mapping)\n \n async def patch(self,body: Optional[WorkbookTableSort] = None, request_configuration: Optional[SortRequestBuilderPatchRequestConfiguration] = None) -> Optional[WorkbookTableSort]:\n \"\"\"\n Update the navigation property sort in drives\n Args:\n body: The request body\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: Optional[WorkbookTableSort]\n \"\"\"\n if not body:\n raise TypeError(\"body cannot be null.\")\n request_info = self.to_patch_request_information(\n body, request_configuration\n )\n from ...........models.o_data_errors.o_data_error import ODataError\n\n error_mapping: Dict[str, ParsableFactory] = {\n \"4XX\": ODataError,\n \"5XX\": ODataError,\n }\n if not self.request_adapter:\n raise Exception(\"Http core is null\") \n from ...........models.workbook_table_sort import WorkbookTableSort\n\n return await self.request_adapter.send_async(request_info, WorkbookTableSort, error_mapping)\n \n def to_delete_request_information(self,request_configuration: Optional[SortRequestBuilderDeleteRequestConfiguration] = None) -> RequestInformation:\n \"\"\"\n Delete navigation property sort for drives\n Args:\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: RequestInformation\n \"\"\"\n request_info = RequestInformation()\n request_info.url_template = self.url_template\n request_info.path_parameters = self.path_parameters\n request_info.http_method = Method.DELETE\n if request_configuration:\n request_info.add_request_headers(request_configuration.headers)\n request_info.add_request_options(request_configuration.options)\n return request_info\n \n def to_get_request_information(self,request_configuration: Optional[SortRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:\n \"\"\"\n Retrieve the properties and relationships of tablesort object.\n Args:\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: RequestInformation\n \"\"\"\n request_info = RequestInformation()\n request_info.url_template = self.url_template\n request_info.path_parameters = self.path_parameters\n request_info.http_method = Method.GET\n request_info.headers[\"Accept\"] = [\"application/json\"]\n if request_configuration:\n request_info.add_request_headers(request_configuration.headers)\n request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)\n request_info.add_request_options(request_configuration.options)\n return request_info\n \n def to_patch_request_information(self,body: Optional[WorkbookTableSort] = None, request_configuration: Optional[SortRequestBuilderPatchRequestConfiguration] = None) -> RequestInformation:\n \"\"\"\n Update the navigation property sort in drives\n Args:\n body: The request body\n request_configuration: Configuration for the request such as headers, query parameters, and middleware options.\n Returns: RequestInformation\n \"\"\"\n if not body:\n raise TypeError(\"body cannot be null.\")\n request_info = 
RequestInformation()\n request_info.url_template = self.url_template\n request_info.path_parameters = self.path_parameters\n request_info.http_method = Method.PATCH\n request_info.headers[\"Accept\"] = [\"application/json\"]\n if request_configuration:\n request_info.add_request_headers(request_configuration.headers)\n request_info.add_request_options(request_configuration.options)\n request_info.set_content_from_parsable(self.request_adapter, \"application/json\", body)\n return request_info\n \n @property\n def apply(self) -> ApplyRequestBuilder:\n \"\"\"\n Provides operations to call the apply method.\n \"\"\"\n from .apply.apply_request_builder import ApplyRequestBuilder\n\n return ApplyRequestBuilder(self.request_adapter, self.path_parameters)\n \n @property\n def clear(self) -> ClearRequestBuilder:\n \"\"\"\n Provides operations to call the clear method.\n \"\"\"\n from .clear.clear_request_builder import ClearRequestBuilder\n\n return ClearRequestBuilder(self.request_adapter, self.path_parameters)\n \n @property\n def reapply(self) -> ReapplyRequestBuilder:\n \"\"\"\n Provides operations to call the reapply method.\n \"\"\"\n from .reapply.reapply_request_builder import ReapplyRequestBuilder\n\n return ReapplyRequestBuilder(self.request_adapter, self.path_parameters)\n \n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n @dataclass\n class SortRequestBuilderDeleteRequestConfiguration(BaseRequestConfiguration):\n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n \"\"\"\n Configuration for the request such as headers, query parameters, and middleware options.\n \"\"\"\n \n @dataclass\n class SortRequestBuilderGetQueryParameters():\n \"\"\"\n Retrieve the properties and relationships of tablesort object.\n \"\"\"\n def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n \"\"\"\n Maps the query parameters names to their encoded names for the URI template parsing.\n Args:\n original_name: The original query parameter name in the class.\n Returns: str\n \"\"\"\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"select\":\n return \"%24select\"\n return original_name\n \n # Expand related entities\n expand: Optional[List[str]] = None\n\n # Select properties to be returned\n select: Optional[List[str]] = None\n\n \n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n @dataclass\n class SortRequestBuilderGetRequestConfiguration(BaseRequestConfiguration):\n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n \"\"\"\n Configuration for the request such as headers, query parameters, and middleware options.\n \"\"\"\n # Request query parameters\n query_parameters: Optional[SortRequestBuilder.SortRequestBuilderGetQueryParameters] = None\n\n \n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n @dataclass\n class SortRequestBuilderPatchRequestConfiguration(BaseRequestConfiguration):\n from kiota_abstractions.base_request_configuration import BaseRequestConfiguration\n\n \"\"\"\n Configuration for the request such as headers, query parameters, and middleware options.\n \"\"\"\n 
\n\n","sub_path":"msgraph/generated/drives/item/items/item/workbook/worksheets/item/tables/item/sort/sort_request_builder.py","file_name":"sort_request_builder.py","file_ext":"py","file_size_in_byte":10867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"411819866","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib\n# from sklearn.datasets import fetch_lfw_people\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.contrib.learn.python import SKCompat\nfrom .DataRepresentation import LoadData\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# tf.logging.set_verbosity(tf.logging.INFO)\n\n# Convolutional neural net construction function\n# It describes every layer separately\n \ndef cnn_model_fn(features, labels, mode):\n    # Input Layer, standard square 31*31 gray \n    input_layer = tf.reshape(features, [-1, 31, 31, 1])\n    \n    # Convolutional Layer #1, takes as the input the input_layer, valid padding and the AF is a ReLU\n    conv1 = tf.layers.conv2d(\n        inputs=input_layer,\n        filters=20,\n        kernel_size=[4, 4],\n        padding=\"valid\",\n        activation=tf.nn.relu\n    ) \n    # Normalization Layer #1, experimental normalization layer just after the conv layer\n    # norm1 = tf.layers.local_response_normalization(\n    #    inputs=conv1,\n    #    depth_radius=5,\n    #    alpha=0.0001,\n    #    beta=0.75\n    #)\n    \n\n    # Pooling Layer #1, using max pooling, takes as an input the conv1 layer\n    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n    # Convolutional Layer #2, takes as an input the output of max pooling #1, 40 filters\n    conv2 = tf.layers.conv2d(\n        inputs=pool1,\n        filters=40,\n        kernel_size=[3, 3],\n        padding=\"valid\",\n        activation=tf.nn.relu\n    )\n    \n    # Normalization Layer #2\n    # norm2 = tf.layers.local_response_normalization(\n    #    inputs=conv2,\n    #    depth_radius=5,\n    #    alpha=0.0001,\n    #    beta=0.75\n    #)\n    \n    # Pooling Layer #2, takes as an input the output of the 2nd convolutional layer\n    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n    # Convolutional Layer #3, takes as the input the output of the 2nd max_pooling layer, augmenting the dimensions to 60\n    conv3 = tf.layers.conv2d(\n        inputs=pool2,\n        filters=60,\n        kernel_size=[3, 3],\n        padding='valid',\n        activation=tf.nn.relu\n    )\n    \n    # Pooling Layer #3, final pooling layer\n    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)\n    # Convolutional Layer #4, final convolution layer, the output has 80 in depth\n    conv4 = tf.layers.conv2d(\n        inputs=pool3,\n        filters=80,\n        kernel_size=[2, 2],\n        padding='valid',\n        activation=tf.nn.relu\n    )\n    \n    # Flatten tensor into a batch of vectors\n    # Input Tensor Shape: [batch_size, 1, 2, 80]\n    # Output Tensor Shape: [batch_size, 1 * 2 * 80]\n    conv4_flat = tf.reshape(conv4, [-1, 80])\n    pool3_flat = tf.reshape(pool3, [-1, 80])\n    \n    # Experimental\n    \n    # Concat conv4 and pool3\n    # conc = tf.concat(['conv4_flat','pool3_flat'], axis = -1)\n    \n    # Dense Layer\n    # Densely connected layer with 1024 neurons\n    # Input Tensor Shape: [batch_size, 1 * 2 * 80]\n    # Output Tensor Shape: [batch_size, 1024]\n    dense = tf.layers.dense(inputs=conv4_flat, units=1024, activation=tf.nn.relu)\n\n    # Add dropout operation; each element is dropped with probability 0.6\n    dropout = 
tf.layers.dropout(\n        inputs=dense,\n        rate=0.6,\n        training=mode == learn.ModeKeys.TRAIN\n    )\n\n    # Logits layer\n    # Input Tensor Shape: [batch_size, 1024]\n    # Output Tensor Shape: [batch_size, 10]\n    logits = tf.layers.dense(inputs=dropout, units=10)\n    loss = None\n    train_op = None\n\n    # Calculate Loss (for both TRAIN and EVAL modes)\n    if mode != learn.ModeKeys.INFER:\n        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)\n        loss = tf.losses.softmax_cross_entropy(\n            onehot_labels=onehot_labels,\n            logits=logits\n        )\n\n    # Configure the Training Op (for TRAIN mode)\n    if mode == learn.ModeKeys.TRAIN:\n        train_op = tf.contrib.layers.optimize_loss(\n            loss=loss,\n            global_step=tf.contrib.framework.get_global_step(),\n            learning_rate=0.001,\n            optimizer=\"SGD\"\n        )\n\n    # Generate Predictions\n    predictions = {\n        \"classes\": tf.argmax(\n            input=logits, axis=1),\n        \"probabilities\": tf.nn.softmax(\n            logits, name=\"softmax_tensor\")\n    }\n\n    # Return a ModelFnOps object\n    return model_fn_lib.ModelFnOps(\n        mode=mode,\n        predictions=predictions,\n        loss=loss,\n        train_op=train_op)\n\n    \n# Building the classifier based on the model function that we've created above in the cnn_model_fn\nclassifier = learn.Estimator(\n    model_fn=cnn_model_fn,\n    model_dir=\"../Data/Models\"\n    )\n\ntensors_to_log = {\"probabilities\": \"softmax_tensor\"}\nlogging_hook = tf.train.LoggingTensorHook(\n    tensors=tensors_to_log, every_n_iter=50)\n\n\n# Defining the Training function that will fit the model by using the classifier that we made\ndef Training():\n    print(\"[+] Welcome to the training program.\")\n    X, Y = LoadData()\n    train_data, eval_data, train_labels, eval_labels = train_test_split(X, Y, test_size=0.2, random_state=42)\n    # print(type(train_labels))\n    global classifier\n    # print(\"[+] till now working\")\n    # Set up logging for predictions\n    # Log the values in the \"Softmax\" tensor with label \"probabilities\"\n    # global logging_hook \n    # print(\"[+] till now working\")\n\n    # Train the model\n    classifier.fit(x=train_data, y=train_labels, batch_size=15, steps=10000)\n    # print(\"[+] till now working\")\n    # Configure the accuracy metric for evaluation\n    metrics = {\n        \"accuracy\": learn.MetricSpec(\n            metric_fn=tf.metrics.accuracy,\n            prediction_key=\"classes\"),\n    }\n\n    # Evaluate the model and print results\n    eval_results = classifier.evaluate(\n        x=eval_data,\n        y=eval_labels,\n        metrics=metrics\n    )\n    print(eval_results)\n\n\n# The function that will use the trained model to predict the labels \ndef Solver(Input):\n    global classifier\n    os.chdir('Bin')\n    predictions = classifier.predict(x=Input, as_iterable=True)\n    for i, p in enumerate(predictions):\n        return p['classes']\n    \n# In case of wrong classification, the intervention of the human oracle to correct the labeling \ndef Correction(Input):\n    global classifier\n    X_train = np.empty((15,31,31),dtype=np.float32)\n    Y_train = np.empty((15))\n    cor = int(raw_input('What is the correct answer ?\\n-->'))\n    for i in xrange(15):\n        X_train[i] = Input\n        Y_train[i] = cor \n    classifier.fit(x=X_train, y=Y_train, batch_size=7, steps=3000, monitors=[logging_hook])\n\n    \n__all__ = [\"Training\",\"Solver\",\"Correction\"]\n","sub_path":"Bin/Machines/deepID-v2.py","file_name":"deepID-v2.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614641539","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom 
django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        ('beers', '0003_auto_20150130_0249'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='UserProfile',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('website', models.URLField(blank=True)),\n                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AlterField(\n            model_name='beer',\n            name='avg_rating',\n            field=models.PositiveIntegerField(null=True),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='beer',\n            name='type',\n            field=models.CharField(max_length=128, null=True, choices=[(b'light', b'Light'), (b'dark', b'Dark')]),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"beers/migrations/0004_auto_20150131_2234.py","file_name":"0004_auto_20150131_2234.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512475026","text":"import csv\nimport time\n\n\ndef export_csv(user_input, fieldnames):\n    # caller supplies the row dict and the field names it is keyed by\n    with open('user_data.csv', 'w') as f:\n        writer = csv.DictWriter(f, fieldnames=fieldnames)\n        writer.writerow(user_input)\n\n\ndef import_csv():\n    with open('user_data.csv', 'r') as f:\n        reader = csv.reader(f)\n        data = []\n        for row in reader:\n            data.append(list(row))\n    return data\n","sub_path":"data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"514784001","text":"#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n'''\nlook for newly completed illumina runs and enter them into bauer db\n\nCreated on 2017-11-01\n\n@author: Meghan Correa \n@copyright: 2017 The Presidents and Fellows of Harvard College. 
All rights reserved.\n@license: GPL v2.0\n'''\nimport os, glob, time\nimport logging\nimport json\nfrom datetime import datetime\nfrom odybcl2fastq import config\nfrom odybcl2fastq import constants as const\nimport odybcl2fastq.util as util\nfrom odybcl2fastq.emailbuilder.emailbuilder import buildmessage\nfrom odybcl2fastq.bauer_db import BauerDB\n\nLOG_FILE = const.ROOT_DIR + 'db.log'\nPROCESSED_FILE = 'bauer.processed'\nCOMPLETE_FILE = 'bauer.complete'\nINCOMPLETE_NOTIFIED_FILE = 'bauer.incomplete_notified'\nINCOMPLETE_AFTER_DAYS = 1\nDAYS_TO_SEARCH = 7\n# a hardcoded date not to search before\n# this will be helpful in transitioning from seqprep to odybcl2fastq\nSEARCH_AFTER_DATE = datetime.strptime('May 10 2018', '%b %d %Y')\nREQUIRED_FILES = ['SampleSheet.csv', 'RunInfo.xml']\nPROC_NUM = 1\nFREQUENCY = 60\n\ndef setup_logging():\n    # take level from env or INFO\n    level = os.getenv('ODYBCL2FASTQ_LOGGING_LEVEL', logging.INFO)\n    logging.basicConfig(\n        filename= LOG_FILE,\n        level=level,\n        format='%(asctime)s %(message)s'\n    )\n    logging.getLogger().addHandler(logging.StreamHandler())\n\ndef failure_email(run, cmd, ret_code, std_out, std_err):\n    log = ody_run.get_output_log(run)\n    subject = \"Run DB insert Failed: %s\" % run\n    message = (\"%s\\ncmd: %s\\nreturn code: %i\\nstandard out: %s\\nstandard\"\n               \" error: %s\\nsee log: %s\\n\" % (subject, cmd, ret_code, std_out, std_err, log))\n    send_email(message, subject)\n\ndef send_email(message, subject):\n    logging.warning(message)\n    fromaddr = config.EMAIL['from_email']\n    toemaillist=config.EMAIL['to_email']\n    buildmessage(message, subject, None, fromaddr, toemaillist)\n\ndef run_is_incomplete(dir):\n    now = datetime.now()\n    m_time = datetime.fromtimestamp(os.stat(dir).st_mtime)\n    # filter out if modified before cutover to odybcl2fastq\n    if m_time < SEARCH_AFTER_DATE:\n        return False\n    # filter out if modified after reasonable delay to allow for completion\n    if ((now - m_time).days) <= INCOMPLETE_AFTER_DAYS:\n        return False\n    # filter out if tagged as complete\n    if os.path.isfile(dir + COMPLETE_FILE):\n        return False\n    # filter out if never tagged for processing\n    if not os.path.isfile(dir + PROCESSED_FILE):\n        return False\n    # filter out already notified\n    if os.path.isfile(dir + INCOMPLETE_NOTIFIED_FILE):\n        return False\n    return True\n\ndef need_to_process(dir):\n    now = datetime.now()\n    m_time = datetime.fromtimestamp(os.stat(dir).st_mtime)\n    # filter out if modified before cutover to odybcl2fastq\n    if m_time < SEARCH_AFTER_DATE:\n        return False\n    # filter out if modified outside our search window\n    if ((now - m_time).days) > DAYS_TO_SEARCH:\n        return False\n    # filter out if tagged as processed\n    if os.path.isfile(dir + PROCESSED_FILE):\n        return False\n    # filter out if any required files are missing\n    for req in REQUIRED_FILES:\n        if not os.path.exists(dir + req):\n            return False\n    return True\n\ndef find_runs(filter):\n    # get all subdirectories\n    dirs = sorted(glob.glob(config.SOURCE_DIR + '*/'))\n    runs = []\n    for dir in dirs:\n        if filter(dir):\n            runs.append(dir)\n    return runs\n\ndef get_sample_sheet_path(run_dir):\n    # set default\n    sample_sheet_path = run_dir + 'SampleSheet.csv'\n    return sample_sheet_path\n\ndef notify_incomplete_runs():\n    run_dirs = find_runs(run_is_incomplete)\n    run_dirs_str = \"\\n\".join(run_dirs)\n    if run_dirs:\n        message = \"The following runs failed to be entered into bauer db %s or more days ago:\\n\\n%s\" % (INCOMPLETE_AFTER_DAYS, run_dirs_str)\n        send_email(message, 'BauerDB incomplete runs')\n        for run in run_dirs:\n            
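# drop a marker file so run_is_incomplete() skips runs we have already notified about\n            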
util.touch(run, INCOMPLETE_NOTIFIED_FILE)\n\ndef load_runs(proc_num):\n runs_found = find_runs(need_to_process)\n run_dirs = runs_found[:proc_num]\n if run_dirs:\n logging.info(\"Found %s runs: %s\\nprocessing first %s:\\n%s\\n\" % (len(runs_found), json.dumps(runs_found), len(run_dirs),\n json.dumps(run_dirs)))\n results = {}\n for run_dir in run_dirs:\n util.touch(run_dir, PROCESSED_FILE)\n run = os.path.basename(os.path.normpath(run_dir))\n sample_sheet_path = get_sample_sheet_path(run_dir)\n bauer = BauerDB(sample_sheet_path)\n logging.info(\"Loading run into bauer db:\\n%s\\n\" % (run))\n results[run] = bauer.insert_run(run_dir)\n failed_runs = []\n success_runs = []\n for run, result in results.items():\n if result:\n success_runs.append(run)\n status = 'success'\n util.touch(run_dir, COMPLETE_FILE)\n else:\n failed_runs.append(run)\n status = 'failure'\n\n # success or failure of individual run will be logged from run.py to capture\n # manual runs for the status log\n logging.info(\"Completed %i runs %i success %s and %i failures %s\\n\\n\\n\" %\n (len(results), len(success_runs), json.dumps(success_runs), len(failed_runs), json.dumps(failed_runs)))\n\nif __name__ == \"__main__\":\n try:\n proc_num = os.getenv('ODYBCL2FASTQ_PROC_NUM', PROC_NUM)\n setup_logging()\n # run continuously\n while True:\n # search for new runs\n load_runs(proc_num)\n notify_incomplete_runs()\n # wait before checking for more runs to process\n frequency = os.getenv('ODYBCL2FASTQ_FREQUENCY', FREQUENCY)\n if frequency != FREQUENCY:\n logging.info(\"Frequency is not default: %i\\n\" % frequency)\n time.sleep(frequency)\n except Exception as e:\n logging.exception(e)\n send_email(str(e), 'Odybcl2fastq load_runs.py exception')\n","sub_path":"odybcl2fastq/load_runs.py","file_name":"load_runs.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523486511","text":"#coding=gbk\nimport GateC\nimport numpy as np\nimport struct\nfrom ctypes import *\n\nisBackward = 1\ndata1 = np.array([[2,2,3],[4,5,6]],dtype=np.double)\n#buf = struct.pack(\"P\",byref(data))\ndata2 = np.array([[1,2,3],[4,5,6]],dtype=np.double)\ndata3 = np.array([[1,2,3],[4,5,6]],dtype=np.double)\nresult = GateC.AddGate(data1,data2,0,\"a\")\n\nprint(\"hhh\")\n\n\n\n\ninput = np.array([[1,2,3,4],\n\t\t\t\t [4,5,6,5],\n\t\t\t\t [7,8,9,0],\n\t\t\t\t [1,2,3,4]],dtype=np.double)\n#input = np.random.uniform(1,2,size=(2,1,6,6))\nresult = GateC.PoolGate(input,2,2,0,\"b\")\nprint(result)\n\n#result2 = GateC.PoolGate(result,2,2,1)\n#print(result2)\n\nresult = GateC.AddGate(data1,data2,isBackward,\"a\")\nprint(\"ccc\")\nresult = GateC.PoolGate(input,2,2,isBackward,\"b\")\nprint(\"ddd\")\nresult = GateC.PoolGate(input,2,2,0,\"c\")\nresult = GateC.PoolGate(input,2,2,1,\"c\")","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"9760294","text":"from django.urls import path\nfrom .views import delete_all_customers, batch_delete_customers, export_customers, CustomerReadView, export_customers_batch, export_customers_single, CustomerListView, CustomerCreateView, CustomerUpdateView\nfrom . 
import views\n\nurlpatterns = [\n path('', CustomerListView.as_view(), name='customer-home'),\n path('/update/', CustomerUpdateView.as_view(), name='customer-update'),\n path('read//', CustomerReadView.as_view(), name='customer-read'),\n path('new/', CustomerCreateView.as_view(), name='customer-create'),\n path('export/csv/', export_customers, name='customer-export'),\n path('/export/csv/single', export_customers_single, name='customer-export-single'),\n path('/export/csv/batch', export_customers_batch, name='customer-export_batch'),\n path('/delete', batch_delete_customers, name='customer-delete-batch'),\n path('delete/', delete_all_customers, name='customer-delete-all'),\n]\n","sub_path":"customers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299513703","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quickstimate', '0006_auto_20150716_0224'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='paperestimate',\n old_name='dirll_holes_quantity',\n new_name='drill_holes_quantity',\n ),\n ]\n","sub_path":"quickstimate/migrations/0007_auto_20150716_0230.py","file_name":"0007_auto_20150716_0230.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483668494","text":"def login_disponivel(nome, lista):\n for i in lista:\n if nome not in lista:\n return nome\n if i == nome:\n nome = nome + str(1)\n return nome\n lista_nova = []\n for n in lista:\n lista_nova.append(n[:-1])\n c = 1\n for z in lista_nova:\n if z == nome:\n c += 1\n nome = nome + str(c)\n return nome","sub_path":"backup/user_292/ch168_2020_06_22_18_13_20_898488.py","file_name":"ch168_2020_06_22_18_13_20_898488.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"131437652","text":"#!/usr/bin/env python\n\n'''\n\ndelete.py: wrapper for \"delete\" of a json file for Singularity Hub command line tool.\n\nThis function takes input arguments (not environment variables) of the following:\n\n --key: should be the key to delete from the json file\n --file: should be the json file to read\n\nCopyright (c) 2017, Vanessa Sochat. All rights reserved. \n\n\"Singularity\" Copyright (c) 2016, The Regents of the University of California,\nthrough Lawrence Berkeley National Laboratory (subject to receipt of any\nrequired approvals from the U.S. Dept. of Energy). All rights reserved.\n \nThis software is licensed under a customized 3-clause BSD license. Please\nconsult LICENSE file distributed with the sources of this project regarding\nyour rights to use or distribute this software.\n \nNOTICE. This Software was developed under funding from the U.S. Department of\nEnergy and the U.S. Government consequently retains certain rights. As such,\nthe U.S. Government has been granted for itself and others acting on its\nbehalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software\nto reproduce, distribute copies to the public, prepare derivative works, and\nperform publicly and display publicly, and to permit other to do so. 
\n\n\n'''\n\nimport sys\nimport os\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)))\n\nimport optparse\nfrom helpers.json.main import DELETE\nfrom message import bot\n\ndef get_parser():\n\n parser = optparse.OptionParser(description=\"GET key from json\")\n\n parser.add_option(\"--key\", \n dest='key', \n help=\"key to delete from json\", \n type=str,\n default=None)\n\n parser.add_option(\"--file\", \n dest='file', \n help=\"Path to json file to delete from\", \n type=str,\n default=None)\n\n return parser\n\n\n\ndef main():\n\n parser = get_parser()\n \n try:\n (args,options) = parser.parse_args()\n except:\n sys.exit(0)\n \n if args.key is not None and args.file is not None:\n\n success = DELETE(key=args.key,\n jsonfile=args.file)\n else:\n bot.error(\"--key and --file must be defined for DELETE. Exiting\")\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"libexec/python/helpers/json/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66156529","text":"\"\"\"\nThis command sync the info data between desktop and server\n\"\"\"\nfrom __future__ import absolute_import\nimport os\n\nfrom django.core.management.base import BaseCommand\nfrom django.core import serializers\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom datetime import timedelta\n\nfrom ...models import Info, Author\nfrom reads.models import Read\n\nimport logging\n\nlogger = logging.getLogger('info_collector')\n\nclass Command(BaseCommand):\n args = 'none'\n help = 'sync the info data between desktop and server.'\n\n def handle(self, *args, **options):\n verbosity = options.get('verbosity')\n\n authors = Author.objects.all()\n author_data = serializers.serialize(\n \"json\", authors, indent=2, use_natural_foreign_keys=True, use_natural_primary_keys=True\n )\n open(os.path.join(settings.INFO_SYNC['DUMP_TO'], 'author.json'), 'w').write(author_data)\n\n two_weeks_ago = timezone.now() - timedelta(days=14)\n recent_items = Info.objects.filter(timestamp__gt=two_weeks_ago)\n if verbosity:\n print('exporting %d items' % (recent_items.count()))\n fields = [f.name for f in Info._meta.fields]\n fields = filter(lambda x: x not in ['id', 'content'], fields)\n data = serializers.serialize(\n \"json\", recent_items, indent=2, fields=fields, use_natural_foreign_keys=True, use_natural_primary_keys=True\n )\n open(os.path.join(settings.INFO_SYNC['DUMP_TO'], 'info.json'), 'w').write(data)\n\n reads_items = Read.objects.all()\n if verbosity:\n print('exporting %d items' % (reads_items.count()))\n fields = [f.name for f in Read._meta.fields]\n fields = filter(lambda x: x not in ['id', ], fields)\n data = serializers.serialize(\n \"json\", reads_items, indent=2, fields=fields, use_natural_foreign_keys=True\n )\n open(os.path.join(settings.INFO_SYNC['DUMP_TO'], 'reads.json'), 'w').write(data)\n\n\n cmd = 'rsync -rave ssh {} {}:{}'.format(\n settings.INFO_SYNC['DUMP_TO'], settings.INFO_SYNC['SERVER'], settings.INFO_SYNC['LOAD_FROM']\n )\n if verbosity:\n print(cmd)\n ret = os.system(cmd)\n\n cmd = 'ssh {} \"{}\"'.format(\n settings.INFO_SYNC['SERVER'], settings.INFO_SYNC['LOAD_CMD']\n )\n if verbosity:\n print(cmd)\n ret = os.system(cmd)\n logger.info('sync_info completed 
successfully')\n","sub_path":"info_collector/management/commands/sync_info.py","file_name":"sync_info.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"64203305","text":"# Copyright 2019 VIA Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport sys\nimport os\nimport csv\nimport numpy as np\nimport scipy.io as sio\n\nmat_file = sys.argv[1]\ncsv_file = sys.argv[2]\n\nwith open(csv_file, 'w') as csvfile:\n\n mat = sio.loadmat(mat_file)\n\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(['relative_im_path','class','bbox_x1','bbox_y1','bbox_x2','bbox_y2','test'])\n\n for annotation in mat['annotations'][0]:\n test = np.squeeze(annotation['test'])\n im_path = str(np.squeeze(annotation['relative_im_path']))\n cls = np.squeeze(annotation['class'])\n x1 = np.squeeze(annotation['bbox_x1'])\n y1 = np.squeeze(annotation['bbox_y1'])\n x2 = np.squeeze(annotation['bbox_x2'])\n y2 = np.squeeze(annotation['bbox_y2'])\n\n csvwriter.writerow([im_path, cls, x1, y1, x2, y2, test])\n\n","sub_path":"convert_mat_to_csv.py","file_name":"convert_mat_to_csv.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196212556","text":"from nio import Block\nfrom nio.properties.holder import PropertyHolder\nfrom nio.properties.list import ListProperty\nfrom nio.properties.string import StringProperty\nfrom nio.types.string import StringType\n\n\nclass Criterion(PropertyHolder):\n\n \"\"\" Each instance of this class represents a matching criterion\n for Publisher/Subscriber blocks.\n\n Properties:\n keyword (str): the identifier for the criterion\n rules (list(str)): valid match values\n\n \"\"\"\n keyword = StringProperty(title='Filter Key', default='')\n rule = ListProperty(StringType,\n title='Filter Values (list of acceptable values)')\n\n\nclass TopicsBlock(Block):\n\n \"\"\" Base class for blocks that contain a list of criteria.\n\n Properties:\n criteria (list(Criterion)): A list of criteria to be used\n as needed.\n\n \"\"\"\n criteria = ListProperty(Criterion, title='Topics')\n\n def _flatten_topics(self):\n result = {}\n for c in self.criteria():\n tmp = c.to_dict()\n result[tmp['keyword']] = tmp['rule']\n return result\n","sub_path":"blocks/communication/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326603497","text":"from django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom ImageMatcher.uploader.models import Picture\nfrom ImageMatcher.uploader.forms import PictureForm\nfrom ImageMatcher.uploader.image_processing import process_image\n\ndef list(request):\n # Handle file upload\n if request.method == 'POST':\n form = 
PictureForm(request.POST, request.FILES)\n if form.is_valid():\n target_dens = process_image(request.FILES['imgfile'])\n newimg = Picture(imgfile = request.FILES['imgfile'], density = target_dens)\n newimg.save()\n\n else:\n form = PictureForm()\n\n try:\n uploaded_image = Picture.objects.latest('id')\n related_images = Picture.objects.raw('SELECT * FROM uploader_picture order by abs(\"density\" - %s) asc, id desc limit 4', [uploaded_image.density])[1:4]\n except:\n uploaded_image = None\n related_images = None\n\n # Render list page with the documents and the form\n return render_to_response(\n 'uploader/list.html',\n {'uploaded_image': uploaded_image, 'related_images':related_images, 'form': form},\n context_instance=RequestContext(request)\n )\n","sub_path":"ImageMatcher/uploader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297862184","text":"#!/usr/bin/env python\n'''\nFile: episodes.py\nAuthor: George Ang \nDescription:\n'''\n\nfrom db import db\n\ndef insert(showname=None, seasonnumber=None, episodenumber=None, callback=None):\n conditions = {}\n if showname:\n conditions['showname'] = showname\n if seasonnumber:\n conditions['seasonnumber'] = seasonnumber\n if episodenumber:\n conditions['episodenumber'] = episodenumber\n\n if not conditions:\n if callback:\n callback(None, None)\n\n def _on_found(response, error):\n if error:\n if callback:\n callback(None, error)\n\n if not response:\n db.episode.insert(conditions, callback=callback)\n\n else:\n if callback:\n callback(None, None)\n\n db.episode.find_one(conditions, _on_found)\n\ndef find(showname=None, seasonnumber=None, episodenumber=None, callback=None):\n conditions = {}\n if seasonnumber:\n conditions['seasonnumber'] = seasonnumber\n if episodenumber:\n conditions['episodenumber'] = episodenumber\n if showname:\n conditions['$or'] = [dict(showname=showname), dict(alias=showname)]\n if not conditions:\n return None\n\n db.episode.find_one(conditions, callback=callback)\n\ndef findall(callback):\n return db.episode.find(callback=callback)\n\n","sub_path":"app/amodel/episodes.py","file_name":"episodes.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"118400002","text":"#\n# File:\n# wrf4.py\n#\n# Synopsis:\n# Draws colored streamlines of U,V read off a WRF output file. \n#\n# Categories:\n# Streamlines\n#\n# Author:\n# Mary Haley\n# \n# Date of initial publication:\n# April, 2015\n#\n# Description:\n# This example shows how to read and unstagger U,V data on a WRF\n# output grid, and then draw streamlines. 
The streamlines are\n# dense, so the arrays are strided to cull some of them.\n#\n# Effects illustrated:\n# o Plotting WRF data in a lat/lon projection\n# o Unstaggering WRF data\n# o Drawing streamlines\n# o Coloring streamlines by another field\n# \n# Output:\n# This example produces a colored streamline plot\n# \n# Notes:\n# You will need to include your own WRF output file in place\n# of the one referenced by this example.\n#======================================================================\n\nimport numpy as np\nimport Nio, Ngl, os, sys\n\n#----------------------------------------------------------------------\n# This function takes a WRF variable and unstaggers it along the\n# given dimension.\n#----------------------------------------------------------------------\ndef wrf_unstagger(x):\n rank = len(x.shape)\n if rank < 2:\n print(\"wrf_unstagger: variable must be at least 2-dimensional\")\n return x \n\n xdims = x.dimensions\n if xdims[rank-1].endswith(\"_stag\"):\n dim = \"lon\"\n elif xdims[rank-2].endswith(\"_stag\"):\n dim = \"lat\"\n else:\n print(\"wrf_unstagger: error: couldn't find the staggered dimension\")\n return x \n\n if rank == 4:\n if dim == \"lon\":\n xu = 0.5*(x[:,:,:,:-1] + x[:,:,:,1:])\n else:\n xu = 0.5*(x[:,:,:-1,:] + x[:,:,1:,:])\n elif rank == 3:\n if dim == \"lon\":\n xu = 0.5*(x[:,:,:-1] + x[:,:,1:])\n else:\n xu = 0.5*(x[:,:-1,:] + x[:,1:,:])\n elif rank == 2:\n if dim == \"lon\":\n xu = 0.5*(x[:,:-1] + x[:,1:])\n else:\n xu = 0.5*(x[:-1,:] + x[1:,:])\n return xu\n\n# Read data\nfilename = \"wrfout_d03_2012-04-22_23_00_00\"\nif(not os.path.exists(filename)):\n print(\"You do not have the necessary '%s' file to run this example.\" % filename)\n print(\"You need to supply your own WRF output file\")\n print(\"WRF output files usually have names like '%s'\" % filename)\n sys.exit()\n\n# Read some WRF data\na = Nio.open_file(filename+\".nc\") # Must add \".nc\" suffix for Nio.open_file\nu = a.variables[\"U\"]\nv = a.variables[\"V\"]\nlatu = a.variables[\"XLAT_U\"]\nlonu = a.variables[\"XLONG_U\"]\n\n# Unstagger the data\nua = wrf_unstagger(u)\nva = wrf_unstagger(v)\nlat = wrf_unstagger(latu)\nlon = wrf_unstagger(lonu)\n\n# First timestep, lowest (bottommost) level, every 5th lat/lon\nnl = 0\nnt = 0\nnstep = 5 # a stride to cull some of the streamlines\nu10 = ua[nt,nl,::nstep,::nstep]\nv10 = va[nt,nl,::nstep,::nstep]\nspd = np.sqrt(u10**2+v10**2) \n\n# Open file for graphics\nwks_type = \"png\"\nwks = Ngl.open_wks(wks_type,\"wrf4\")\n\nres = Ngl.Resources()\n\nres.mpDataBaseVersion = \"MediumRes\" # Better map outlines\nres.mpLimitMode = \"LatLon\" # Zoom in on map area of interest\nres.mpMinLatF = np.min(lat[:])-0.1\nres.mpMaxLatF = np.max(lat[:])+0.1\nres.mpMinLonF = np.min(lon[:])-0.1\nres.mpMaxLonF = np.max(lon[:])+0.1\n\nres.mpFillOn = True\nres.mpLandFillColor = \"beige\"\nres.mpOceanFillColor = \"transparent\"\nres.mpInlandWaterFillColor = \"transparent\"\nres.mpGridLatSpacingF = 1\nres.mpGridLonSpacingF = 1\n#res.mpGridAndLimbOn = False\n \nres.stLineThicknessF = 3.0 # Thicker streamlines\nres.stMonoLineColor = False # Use multiple colors for streamlines\n\nres.tiMainString = \"U10/V10 streamlines color by wind speed\"\n\n# Necessary to overlay on map correctly.\nres.vfYArray = lat[0,::nstep,::nstep]\nres.vfXArray = lon[0,::nstep,::nstep]\n\nplot = 
Ngl.streamline_scalar_map(wks,u10,v10,spd,res)\n\nNgl.end()\n\n","sub_path":"examples/wrf4.py","file_name":"wrf4.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119344533","text":"import matplotlib.pyplot as plt\n\nnumbers = [2, 4, 6, 8, 10, 12, 14]\nfaves = [1, 2, 4, 8, 12, 17, 9]\n\nplt.scatter(numbers, faves, s=100)\n\n# Set title, label axes\nplt.title(\"Favorite Numbers\", fontsize=24)\nplt.xlabel(\"Number\", fontsize=14)\nplt.ylabel(\"People Who Appreciate\", fontsize=14)\n\n# Size of tick labels\nplt.tick_params(axis='both', which='major', labelsize=14)\n\nplt.show()\n","sub_path":"04-scatter/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31804504","text":"import csv\nimport math\n\nfrom operator import itemgetter\n\nmovies = {}\nusers = {}\n\n\ndef main():\n # Create Movie objects for each item in u.item file\n with open('u.item', encoding='latin_1') as f:\n reader = csv.reader(f, delimiter='|')\n for row in reader:\n # movie id | movie title | release date | video release date | IMDb URL | genres\n movies[row[0]] = Movie(id=row[0],\n name=row[1],\n release_date=row[2],\n video_release_date=row[3],\n imdb_url=row[4],\n genres=row[5])\n\n # Create user objects for each item in u.user file\n with open('u.user') as f:\n reader = csv.reader(f, delimiter='|')\n for row in reader:\n users[row[0]] = User(id=row[0],\n age=row[1],\n gender=row[2],\n occupation=row[3])\n\n # Add ratings to movie and user objects\n with open('u.data') as f:\n reader = csv.reader(f, delimiter='\\t')\n for row in reader:\n users[row[0]].ratings[row[1]] = {'rating': row[2], 'timestamp': row[3]}\n movies[row[1]].ratings[row[0]] = {'rating': row[2], 'timestamp': row[3]}\n\n # Calculate average rating for each movie and user\n for movie in movies.values():\n movie.avg_rating = Ratings.avg_rating(movie.ratings)\n\n for user in users.values():\n user.avg_rating = Ratings.avg_rating(user.ratings)\n\n Interface.program_loop()\n\n\nclass Movie:\n\n def __init__(self, **kwargs):\n self.id = ''\n self.name = ''\n self.release_date = ''\n self.video_release_date = ''\n self.imdb_url = ''\n self.genres = []\n self.ratings = {}\n self.avg_rating = ''\n self.similar_users = []\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def update_ratings(self, inp):\n self.ratings = inp\n\n\nclass Interface:\n\n def program_loop():\n while True:\n print(\"Welcome to Movie Database\")\n user_input = 0\n while user_input not in ('1', '2', '3'):\n user_input = input('1. See top movies.\\n2. Search by user.\\n3. Quit.\\n>>> ')\n if user_input == '1':\n Interface.show_top_movies()\n elif user_input == '2':\n Interface.user_movies()\n else:\n exit()\n\n def show_top_movies():\n num_of_movies = input('How many movies should be displayed? ')\n top_movies = Ratings.get_top_x(num_of_movies, 150)\n num = 1\n for movie in enumerate(top_movies):\n print(movie[0] + 1, \": \", movie[1][2])\n num += 1\n\n def user_movies():\n user_num = input('Please enter a user number: ')\n user_choice = 0\n while user_choice not in ('1', '2'):\n user_choice = input('1. Top rated, popular movies, you havn\\'t seen.\\n2. 
Movie recommentations.\\n>>> ')\n if user_choice == '1':\n user_movies = Ratings.get_user_top_x(user_num, 10, 150)\n for movie in enumerate(user_movies):\n print(movie[0] + 1, \": \", movie[1][2])\n else:\n user_movies = Ratings.recommended_movies(user_num)\n num = 1\n for movie in user_movies:\n print(num, \": \", movies[movie].name)\n num += 1\n\n def print_movie_list(user_movies):\n for movie in enumerate(movies):\n print(movie[0] + 1, \": \", movie[1][2])\n\n\nclass User:\n\n def __init__(self, **kwargs):\n self.id = ''\n self.name = ''\n self.first_name = ''\n self.middle_name = ''\n self.last_name = ''\n self.email = ''\n self.age = ''\n self.gender = ''\n self.occupation = ''\n self.zip = ''\n self.ratings = {}\n self.avg_rating = ''\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass Ratings:\n\n def avg_rating(ratings):\n all_ratings = [int(x.get('rating')) for x in ratings.values()]\n return \"%.2f\" % float(sum(all_ratings)/len(all_ratings))\n\n def get_top_x(x, min_ratings):\n top_list = []\n for movie in movies.values():\n if (len(movie.ratings)) > int(min_ratings):\n top_list.append([movie.id, movie.avg_rating, movie.name])\n return sorted(top_list, reverse=True, key=lambda y: y[1])[:int(x)]\n\n def get_user_top_x(id, x, min_ratings):\n watched = users[id].ratings.keys()\n top_x = [[movies[k].id, movies[k].avg_rating, movies[k].name] for k in movies.keys() if k not in watched and len(movies[k].ratings) > int(min_ratings)]\n return sorted(top_x, reverse=True, key=lambda y: y[1])[:int(x)]\n\n def euclidean_distance(usr1, usr2):\n \"\"\"\n Given two user ids, give the Euclidean distance between their\n movie ratings on a scale of 0 to 1. 1 means the two lists are\n identical.\n \"\"\"\n usr1_movies = [movie_id for movie_id in users[usr1].ratings.keys()]\n usr2_movies = [movie_id for movie_id in users[usr2].ratings.keys()]\n same_movies = [movie_id for movie_id in usr1_movies if movie_id in usr2_movies]\n users_ratings = {}\n for movie_id in same_movies:\n user1_ratings = users[usr1].ratings.get(movie_id).get('rating')\n user2_ratings = users[usr2].ratings.get(movie_id).get('rating')\n users_ratings[movie_id] = {'user_1': user1_ratings, 'user_2': user2_ratings}\n\n user1_ratings = []\n user2_ratings = []\n\n for movie_id in users_ratings.values():\n user1_ratings.append(int(movie_id['user_1']))\n user2_ratings.append(int(movie_id['user_2']))\n\n # Guard against empty lists.\n if len(user1_ratings) is 0:\n return 0\n\n # Note that this is the same as vector subtraction.\n diffs = [user1_ratings[idx] - user2_ratings[idx] for idx in range(len(user1_ratings))]\n squares = [diff ** 2 for diff in diffs]\n sum_of_squares = sum(squares)\n\n return 1 / (1 + math.sqrt(sum_of_squares))\n\n def similiar_users(usr1):\n \"\"\"Returns a list of lists with [userid, similarity]\"\"\"\n ratings = []\n for usr2 in users.keys():\n ratings.append([usr2, Ratings.euclidean_distance(users[usr1].id, usr2)])\n return sorted(ratings, key=itemgetter(1), reverse=True)[:1000]\n\n def recommended_movies(usr1):\n similiar_users = Ratings.similiar_users(usr1)\n movies_seen = [x for x in users[usr1].ratings.keys()]\n suggested_movies = {}\n\n for user in similiar_users:\n user_movies = list(users[user[0]].ratings.keys())\n for movie in user_movies:\n if movie not in movies_seen:\n user_movie_rating = users[user[0]].ratings[movie]['rating']\n user_movie_weight = user[1] * float(user_movie_rating)\n if movie in suggested_movies:\n suggested_movies[movie] += user_movie_weight\n else:\n 
suggested_movies[movie] = user_movie_weight\n\n sorted_suggestions = []\n for k, v in suggested_movies.items():\n sorted_suggestions.append([k, v])\n\n top_10_suggested = (sorted(sorted_suggestions, key=itemgetter(1), reverse=True)[:10])\n return [x[0] for x in top_10_suggested]\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"movie_lib.py","file_name":"movie_lib.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215985559","text":"import pandas\nfrom geopy.geocoders import Nominatim\n\n#Creating Pandas data frame for csv file\ndf = pandas.read_csv(\"supermarkets.csv\")\n#print(df)\n#creating object of Nominatim\nnom = Nominatim(scheme=\"http\")\n\n#Formating Address Column field in the dataframe to : Road , City , State ZIP, Country\ndf[\"Address\"] = df[\"Address\"]+\", \"+df[\"City\"]+\", \"+df[\"State\"]+\", \"+df[\"Country\"]\n#print(df)\n\n#Iterating through each value of Address field using \"apply\" method of Pandas and sending it as parmeter to geocode method of Nominatim to provide the complete location infromation\n#Creating a new column Coordinates to store complete location information of each Address.\n\ndf[\"Coordinates\"] = df[\"Address\"].apply(nom.geocode) #() are not required after geocode method as its auto applied by \"apply\" method\n\n\n#Creating two new columns Latitude and Longitude to store that infromation in df dataframe\n#We are using lambda to store the values of Coorinates column in a temp variable x and then use x.latitude to pull lattitude information from it\n#One of the value in df[\"Coordinates\"] is None type, so x.latitude wtill Error as it will be NoneType.latitude which was not expcted. So we use an if contoidion\n#to use .latitude if x!=None\ndf[\"Latitude\"] = df[\"Coordinates\"].apply(lambda x:x.latitude if x!=None else None)\ndf[\"Longitude\"] = df[\"Coordinates\"].apply(lambda x:x.longitude if x!=None else None)\n\nprint(\"Latidudes :\\n\",df[\"Latitude\"])\nprint(\"\\nLongitudes :\\n\",df[\"Longitude\"])\n","sub_path":"Pandas/CoordinateTeller.py","file_name":"CoordinateTeller.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83626364","text":"import pandas as pd\n\n\ndef Label_Calculate(data=None, period=10, point=0.00001, profit=200, stoploss=200):\n \"\"\"\n\n :param data:\n :param period:\n :param point:\n :param profit:\n :param stoploss:\n :return:\n \"\"\"\n size = len(data.index)\n res = []\n for i in range(0, size):\n label = 0\n if i >= (size - period):\n res.append(label)\n continue\n else:\n back_df = data.iloc[i + 1:i + 1 + period]\n high_back = back_df.loc[:, ['High']].max().values[0]\n low_back = back_df.loc[:, ['Low']].min().values[0]\n openprice = data['Open'][i]\n if (high_back - openprice) >= profit * point and (openprice - low_back) < stoploss * point:\n label = 1\n if (openprice - low_back) >= profit * point and (high_back - openprice) < stoploss * point:\n label = -1\n res.append(label)\n res = pd.Series(res, name='Label')\n data = data.join(res)\n return data\n","sub_path":"Label_Calculate/Label_Calculate.py","file_name":"Label_Calculate.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248199328","text":"# Comparações: ==\"IQUAL\" !=\"DIFERENTE\" >\"MAIOR\" <\"MENOR\"\n# >=\"MAIOR OU IGUAL\" <=\"MENOR OU IGUAL\"\n# AND e 
OR, and ELSE (\"OTHERWISE\")\n\n# Example:\n\na = 2\nb = 1\nc = 20\n\nif a > b: # that is, if A is greater than B we print\n    print(a, 'Only if it is greater than:', b)\n\n'''we can see in the output below that this was printed'''\n\nif a > c: # that is, if A is greater than C, but C is 20\n    print(a, 'Only if it is greater than 20:', c)\n'''we can see that the 2nd command line was NOT printed'''\n\n# to solve this we use ELSE (\"otherwise\"), which responds by\n# producing the answer or returning the result, see below:\n\nif a > c:\n    print(a, 'Only if it is greater than 20 or C:', c)\nelse: # that is, with ELSE we made a comparison and the program decided\n    print(a, 'is not greater than:', c)\n\n","sub_path":"PycharmProjects/pythonbasico/aula03_operadores_logicos_if_e_else.py","file_name":"aula03_operadores_logicos_if_e_else.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"148441538","text":"import threading\r\n\r\ndata = 1000000\r\nthread_list = []\r\n\r\nmylock = threading.Lock() # create a thread lock\r\n\r\ndef drink(max):\r\n\tglobal data\r\n\tfor i in range(0, max):\r\n\t\tmylock.acquire() # acquire the lock\r\n\t\tdata -= 1\r\n\t\tmylock.release() # release the lock\r\n\r\nfor i in range(0,2):\r\n\tthread_inst = threading.Thread(target=drink, args=(500000,))\r\n\tthread_list.append(thread_inst)\r\n\tthread_inst.start()\r\n\r\nfor thread in thread_list:\r\n\tthread.join()\r\n\r\n\r\nprint(data)","sub_path":"courses/w04_py/source/threads/mythreading5.py","file_name":"mythreading5.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"221709046","text":"import os\nimport re\nimport json\nimport subprocess,shlex\nfrom subprocess import check_output\nfrom threading import Timer\n\nhw1 = ['???','sumList','digitsOfInt', 'additivePersistence', 'digitalRoot', 'listReverse', 'palindrome']\nhw2 = ['???','assoc','removeDuplicates', 'wwhile','fixpoint', 'exprToString', 'eval', 'build']\nhw3 = ['???','sqsum', 'pipe', 'sepConcat', 'stringOfList', 'clone', 'padZero', 'removeZero', 'bigAdd', 'mulByDigit', 'bigMul']\n\nannotation_hw1 = [\"\",\\\n    \" : int list -> int\", \\\n    \" : int -> int list\", \\\n    \" : int -> int\", \\\n    \" : int -> int\", \\\n    \" : 'a list -> 'a list\", \\\n    \" : string -> bool\"]\n\nannotation_hw2 = [\"\",\\\n    \" : 'a * 'b * ('b * 'a) list -> 'a \", \\\n    \" : 'a list -> 'a list\", \\\n    \" : ('a -> 'a * bool) * 'a -> 'a\", \\\n    \": ('a -> 'a) * 'a -> 'a\", \\\n    \" : expr -> string\", \\\n    \" : expr * float * float -> float\", \\\n    \": ((int * int -> int) * int) -> expr\"]\n\nannotation_hw3 = [\"\",\\\n    \" : int list -> int \", \\\n    \" : ('a -> 'a) list -> ('a -> 'a)\", \\\n    \" : string -> string list -> string\", \\\n    \" : ('a -> string) -> 'a list -> string\", \\\n    \" : 'a -> int -> 'a list\", \\\n    \" : int list -> int list -> int list * int list\", \\\n    \" : int list -> int list\", \\\n    \" : int list -> int list -> int list\", \\\n    \" : int -> int list -> int list \", \\\n    \" : int list -> int list -> int list\"]\n\ndef add_annotation(annotation, problem_name, code):\n    # match the variables\n    var_regex = '(?<=' + problem_name + ' )(.*?)(?=\\\=)'\n    var = re.search(var_regex, code)\n    # match the string to replace\n    my_regex = '(?<=' + problem_name + ' )(.*?)\\\='\n    result = re.sub(my_regex, lambda match: replace(match, var.group(), annotation), code,1)\n    #print (result)\n    return result\n\ndef replace(match, variables, annotation):\n    
replacement = annotation\n replacement += ' = fun '\n replacement += variables\n replacement += ' -> '\n return replacement\n\ndef find_annotation(indice):\n \n annotation = ''\n if(indice['problem'] == '???'):\n print(\"helper method here\")\n return(annotation)\n\n if (indice['hw'] == 'hw1'):\n annotation = annotation_hw1[hw1.index(indice['problem'])]\n elif (indice['hw'] == 'hw2'):\n annotation = annotation_hw2[hw2.index(indice['problem'])]\n elif (indice['hw'] == 'hw3'):\n annotation = annotation_hw3[hw3.index(indice['problem'])]\n return(annotation)\n\ndef build_dict(bad, annotation, indice):\n\n dic = dict()\n dic['bad'] = bad\n dic['prob'] = indice['problem']\n dic['annotated'] = add_annotation(annotation, indice['problem'], bad)\n if not indice['fix']:\n dic['fix'] = ''\n else:\n dic['fix'] = indice['fix'][0]\n dic['annotated_fix'] = add_annotation(annotation, indice['problem'], dic['fix'])\n return dic\n\ndef find_annotation_with_label(hw_num, label):\n #print(hw_num)\n #print(label)\n annotation = ''\n if (hw_num == 'hw1'):\n annotation = annotation_hw1[hw1.index(label)]\n elif (hw_num =='hw2'):\n annotation = annotation_hw2[hw2.index(label)]\n elif (hw_num == 'hw3'):\n #print(hw3.index(label))\n annotation = annotation_hw3[hw3.index(label)]\n return(annotation)\n\ndef run(cmd, timeout_sec):\n proc = subprocess.Popen([\"python\"], stdout=subprocess.PIPE, \n stderr=subprocess.PIPE,universal_newlines = True)\n print(cmd)\n print(proc.communicate(\"while True: print('hello')\\n\", timeout = timeout_sec)[0])\n\n '''\n stdout = proc.stdout.readline()\n proc.communicate(timeout = timeout_sec)\n #stdout = proc.stdout.read()\n print(stdout)\n #print(stderr)\n '''\n\ndef annotate_and_compile(indice, label, hw_num):\n #print(label)\n annotation = find_annotation_with_label( hw_num, label)\n #print(annotation)\n annotated_prog = add_annotation(annotation, label, indice['ocaml'][0]['min'])\n #print (annotated_prog)\n #annotated_prog = \"\\nlet rec mulByDigit : int -> int list -> int list = fun i l -> \\n match List.rev l with\\n | [] -> []\\n | h::t ->\\n let rec helper acc v =\\n if v = 0 then acc else helper ((v mod 10) :: acc) (v / 10) in\\n let rec adder x = match x with | [] -> [] | h::t -> [h] in\\n adder\\n ((mulByDigit i (List.rev (List.map (fun x -> x * 10) t))) @\\n [helper [] (h * i)]);;\\n\"\n #annotated_prog = \"let rec wwhile : ('a -> 'a * bool) * 'a -> 'a = fun (f,b) -> let c' = f b in if c' = b then c' else wwhile (f, c');;\"\n \n try:\n #error_output = subprocess.run([\"ocaml\"], input = annotated_prog, \n # stdout=subprocess.PIPE,universal_newlines = True, timeout=2)\n \n error_output1 = check_output([\"ocaml\"], input = annotated_prog, \n stderr=subprocess.PIPE,universal_newlines = True, timeout=2)\n\n #print(error_output)\n #print(error_output1)\n except subprocess.TimeoutExpired:\n #print('timeout')\n error_output = 'Expired'\n\n '''\n try:\n error_output =run(annotated_prog, 1)\n except subprocess.TimeoutExpired:\n print('timeout')\n '''\n\n #print(error_output)\n \n if(error_output == 'Expired'):\n #print(\"logic\")\n return \"logic error\"\n\n #print(error_output)\n if \"rror\" in error_output.stdout:\n print(\"err\")\n return error_output.stdout\n else:\n return \"\"\n\n\n#obj = 'buildob= 1, build = 1'\n#anno = add_annotation('hello', 'build', obj)\n#print (anno)\n\n\n#obj = {\"event\": \"eval\", \"ocaml\": [{\"type\": \"other\", \"in\": \"let rec mulByDigit i l = \\nmatch (List.rev l) with\\n| [] -> 0\\n| h::t -> ( (h*i)/10 + List.rev i t )\", \"min\": \"\\nlet rec 
mulByDigit i l =\\n match List.rev l with | [] -> 0 | h::t -> ((h * i) / 10) + (List.rev i t);;\\n\", \"out\": \"Characters 85-93:\\n | h::t -> ( (h*i)/10 + List.rev i t );;\\n ^^^^^^^^\\nError: This function has type 'a list -> 'a list\\n It is applied to too many arguments; maybe you forgot a `;'.\\n\"}]}\n#print(annotate_and_compile(obj,'mulByDigit','hw3'))\n#annotate_and_compile(1,'mulByDigit','hw3')","sub_path":"Spring_2016/list_of_errors_ver3/type_annotate.py","file_name":"type_annotate.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334712855","text":"import EcalUtils\nimport EcalCondDB\nimport os\n\n\nclass BeamspotPlot(EcalUtils.EcalPlot):\n\n def __init__(self, dbName='', tag='', since='', fileType='png', directory='./'):\n \"\"\"Init a default data.\"\"\"\n self._dbName = dbName\n self._tag = tag\n self._since = since.strip().split(';')[0]\n self._until = since.strip().split(';')[-1]\n self._directory = directory\n #self._name = get_name(get_formated_db_name(dbName), tag, since, fileType)\n #self._name = get_name(tag, since, fileType)\n self._fileType = fileType\n self._name = self._tag + '_' + self._since + '_' + self._until + '.' + self._fileType\n\n def get_directory(self):\n #return os.path.join(basedir, dbName)\n return get_directory(dbName = self._dbName, tag = self._tag, since = self._since.split(';')[0], fileType = self._fileType, basedir = self._directory)\n\n def _create(self):\n ecalCondDB = EcalCondDB.EcalCondDB(self._dbName)\n ecalCondDB.trendPlot(self._tag, int(self._since), int(self._until), os.path.join(self._directory, self._tag + '_' + self._since + '_' + self._until))","sub_path":"payloadInspector/BeamspotUtils.py","file_name":"BeamspotUtils.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103018889","text":"# Prompt: https://leetcode.com/problems/squares-of-a-sorted-array/\nclass Solution:\n def sortedSquares(self, A):\n # This could probably be improved by splitting the negatives into a separate array\n # and sorting those before adding to the positive array. This would save sort time\n # because the positive ones don't need to be sorted. Not sure if Python's sort()\n # is optimized enough be the same speed as that solution would be.\n A2 = [x**2 for x in A]\n A2.sort()\n return A2\n \n","sub_path":"0. Easy/0977. 
Squares of a Sorted Array/squares_array.py","file_name":"squares_array.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467247858","text":"import pandas as pd\r\nimport definitions\r\n\r\n\r\ndef get_fixtures(venues, league, date):\r\n head = 'https://vwa.bracketpal.com/dailyform/'\r\n fixtures = list()\r\n if \"Methodist Ladies College\" in venues:\r\n venues.append(\"MLC\")\r\n print(venues)\r\n for i in league:\r\n print(\"---------\")\r\n url = head + str(i) + \"/\" + str(date)\r\n table_MN = pd.read_html(url)\r\n try:\r\n df = table_MN[2]\r\n for index, row in df.iterrows():\r\n if row[0] != \"Time\" and type(row[1]) != type(0.01):\r\n venue = row[1].split(\" Ct\")[0].replace(\" Ct\", \"\")\r\n print(venue)\r\n if venue in venues:\r\n court = row[1].split(\"Ct\")[1]\r\n team_a = row[2]\r\n team_b = row[5]\r\n try:\r\n duty = row[7][5:]\r\n except (TypeError, KeyError):\r\n duty = \" \"\r\n division = definitions.div_dict[url.split('/')[-2]]\r\n _date = url.split('/')[-1].split('-')\r\n date_dd = _date[2]\r\n date_mm = _date[1]\r\n date_yyyy = _date[0]\r\n print(row)\r\n if type(row[0]) == type(0.01):\r\n time = \"\"\r\n time_hr = \" \"\r\n time_min = \" \"\r\n else:\r\n time = row[0].split(':')\r\n time_hr = time[0].zfill(2)\r\n time_min = time[1]\r\n\r\n tmp_venue = definitions.venues_dict[venue.lower()].split(\"*\")\r\n venue_0 = tmp_venue[0]\r\n venue_1 = tmp_venue[1]\r\n venue_2 = tmp_venue[2]\r\n venue_full = \" \".join(tmp_venue)\r\n\r\n fixture = definitions.Fixture(venue, venue_0, venue_1, venue_2, venue_full, court, team_a,\r\n team_b, duty, division, date_dd, date_mm, date_yyyy, time_hr,\r\n time_min)\r\n fixtures.append(fixture)\r\n else:\r\n print(venue)\r\n except IndexError:\r\n pass\r\n\r\n return fixtures\r\n\r\n\r\ndef full_pdf(fixtures, token, files):\r\n for fixture in fixtures:\r\n file_out = definitions.APP_ROOT + \"\\\\Scoresheets\\\\temp\\\\\" + token + \"\\\\\" + fixture.venue + \"-\" \\\r\n + fixture.court + \"-\" + fixture.time_hr + fixture.time_min + \".pdf\"\r\n canvas_data = definitions.get_overlay_canvas_wavl(fixture)\r\n form = definitions.merge(canvas_data, template_path=definitions.wavl_pdf_default)\r\n definitions.save(form, filename=file_out)\r\n files.append(file_out)\r\n return files\r\n\r\n\r\ndef jl_pdf(fixtures, token, files):\r\n for fixture in fixtures:\r\n file_out = definitions.APP_ROOT + \"\\\\Scoresheets\\\\temp\\\\\" + token + \"\\\\\" + fixture.venue + \"-\" \\\r\n + fixture.court + \"-\" + fixture.time_hr + fixture.time_min + \".pdf\"\r\n canvas_data = definitions.get_overlay_canvas_jl(fixture)\r\n form = definitions.merge(canvas_data, template_path=definitions.jl_pdf_default)\r\n definitions.save(form, filename=file_out)\r\n files.append(file_out)\r\n return files\r\n\r\n\r\ndef gen_pdfs(fixtures, date):\r\n all_files = []\r\n for fixture in fixtures:\r\n file_out = definitions.APP_ROOT + \"\\\\Scoresheets\\\\temp\\\\\" + date + \"\\\\\" + fixture.venue + \"-\" \\\r\n + fixture.court + \"-\" + fixture.time_hr + fixture.time_min + \"-\" + fixture.division[2] + \".pdf\"\r\n if fixture.division[2] in definitions.jl_div_list:\r\n canvas_data = definitions.get_overlay_canvas_jl(fixture)\r\n form = definitions.merge(canvas_data, template_path=definitions.jl_pdf_default)\r\n if fixture.division[2] in definitions.wavl_div_list:\r\n canvas_data = definitions.get_overlay_canvas_wavl(fixture)\r\n form = definitions.merge(canvas_data, 
template_path=definitions.wavl_pdf_default)\r\n definitions.save(form, filename=file_out)\r\n all_files.append(file_out)\r\n return all_files\r\n\r\n\r\ndef gen_file_list(all_files, venue_usage, wavl_usage, wavjl_usage):\r\n file_list = []\r\n\r\n for file in all_files:\r\n splitted = file.split('\\\\')[-1].split(\"-\")\r\n venue = splitted[0]\r\n division = splitted[-1].replace(\".pdf\", \"\")\r\n\r\n if venue in venue_usage and (division in wavl_usage or division in wavjl_usage):\r\n file_list.append(file)\r\n return file_list\r\n\r\n\r\ndef generate_output(files, token):\r\n output = definitions.APP_ROOT + \"\\\\output\\\\\" + token + \".pdf\"\r\n files.sort()\r\n definitions.merge_pdfs(files, output)\r\n","sub_path":"readPDF.py","file_name":"readPDF.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501652595","text":"from bs4 import BeautifulSoup\nimport time\nfrom urllib.request import urlopen\n#import lxml.html \n#import requests \n\ntry: #python 3\n from tkinter import *\n \nexcept: #python 2\n from Tkinter import *\n \ndef web_scrap(word):\n\n global t\n get_url=\"http://dictionary.com/browse/\"+str(word.replace(\" \",\"-\"))\n url=urlopen(get_url)\n content=url.read()\n soup=BeautifulSoup(content,\"lxml\")\n line=soup.find('div', attrs={'class': 'def-content'})\n \n t=line.text.strip().split()\n \n\n #can also be done by lxml below is scrapping using lxml\n\n #tree = lxml.html.fromstring(content)\n #div=tree.cssselect('div.def-content')[0]\n #t=div.text\n #print(t.strip())\n\n popup() # calling notification window\n\n\nclass mywindow(Frame):\n \n def __init__(self, parent):\n Frame.__init__(self, parent) \n self.parent = parent\n self.initUI()\n \n def initUI(self):\n self.parent.title(\"MEANING FINDER\")\n self.pack(fill=BOTH, expand=True)\n\n global ent\n\n frame = Frame(self)\n frame.pack(fill=X)\n Label(frame, text=\"Enter a word : \").pack(side=\"top\",pady=5,padx=5)\n ent = Entry(frame,width=150)\n ent.config(font=(\"Times\", 16, \"bold\"))\n ent.bind(\"\",(lambda event: web_scrap(ent.get())))\n ent.pack(side=\"left\",padx=10)\n \n\nclass popup(object):\n\n def __init__(self,show_time=5000): \n self.stime=show_time\n self.root = Tk()\n self.root.title(\"MEANING\")\n self.root.geometry('320x85')\n text = Text(self.root)\n text.insert(INSERT,t)\n text.config(font=(\"Calibri\",12,\"bold\"),wrap=WORD,state=DISABLED)\n text.pack()\n self.root.after(self.stime , lambda:self.root.destroy()) #show tkinter window for 5 seconds\n self.root.mainloop()\n\n\ndef main():\n root = Tk()\n root.geometry(\"350x60\")\n root.resizable(width=False, height=False)\n app = mywindow(root)\n root.mainloop() \n\nif __name__ == '__main__':\n main() \n\n\n \n","sub_path":"Tkinter-apps-master/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"167078499","text":"from time import time\nimport pandas as pd\n\nimport matplotlib\nimport numpy as np\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\n\nimport tensorflow as tf\n\nfrom tensorflow.python.keras.models import Model, Sequential\nfrom tensorflow.python.keras.layers import Input, Embedding, LSTM, GRU, Conv1D, Conv2D, GlobalMaxPool1D, 
Dense, Dropout\n\nfrom util import make_w2v_embeddings\nfrom util import split_and_zero_padding\nfrom util import ManDist\n\nimport pickle\n\n\n# File paths\nTRAIN_CSV = '../quora/data/train.csv'\n\n# Load training set\ntrain_df = pd.read_csv(TRAIN_CSV)\nfor q in ['question1', 'question2']:\n train_df[q + '_n'] = train_df[q]\n\n# Make word2vec embeddings\nembedding_dim = 300\nmax_seq_length = 20\nuse_w2v = True\n\n# train_df, embeddings = make_w2v_embeddings(train_df, embedding_dim=embedding_dim, empty_w2v=not use_w2v)\n# pickle.dump([train_df, embeddings], open('../quora/data/embeddings_glove.pkl','wb'))\ntrain_df, embeddings = pickle.load(open('../quora/data/embeddings_glove.pkl','rb'))\n\n# Split to train validation\nvalidation_size = int(len(train_df) * 0.1)\ntraining_size = len(train_df) - validation_size\n\nX = train_df[['question1_n', 'question2_n']]\nY = train_df['is_duplicate']\n\nX_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=0.2, random_state=22)\nX_validation, X_test, Y_validation, Y_test = train_test_split(X_validation, Y_validation, test_size=0.5, random_state=22)\n\nprint(X_train.shape)\nprint(X_validation.shape)\nprint(X_test.shape)\n\nX_train = split_and_zero_padding(X_train, max_seq_length)\nX_validation = split_and_zero_padding(X_validation, max_seq_length)\nX_test = split_and_zero_padding(X_test, max_seq_length)\n\n# Convert labels to their numpy representations\nY_train = Y_train.values\nY_validation = Y_validation.values\nY_test = Y_test.values\n\n# Make sure everything is ok\nassert X_train['left'].shape == X_train['right'].shape\nassert len(X_train['left']) == len(Y_train)\n\nX_train = np.array([np.concatenate((X_train['left'][i], X_train['right'][i])) for i in range(len(X_train['left']))])\nX_validation = np.array([np.concatenate((X_validation['left'][i], X_validation['right'][i])) for i in range(len(X_validation['left']))])\nX_test = np.array([np.concatenate((X_test['left'][i], X_test['right'][i])) for i in range(len(X_test['left']))])\n# --\n\nmodel = tf.keras.models.load_model('./data/LSTM_glove.h5')\nmodel.summary()\n\nprediction = model.predict(X_test, verbose=1, batch_size=128)\nmse = mean_squared_error(Y_test, prediction)\nprediction_int = prediction >= 0.5\nprediction_int = np.array(prediction_int).astype(int)\nacc = accuracy_score(Y_test, prediction_int, normalize=True)\nf1 = f1_score(Y_test, prediction_int, average='weighted') \nprint(mse, acc)\nprint(f1)\n\n# prediction = model.predict(X_train, verbose=1, batch_size=512)\n# mse = mean_squared_error(Y_train, prediction)\n# prediction_int = prediction >= 0.5\n# prediction_int = np.array(prediction_int).astype(int)\n# acc = accuracy_score(Y_train, prediction_int, normalize=True)\n# f1 = f1_score(Y_train, prediction_int, average='weighted') \n# print(mse, acc)\n# print(f1)","sub_path":"cnn_predict.py","file_name":"cnn_predict.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105246983","text":"from pprint import pprint as pp\nimport os\nimport glob\n\ncountry_to_capital = {\n \"UnitedKingdom\": \"London\",\n \"Brazil\": \"Brazilia\",\n \"Morocco\": \"Rabat\",\n \"Sweden\": \"Stockholm\"\n}\n\npp(country_to_capital)\n\n# Reverse the order of the dictionary from Key:Value to Value:Key\n\ncapital_to_country = {capital: country for country, capital in country_to_capital.items()}\npp(capital_to_country )\n\nfile_sizes = {os.path.realpath(p): os.stat(p).st_size\n for p in 
glob.glob(\"*.py\")}\n\npp(file_sizes)","sub_path":"Python_Basics/Iterables/DictionaryComprehensions.py","file_name":"DictionaryComprehensions.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321110284","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('markets', '0002_market_is_active'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Outcomes',\n new_name='Event',\n ),\n migrations.RenameField(\n model_name='outcome',\n old_name='set',\n new_name='event',\n ),\n ]\n","sub_path":"markets/migrations/0003_auto_20141121_1552.py","file_name":"0003_auto_20141121_1552.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594926214","text":"import discord\nfrom numpy.random import choice\n\nfrom discord.ext import commands\n\nclass decide(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def whatpvmshallwedo(self, ctx):\n await ctx.send(choice(\n ['Solak', 'Vorago', 'AoD', 'ED1', 'ED2', 'ED3', 'RotS', 'Raids', 'w54'],\n p=[.30, .25, .01, .08, .08, .08, .01, .10, .09 ]\n ))\n \ndef setup(bot):\n bot.add_cog(decide(bot))\n","sub_path":"cogs/decide.py","file_name":"decide.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85561619","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.pc = 0 \n self.registers = [0] * 8\n # self.ram =[[0] * 8] * 256 #256 bytes of ram\n self.SP = 7\n self.ram = [0] * 256\n self.FL = 0\n\n def load(self):\n \"\"\"Load a program into memory.\"\"\"\n\n address = 0\n\n # For now, we've just hardcoded a program:\n filename = sys.argv[1]\n\n with open(filename) as f:\n for line in f:\n n = line.split(\"#\")\n n[0] = n[0].strip()\n\n if n[0] == \"\":\n continue\n val = int(n[0], 2)\n self.ram[address] = val\n address += 1\n\n\n # program = [\n # # From print8.ls8\n # 0b10000010, # LDI R0,8\n # 0b00000000,\n # 0b00001000,\n # 0b01000111, # PRN R0\n # 0b00000000,\n # 0b00000001, # HLT\n # ]\n\n # for instruction in program:\n # self.ram[address] = instruction\n # address += 1\n\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == \"ADD\":\n reg_a += reg_b\n return reg_a\n #elif op == \"SUB\": etc\n elif op == \"MUL\":\n return reg_a * reg_b\n elif op == \"CMP\":\n print(f\"A {reg_a}, B {reg_b}, Flag {self.FL}\")\n if reg_a is reg_b:\n self.FL = (self.FL & 0b00000001)\n elif reg_a > reg_b:\n self.FL = (self.FL & 0b00000010)\n elif reg_a < reg_b:\n self.FL = (self.FL & 0b00000100)\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def ram_read(self, MAR):\n value = self.ram[MAR]\n\n return value\n\n def ram_write(self, MAR, MAD):\n # register_number = self.ram[self.pc + 1]\n # value = self.ram[self.pc + 2]\n self.ram[MAR] = MAD\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. 
You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def jump(self, reg):\n self.pc = reg\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n running = True\n # SP = 5\n\n \n LDI = 0b10000010\n HALT = 0b00000001\n PRN = 0b01000111\n MUL = 0b10100010\n ADD = 0b10100000\n PUSH = 0b01000101\n POP = 0b01000110\n CALL = 0b01010000\n RET = 0b00010001\n CMP = 0b10100111\n JMP = 0b01010100\n JEQ = 0b01010101\n JNE = 0b01010110\n #ir\n\n\n while running:\n instruction_register = self.ram_read(self.pc)\n # incr = ((instruction_register & 0b11111111) >> 6) + 1\n operand_a = self.ram_read(self.pc + 1)\n operand_b = self.ram_read(self.pc + 2)\n # print(\"Current instruction\", instruction_register)\n # print(\"in\", instruction_register)\n if instruction_register == LDI:\n print(\"store\", operand_a, operand_b)\n self.registers[operand_a] = operand_b\n self.pc += 3\n # self.pc += incr\n\n elif instruction_register == PRN:\n # print(\"print\", operand_a)\n print(self.registers[operand_a])\n self.pc += 2\n # self.pc += incr\n elif instruction_register == ADD:\n print(self.alu(\"ADD\", self.registers[operand_a], self.registers[operand_b]))\n self.pc += 3\n elif instruction_register == MUL:\n print(self.alu(\"MUL\", self.registers[operand_a], self.registers[operand_b]))\n # print(self.registers[operand_a] * self.registers[operand_b])\n # self.pc += 3\n self.pc += 3\n elif instruction_register == JEQ:\n # print(\"equal\")\n if self.FL == (self.FL & 0b00000001):\n self.jump(self.registers[operand_a])\n # self.pc += 2\n elif instruction_register == JNE:\n print(\"JNE\")\n if self.FL == (self.FL & 0b11111111):\n print(\"True\", self.FL, self.registers[operand_a])\n self.jump(self.registers[operand_a])\n # self.pc += 2\n elif instruction_register == JMP:\n self.jump(self.registers[operand_a])\n # self.pc += 2\n elif instruction_register == CMP:\n print(\"CMP\")\n self.alu(\"CMP\",self.registers[operand_a], self.registers[operand_b])\n # print(self.FL)\n self.pc += 3\n elif instruction_register == PUSH:\n # decrement the stack pointer\n # SP -= 1\n self.registers[self.SP]-=1\n # self.SP -= 1\n reg_num = self.ram[self.pc + 1]\n value = self.registers[reg_num]\n self.ram[self.registers[self.SP]] = value\n # reg_num = self.ram[self.pc + 1]\n # reg_val = self.registers[reg_num]\n # self.ram[self.registers[self.SP]] =reg_val\n # self.ram_write(self.registers[self.SP], self.registers[reg_val])\n print(\"PUSH\", value, reg_num, \"address\", self.ram[self.registers[self.SP]], self.registers[self.SP], \"pointer\", self.SP)\n self.pc+=2\n # self.registers[self.SP] -= 1\n # self.pc += 1\n # # copy value from register to memory at stack pointer\n # reg_num = self.ram[self.pc]\n # # print(\"Reg_num\", reg_num)\n # value = self.registers[reg_num]\n # self.ram[self.registers[self.SP]] = value\n\n # self.pc += 2\n elif instruction_register == POP:\n # copy the value from the top of the stack into a given register\n # self.pc += 1\n # reg_val = self.ram_read(self.registers[self.SP])\n reg_val = self.ram[self.registers[self.SP]]\n\n # reg_num = self.ram_read(self.pc + 1)\n reg_num = self.ram[self.pc + 1]\n self.registers[reg_num] = reg_val\n self.registers[self.SP] += 1\n self.pc += 2\n\n # self.pc += 1\n # reg_num = self.ram[self.pc]\n # value = 
self.ram[self.registers[self.SP]]\n # self.registers[reg_num] = value\n # # increment\n # self.SP += 1\n # print(\"POP\", SP, \"Value:\", value, \"new sp:\", SP)\n # self.registers[SP]\n elif instruction_register == CALL:\n print(\"Call\")\n return_address = self.pc + 2\n self.registers[self.SP] -= 1\n self.ram[self.registers[self.SP]] = return_address\n\n reg_num = self.ram[self.pc + 1]\n sub_address = self.registers[reg_num]\n self.pc = reg_num\n elif instruction_register == RET:\n self.pc = self.registers[self.SP]\n elif instruction_register == HALT:\n running = False\n # self.pc += 1\n else:\n print(\"Nope\")\n sys.exit()\n # self.pc += 1\n # self.pc += incr\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":7826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246100366","text":"config = {\n\"Luminosity\": 1000,\n\"InputDirectory\": \"results\",\n\n\"Histograms\" : {\n \"invMass\" : {},\n \"etmiss\" : {},\n \"vxp_z\" : {},\n \"pvxp_n\" : {},\n \"lep_n\" : {},\n \"leadlep_pt\" : {},\n \"leadlep_eta\" : {\"y_margin\" : 0.2},\n \"leadlep_E\" : {},\n \"leadlep_phi\" : {\"y_margin\" : 0.6},\n \"leadlep_charge\" : {\"y_margin\" : 0.6},\n \"leadlep_type\" : {\"y_margin\" : 0.5},\n \"leadlep_ptconerel30\" : {},\n \"leadlep_etconerel20\" : {},\n \"leadlep_z0\" : {},\n \"leadlep_d0\" : {},\n \"traillep_pt\" : {},\n \"traillep_eta\" : {\"y_margin\" : 0.2},\n \"traillep_E\" : {},\n \"traillep_phi\" : {\"y_margin\" : 0.6},\n \"traillep_charge\" : {\"y_margin\" : 0.6},\n \"traillep_type\" : {\"y_margin\" : 0.5},\n \"traillep_ptconerel30\" : {},\n \"traillep_etconerel20\" : {},\n \"traillep_z0\" : {},\n \"traillep_d0\" : {},\n \"n_jets\" : {},\n \"jet_pt\" : {},\n \"jet_m\" : {},\n \"jet_jvf\" : {\"y_margin\" : 0.5},\n \"jet_eta\" : {\"y_margin\" : 0.3},\n \"jet_MV1\" : {},\n},\n\n\"Paintables\": {\n \"Stack\": {\n \"Order\": [\"Diboson\"], \n \"Processes\" : { \n \"Diboson\" : {\n \"Color\" : \"#fa7921\",\n \"Contributions\" : [\"WW\", \"WZ\", \"ZZ\"]},\n } \n },\n \"data\" : {\n \"Contributions\": [\"data_Muons\"]}\n},\n\n\"Depictions\": {\n \"Order\": [\"Main\", \"Data/MC\"],\n \"Definitions\" : { \n \"Main\": {\n \"type\" : \"Main\",\n \"Paintables\": [\"Stack\", \"data\"]},\n\n \"Data/MC\": {\n \"type\" : \"Agreement\",\n \"Paintables\" : [\"data\", \"Stack\"]},\n }\n},\n}\n","sub_path":"Configurations/PlotConf_ZAnalysis.py","file_name":"PlotConf_ZAnalysis.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250247828","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom first_app import forms\nfrom first_app.forms import signupform,signin,userform,userform_update,signupform_update,HomeForm,CommentForm\nfrom first_app.models import user_signup,User,Friend,Post\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.generic import TemplateView\n\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('first_app:home'))\n\n\npro=0\n\n\nclass HomeView(TemplateView):\n template_name = 'first_app/friends.html'\n\n def get(self, request):\n user = User.objects.exclude(username=request.user.username)\n\n 
friend=None\n friends=None\n\n try:\n friend = Friend.objects.get(current_user=request.user)\n friends = friend.users.all()\n except Friend.DoesNotExist:\n friend = None\n friends = None\n\n args = {\n 'users': user,'friends':friends,'pro':pro,\n }\n return render(request, self.template_name, args)\n\n@login_required\ndef posts(request,post, pk):\n post=Post.objects.get(pk=pk)\n\n\n # post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return redirect('first_app:feed')\n else:\n form = CommentForm()\n\n return render(request,'first_app/posts.html',{'form': form,'post':post})\n\ndef index(request):\n # user=user_signup.objects.get(username=username)\n # users = User.objects.exclude(id=request.user.id)\n # friend = Friend.objects.filter(current_user=request.user)\n # try:\n # friends = friend.users.all()\n # except ObjectDoesNotExist:\n # friends = None\n return render(request,'first_app/home.html',)\n\ndef navbar(request):\n return render(request,'first_app/navbar.html',)\n\ndef login2(request):\n if request.method=='POST':\n username=request.POST.get('username')\n password=request.POST.get('password')\n\n user = authenticate(username=username,password=password)\n\n if user:\n if user.is_active:\n login(request,user)\n # print(\"Login successful\")\n messages.success(request, \"Logged in Successfully!\")\n return HttpResponseRedirect(reverse('first_app:home'))\n\n\n else:\n return HttpResponse(\"Acount not Active\")\n else:\n print(\"Anauthorised Entry\")\n return HttpResponse(\"Invalid login request\")\n else:\n return render(request,'first_app/login2.html')\n\n\ndef signup2(request):\n\n registred=False\n\n if request.method ==\"POST\":\n user_form=userform(data=request.POST)\n signup_form=signupform(data=request.POST)\n\n if user_form.is_valid() and signup_form.is_valid():\n user=user_form.save()\n user.set_password(user.password)\n user.save()\n\n profile=signup_form.save(commit=False)\n profile.user=user\n\n if 'propic' in request.FILES:\n profile.propic=request.FILES['propic']\n else:\n print(\"No Images Found!\")\n\n\n\n\n\n\n profile.save()\n\n registred=True\n # return render(request,'first_app/signup.html')\n\n else:\n print(user_form.errors,signup_form.errors)\n\n\n else:\n user_form=userform()\n signup_form=signupform()\n\n return render(request,'first_app/signup2.html',\n {'user_form':user_form,\n 'signup_form':signup_form,\n 'registred':registred}\n )\n\n\n@login_required\ndef profee(request, proo, pk):\n friend = User.objects.get(pk=pk)\n request.session['friend']=pk\n\n return redirect('first_app:Friend_profile',)\n\n\n@login_required\ndef feed(request):\n posts_a=Post.objects.all().order_by('-created')\n friend = Friend.objects.get(current_user=request.user)\n friends = friend.users.all()\n users=User.objects.exclude(username=request.user.username)\n form = HomeForm(request.POST)\n text=0\n if form.is_valid():\n post = form.save(commit=False)\n post.user = request.user\n post.save()\n text = form.cleaned_data['post']\n form = HomeForm()\n return redirect('first_app:home')\n\n args = {'form': form, 'text': text,'posts_a':posts_a,'friends':friends,'users':users,}\n\n\n return render(request,'first_app/feed.html',args)\n\n\n@login_required\ndef profile(request):\n me=User.objects.all()\n person=user_signup.objects.all()\n\n\n return render(request,'first_app/profile.html',{'me':me})\n\n@login_required\ndef 
Friend_profile(request):\n pk=request.session['friend']\n friend = User.objects.get(pk=pk)\n user=friend\n request.session.pop('friend', None)\n request.session.modified = True\n return render(request,'first_app/friends_profile.html',{'user':user})\n\n\n\ndef signup(request):\n\n registred=False\n\n if request.method ==\"POST\":\n user_form=userform(data=request.POST)\n signup_form=signupform(data=request.POST)\n\n if user_form.is_valid() and signup_form.is_valid():\n user=user_form.save()\n user.set_password(user.password)\n user.save()\n\n profile=signup_form.save(commit=False)\n profile.user=user\n\n if 'propic' in request.FILES:\n profile.propic=request.FILES['propic']\n else:\n print(\"No Images Found!\")\n\n\n\n\n\n\n profile.save()\n\n registred=True\n # return render(request,'first_app/signup.html')\n\n else:\n print(user_form.errors,signup_form.errors)\n\n\n else:\n user_form=userform()\n signup_form=signupform()\n\n return render(request,'first_app/signup.html',\n {'user_form':user_form,\n 'signup_form':signup_form,\n 'registred':registred}\n )\n\n@login_required\ndef profile_update(request):\n\n if request.method == \"POST\":\n\n user_update=userform_update(request.POST, instance=request.user)\n signup_update=signupform_update(request.POST,request.FILES, instance=request.user.user_signup)\n\n if user_update.is_valid() and signup_update.is_valid():\n user_update.save()\n #user.save()\n\n signup_update.save()\n\n #profile.save()\n messages.success(request,'Your profile has been Updated Successfully')\n return render(request,'first_app/home.html')\n\n else:\n print(user_update.errors,signup_update.errors)\n\n\n else:\n user_update=userform_update(request.POST, instance=request.user)\n signup_update=signupform_update(request.POST,request.FILES, instance=request.user.user_signup)\n\n return render(request,'first_app/profile_update.html',\n {'user_update':user_update,\n 'signup_update':signup_update}\n )\n\n\n\n\ndef signin(request):\n if request.method=='POST':\n username=request.POST.get('username')\n password=request.POST.get('password')\n\n user = authenticate(username=username,password=password)\n\n if user:\n if user.is_active:\n login(request,user)\n # print(\"Login successful\")\n messages.success(request, \"Logged in Successfully!\")\n return HttpResponseRedirect(reverse('first_app:home'))\n\n\n else:\n return HttpResponse(\"Acount not Active\")\n else:\n print(\"Anauthorised Entry\")\n return HttpResponse(\"Invalid login request\")\n else:\n return render(request,'first_app/login.html')\n\n\ndef change_friends(request, operation, pk):\n friend = User.objects.get(pk=pk)\n if operation == 'add':\n Friend.make_friend(request.user, friend)\n elif operation == 'remove':\n Friend.lose_friend(request.user, friend)\n return redirect('link:first_app')\n\n\ndef add_comment_to_post(request, pk):\n\n\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return redirect('link:feed', pk=post.pk)\n else:\n form = CommentForm()\n return render(request, 'first_app/feed.html', {'form': form})\n","sub_path":"first_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45644382","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport ctypes\nfrom weld.weldobject import *\nfrom 
weld.types import *\nfrom weld.encoders import NumpyArrayEncoder, ScalarDecoder\nimport weld.bindings as cweld\nfrom collections import namedtuple\nfrom pprint import pprint\nimport sys\nimport json\nfrom timeit import default_timer as timer\nfrom pprint import pprint\nimport argparse\nimport math\n\n# Create data\ndef generate_data(n_R, hit_S, hit_T, hit_U):\n n_S = int(math.ceil(0.2 * n_R))\n n_T = int(math.ceil(0.05 * n_R))\n n_U = int(math.ceil(0.001 * n_R))\n\n U_uk = np.arange(n_U, dtype='int64')\n U_val = np.arange(n_U, dtype='int64')\n\n T_uk = np.random.choice(U_uk, n_T, replace=True)\n T_uk[int(math.ceil(n_T*hit_U)):] += n_U\n T_tk = np.arange(n_T, dtype='int64')\n\n S_tk = np.random.choice(T_tk, n_S, replace=True)\n S_tk[int(math.ceil(n_S*hit_T)):] += n_T\n S_sk = np.arange(n_S, dtype='int64')\n\n R_sk = np.random.choice(S_sk, n_R, replace=True)\n R_sk[int(math.ceil(n_R*hit_S)):] += n_S\n R_rk = np.arange(n_R, dtype='int64')\n\n columns = [R_rk, R_sk, S_sk, S_tk, T_tk, T_uk, U_uk, U_val]\n for col in columns:\n np.random.shuffle(col)\n return columns\n\n# Create a dictionary with the values grouped by the keys\ndef group_by_key(keys, vals):\n grouped = {}\n for k, v in zip(keys, vals):\n group = grouped.get(k)\n if group is None:\n group = []\n group.append(v)\n grouped[k] = group\n return grouped\n\n# Perform the join in Python, check if hit ratios are accurate\ndef join_python(R_rk, R_sk, S_sk, S_tk, T_tk, T_uk, U_uk, U_val):\n S_ht = group_by_key(S_sk, S_tk)\n T_ht = group_by_key(T_tk, T_uk)\n U_ht = {}\n for (uk, uval) in zip(U_uk, U_val):\n U_ht[uk] = uval\n\n aggregate = int(0)\n s_hit = 0.0\n s_try = 0.0\n t_hit = 0.0\n t_try = 0.0\n u_hit = 0.0\n u_try = 0.0\n hits = 0\n for (rk, sk) in zip(R_rk, R_sk):\n tks = S_ht.get(sk)\n s_try += 1.0\n if (tks != None):\n s_hit += 1.0\n for tk in tks:\n uks = T_ht.get(tk)\n t_try += 1.0\n if (uks != None):\n t_hit += 1.0\n for uk in uks:\n uval = U_ht.get(uk)\n u_try += 1.0\n if (uval != None):\n u_hit += 1.0\n hits += 1\n aggregate += uval\n end = timer()\n\n print(\"S hit ratio: \" + (str(s_hit / s_try) if s_try > 0 else str(0)))\n print(\"T hit ratio: \" + (str(t_hit / t_try) if t_try > 0 else str(0)))\n print(\"U hit ratio: \" + (str(u_hit / u_try) if u_try > 0 else str(0)))\n print(\"Hits: \" + str(hits))\n\n return aggregate\n\n# Create the args object for Weld\ndef args_factory(encoded):\n class Args(ctypes.Structure):\n _fields_ = [e for e in encoded]\n return Args \n\n# Join the tables using Weld\ndef join_weld(values, ty, threads, weld_conf):\n adaptive = ty == 'Adaptive' or ty == 'Lazy'\n lazy = ty == 'Lazy'\n file_path = 'join_bf.weld' if ty == 'Bloom Filter' else 'join.weld'\n \n weld_code = None\n with open(file_path, 'r') as content_file:\n weld_code = content_file.read()\n\n enc = NumpyArrayEncoder()\n names = ['R_rk', 'R_sk', 'S_sk', 'S_tk', 'T_tk', 'T_uk', 'U_uk', 'U_val']\n argtypes = [enc.py_to_weld_type(x).ctype_class for x in values]\n encoded = [enc.encode(x) for x in values]\n\n Args = args_factory(zip(names, argtypes))\n weld_args = Args()\n for name, value in zip(names, encoded):\n setattr(weld_args, name, value)\n\n void_ptr = ctypes.cast(ctypes.byref(weld_args), ctypes.c_void_p)\n arg = cweld.WeldValue(void_ptr)\n\n # Compile the module\n err = cweld.WeldError()\n conf = cweld.WeldConf()\n conf.set(\"weld.optimization.applyAdaptiveTransforms\", \"true\" if adaptive else \"false\")\n conf.set(\"weld.adaptive.lazyCompilation\", \"true\" if lazy else \"false\")\n conf.set(\"weld.threads\", str(threads))\n 
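(On the Weld join benchmark above.) join_python validates the benchmark result by building one hash table per joined relation with group_by_key and probing them row by row. That pattern stands alone as a plain hash join; the sketch below reduces it to two relations and a summed aggregate. The toy arrays and the name hash_join_sum are illustrative only — this is the reference-path idea, not the Weld code path.

```python
import numpy as np

def group_by_key(keys, vals):
    # Same idea as the helper above: bucket values by key.
    grouped = {}
    for k, v in zip(keys, vals):
        grouped.setdefault(k, []).append(v)
    return grouped

def hash_join_sum(left_keys, right_keys, right_vals):
    """Sum right_vals for every left key that finds a match (illustrative)."""
    ht = group_by_key(right_keys, right_vals)   # build side
    total = 0
    for k in left_keys:                         # probe side
        for v in ht.get(k, ()):
            total += v
    return total

# Toy check: keys 0 and 2 match; expect 10 + 30 = 40.
left = np.array([0, 2, 5], dtype='int64')
rk = np.array([0, 1, 2], dtype='int64')
rv = np.array([10, 20, 30], dtype='int64')
assert hash_join_sum(left, rk, rv) == 40
```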
conf.set(\"weld.memory.limit\", \"20000000000\")\n if weld_conf is not None:\n for key, val in weld_conf.iteritems():\n conf.set(key, val)\n\n comp_start = timer()\n module = cweld.WeldModule(weld_code, conf, err)\n comp_time = timer() - comp_start\n\n if err.code() != 0:\n raise ValueError(\"Could not compile function {}: {}\".format(\n weld_code, err.message()))\n\n # Run the module\n dec = ScalarDecoder()\n restype = WeldLong()\n err = cweld.WeldError()\n\n exec_start = timer()\n weld_ret = module.run(conf, arg, err)\n exec_time = timer() - exec_start\n\n if err.code() != 0:\n raise ValueError((\"Error while running function,\\n{}\\n\\n\"\n \"Error message: {}\").format(\n weld_code, err.message()))\n\n ptrtype = POINTER(restype.ctype_class)\n data = ctypes.cast(weld_ret.data(), ptrtype)\n result = dec.decode(data, restype)\n \n weld_ret.free()\n arg.free()\n\n return (result, comp_time, exec_time)\n\nif __name__ == '__main__':\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description=\"Micro benchmark for adaptive joins\"\n )\n parser.add_argument('-c', '--conf', type=str, required=True,\n help=\"Path to configuration file\")\n parser.add_argument('-o', '--output', type=str, required=True,\n help=\"Path to output file\")\n cmdline_args = parser.parse_args()\n opt_dict = vars(cmdline_args)\n conf_path = opt_dict['conf']\n out_path = opt_dict['output']\n\n # Parse configuration file\n with open(conf_path) as f:\n conf = json.load(f)\n num_rows = conf['num_rows']\n sfs = conf['sf']\n num_iters = conf['num_iterations']\n s_hits = conf['s_hit']\n t_hit = conf['t_hit']\n u_hit = conf['u_hit']\n types = conf['type']\n num_threads = conf['num_threads']\n weld_conf = conf.get('weld_conf')\n\n # Start benchmarking\n total_iters = len(sfs) * len(s_hits) * len(types) * len(num_threads)\n iters = 1\n with open(out_path, 'w') as f:\n f.write('type,n_rows,sf,s_hit,t_hit,u_hit,threads,comp_time,exec_time\\n')\n for sf in sfs:\n for s_hit in s_hits:\n data = generate_data(num_rows * sf, s_hit, t_hit, u_hit)\n expect = join_python(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7])\n for t in types:\n for threads in num_threads:\n print('[%03d/%03d] %s, %d, %d, %.3f, %.3f, %.3f, %d' % (iters, total_iters, t, num_rows, sf, s_hit, t_hit, u_hit, threads))\n for i in range(num_iters):\n (result, comp_time, exec_time) = join_weld(data, t, threads, weld_conf)\n assert(result == expect)\n\n row = '%s,%d,%d,%f,%f,%f,%d,%f,%f\\n' % (t, num_rows, sf, s_hit, t_hit, u_hit, threads, comp_time, exec_time)\n f.write(row)\n iters += 1","sub_path":"benchmarks/join_dm/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":7093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478809340","text":"from .typing import Inventory_Dist, Input_Warehouse_List, Shipment\nfrom .warehouse import Warehouse\n\n\nclass InventoryAllocator(object):\n \"\"\"\n Optimally create shipments of inventory to complete orders\n\n Attributes:\n __warhouse_list (List[Warhouse]): List of all Warehouse objects.\n\n \"\"\"\n\n def __init__(self):\n self.__warehouse_list = []\n\n def __create_warehouse_list(self, warehouse_dict_list: Input_Warehouse_List):\n \"\"\"\n Creates a Warehouse object for each Warhouse_Dict in the list\n\n Parameters:\n warhouse_dict_list: A list of dicts mapping warehouse names to inventories\n\n \"\"\"\n for inp_warehouse in warehouse_dict_list:\n self.__warehouse_list.append(\n Warehouse(inp_warehouse[\"name\"], 
inp_warehouse[\"inventory\"])\n )\n\n def __are_multiple_warehouses_required(self, item: str,\n order_quantity: int) -> bool:\n \"\"\"\n Return True if multiple warehouses are needed otherwise False\n\n Side Effects:\n If a single warehouse can ship all of the item then it processes\n a shipment for item from that warehouse and returns False\n\n Parameters:\n item: Item to be shipped.\n order_quantity: Amount of item to be shipped.\n\n \"\"\"\n total_amount = 0\n\n for warehouse in self.__warehouse_list:\n quantity_in_warehouse = warehouse.get_quantity(item)\n if quantity_in_warehouse >= order_quantity:\n # Single warehouse can ship all of the item so return False\n warehouse.process_item_shipment(item, order_quantity)\n return False\n # Keep track of total item amount across warehouses\n total_amount += quantity_in_warehouse\n\n return total_amount >= order_quantity\n\n def __process_item_shipments_across_warehouses(self, item: str,\n order_quantity: int):\n \"\"\"\n Process item shipments across warehouses for order_quantity amount\n\n Parameters:\n item: Item to be shipped.\n order_quantity: Amount of item to be shipped.\n\n \"\"\"\n quantity_left = order_quantity\n\n # Greedily take inventory from warehouses until shipment is complete\n for warehouse in self.__warehouse_list:\n quantity_in_warehouse = warehouse.get_quantity(item)\n if quantity_in_warehouse > 0:\n shipping_quantity = min(quantity_in_warehouse,\n quantity_left)\n quantity_left -= shipping_quantity\n warehouse.process_item_shipment(item, shipping_quantity)\n if quantity_left <= 0:\n break\n\n def allocate_inventory(self, order: Inventory_Dist,\n warehouse_dicts: Input_Warehouse_List) -> Shipment:\n \"\"\"\n Returns Shipment of optimally allocated inventory\n\n Parameters:\n order: The order to be completed.\n warhouse_dicts: A list of dicts mapping warehouse names and to inventories\n\n \"\"\"\n self.__create_warehouse_list(warehouse_dicts)\n\n # Process all items in order into warehouse shipments\n for item, quantity in order.items():\n if quantity == 0:\n # Skip orders of 0\n continue\n if self.__are_multiple_warehouses_required(item, quantity):\n # Single warehouse can't do shipment and distributed shipment is possible\n self.__process_item_shipments_across_warehouses(item, quantity)\n\n # Combine warehouse shipments to fufill order\n shipment = []\n for warehouse in self.__warehouse_list:\n warehouse_shipment = warehouse.ship_processed_shipments()\n if warehouse_shipment:\n shipment.append(warehouse_shipment)\n return shipment\n","sub_path":"inventory-allocator/deliverr_challenge_2020/src/inventory_allocator.py","file_name":"inventory_allocator.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596323938","text":"import requests\nimport json\nimport playerDatabase\nimport watcherLogging\n\n# This will be set in the config file so dont worry about it :-)\nmainIpandPort = \"192.168.1.1:28015\"\n\n# get all recent players with hyper links\n\n\ndef get_all_recent_player_info(serverIP, ServerPort):\n global mainIpandPort\n\n try:\n resp = requests.get(\"http://\" + serverIP + ':' +\n ServerPort + \"/recent.json\", allow_redirects=True)\n print(\"check for all player recent info Address\" + str(mainIpandPort))\n myList = []\n playerCount = 0\n\n # print(resp.content)\n\n players = json.loads(resp.content)\n\n for p in players:\n myList.append('player=(' + p['name'] + ') id=(' + p['id'] + ')\\n')\n playerCount += 1\n 
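(On the inventory_allocator entry above.) Its strategy is: ship from a single warehouse whenever one can cover the whole item quantity, otherwise split greedily across warehouses in their given (cheapest-first) order, and ship nothing for the item if even the split cannot complete. A condensed sketch of that decision, without the Warehouse class or the private-method plumbing — the warehouse names and quantities are made up for the demo:

```python
def allocate(order_qty, stock_by_warehouse):
    """Single-warehouse-first allocation with a greedy fallback (sketch).

    stock_by_warehouse: list of (name, qty), assumed pre-sorted by cost.
    Returns a list of (name, shipped_qty), or [] if the order cannot be met.
    """
    # Prefer a single warehouse that can cover the whole order.
    for name, qty in stock_by_warehouse:
        if qty >= order_qty:
            return [(name, order_qty)]
    # Otherwise take greedily until the order is filled.
    shipments, remaining = [], order_qty
    for name, qty in stock_by_warehouse:
        if qty <= 0:
            continue
        take = min(qty, remaining)
        shipments.append((name, take))
        remaining -= take
        if remaining == 0:
            return shipments
    return []  # distributed shipment impossible

assert allocate(5, [("owd", 3), ("dm", 5)]) == [("dm", 5)]
assert allocate(7, [("owd", 3), ("dm", 5)]) == [("owd", 3), ("dm", 4)]
assert allocate(10, [("owd", 3), ("dm", 5)]) == []
```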
myList.append(\"Total recent players::{}\".format(playerCount) + '\\n')\n\n except Exception as e:\n print(\"Error has occurred in PlayRust.io request get_all_recent_player_info ::\" + str(e))\n watcherLogging.error_logs(\n 'Error has occurred in PlayRust.io request get_all_recent_player_info ::' + str(e))\n\n return myList\n\n# Check this dictionary against values stored in the database and return a list of name-change messages, if any\n# key = playerID and Value = player name\n\n\ndef check_for_player_name_changes(serverIP, ServerPort):\n global mainIpandPort\n playerDbList = []\n myReturnList = []\n try:\n resp = requests.get(\"http://\" + serverIP + ':' +\n ServerPort + \"/recent.json\", allow_redirects=True)\n print(\"check for player name changes Address\" + str(mainIpandPort))\n #myDic = {}\n\n # print(resp.content)\n\n players = json.loads(resp.content)\n\n for p in players:\n playerDbList = playerDatabase.check_for_existing_player(p['id'])\n if len(playerDbList) > 0:\n for pData in playerDbList:\n if pData[2] != p['name']:\n # check if the name has changed\n playerDatabase.update_player_name(p['id'], p['name'])\n myReturnList.append(\n \"Player ::\" + pData[1] + \" Now Playing as ::\" + p['name'] + '\\n')\n print(\"player changed name\")\n else:\n # add new player to the database\n playerDatabase.add_player_data(p['id'], p['name'], p['name'])\n print(\"Added new player :: Id:\" +\n p['id'] + \" Name:\" + p['name'])\n\n # add or check player in the database\n\n except Exception as e:\n print(\"Error has occurred in PlayRust.io request check_for_player_name_changes ::\" + str(e))\n watcherLogging.error_logs(\n 'Error has occurred in PlayRust.io request check_for_player_name_changes ::' + str(e))\n\n return myReturnList\n","sub_path":"playRustIO.py","file_name":"playRustIO.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"221793184","text":"'''\nCreated on 2018. 9. 
27.\n\n@author: kitcoop\n'''\nfrom urllib.request import urlopen, Request\nfrom urllib.parse import urlencode\nfrom bs4 import BeautifulSoup\n\ndef scrapper(url) :\n\treq= Request(url, headers={'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'})\n\turldata = urlopen(req)\n\thtml = urldata.read()\n\tsoup = BeautifulSoup(html, 'html.parser')\n\tprint(soup)\n\nscrapper('http://gall.dcinside.com/mgallery/board/view?id=mnet_k&no=2698119')\n","sub_path":"craw/.ipynb_checkpoints/scrapper2-checkpoint.py","file_name":"scrapper2-checkpoint.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352180896","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-intel/egg/tnefparse/mapi.py\n# Compiled at: 2018-12-01 10:41:34\n\"\"\"MAPI attribute definitions\"\"\"\nimport logging, sys\nfrom decimal import Decimal\nfrom .util import apptime, dbl64, float32, guid, int8, int16, int32, int64, systime, uint8, uint16, uint32, uint64\nif sys.hexversion < 50331648:\n range = xrange\nlogger = logging.getLogger('mapi-decode')\nSZMAPI_UNSPECIFIED = 0\nSZMAPI_NULL = 1\nSZMAPI_SHORT = 2\nSZMAPI_INT = 3\nSZMAPI_FLOAT = 4\nSZMAPI_DOUBLE = 5\nSZMAPI_CURRENCY = 6\nSZMAPI_APPTIME = 7\nSZMAPI_ERROR = 10\nSZMAPI_BOOLEAN = 11\nSZMAPI_OBJECT = 13\nSZMAPI_INT8BYTE = 20\nSZMAPI_STRING = 30\nSZMAPI_UNICODE_STRING = 31\nSZMAPI_SYSTIME = 64\nSZMAPI_CLSID = 72\nSZMAPI_BINARY = 258\nSZMAPI_BEATS_THE_HELL_OUTTA_ME = 51\nMULTI_VALUE_FLAG = 4096\nGUID_EXISTS_FLAG = 32768\n\ndef decode_mapi(data, codepage='cp1252', starting_offset=None):\n \"\"\"decode MAPI types\"\"\"\n dataLen = len(data)\n attrs = []\n offset = starting_offset or 0\n num_properties = uint32(data[offset:offset + 4])\n offset += 4\n try:\n for i in range(num_properties):\n if offset >= dataLen:\n logger.warn(\"Skipping property '%i'\" % i)\n continue\n attr_type = uint16(data[offset:offset + 2])\n offset += 2\n attr_name = uint16(data[offset:offset + 2])\n offset += 2\n guid_id = ''\n guid_name = None\n guid_prop = None\n if attr_name >= GUID_EXISTS_FLAG:\n guid_id = guid(data, offset)\n offset += 16\n kind = uint32(data[offset:offset + 4])\n offset += 4\n if kind == 0:\n guid_prop = uint32(data[offset:offset + 4])\n offset += 4\n else:\n iidLen = uint32(data[offset:offset + 4])\n offset += 4\n q, r = divmod(iidLen, 4)\n if r != 0:\n iidLen += 4 - r\n guid_name = data[offset:offset + iidLen].decode('utf-16')\n offset += iidLen\n num_mv_properties = None\n if MULTI_VALUE_FLAG & attr_type:\n attr_type ^= MULTI_VALUE_FLAG\n num_mv_properties = uint32(data[offset:offset + 4])\n offset += 4\n for mv in range(num_mv_properties or 1):\n try:\n attr_data, offset = parse_property(data, offset, attr_name, attr_type, codepage, num_mv_properties)\n attr = TNEFMAPI_Attribute(attr_type, attr_name, attr_data, guid_id, guid_name, guid_prop)\n attrs.append(attr)\n except Exception:\n logger.debug('Attribute type: 0x%4.4x', attr_type)\n logger.debug('Attribute name: 0x%4.4x (%s)', attr_name, TNEFMAPI_Attribute.codes.get(attr_name))\n raise\n\n if (num_mv_properties or 1) % 2 and attr_type in (SZMAPI_SHORT, SZMAPI_BOOLEAN):\n offset += 2\n\n except Exception as e:\n import traceback\n stack = traceback.format_exc()\n logger.error('decode_mapi Exception %s' % e)\n logger.debug(stack)\n\n if starting_offset is not None:\n 
return (offset, attrs)\n else:\n return attrs\n return\n\n\ndef parse_property(data, offset, attr_name, attr_type, codepage, is_multi):\n attr_data = None\n if attr_type in (SZMAPI_SHORT, SZMAPI_BOOLEAN):\n attr_data = data[offset:offset + 2]\n offset += 2\n elif attr_type in (SZMAPI_INT, SZMAPI_FLOAT, SZMAPI_ERROR):\n attr_data = data[offset:offset + 4]\n offset += 4\n elif attr_type in (SZMAPI_DOUBLE, SZMAPI_APPTIME, SZMAPI_CURRENCY, SZMAPI_INT8BYTE, SZMAPI_SYSTIME):\n attr_data = data[offset:offset + 8]\n offset += 8\n elif attr_type == SZMAPI_CLSID:\n attr_data = data[offset:offset + 16]\n offset += 16\n elif attr_type in (SZMAPI_STRING, SZMAPI_UNICODE_STRING, SZMAPI_OBJECT, SZMAPI_BINARY, SZMAPI_UNSPECIFIED):\n if is_multi:\n num_vals = 1\n else:\n num_vals = uint32(data[offset:offset + 4])\n offset += 4\n attr_data = []\n for j in range(num_vals):\n length = uint32(data[offset:offset + 4])\n offset += 4\n q, r = divmod(length, 4)\n if r != 0:\n length += 4 - r\n if attr_type == SZMAPI_UNICODE_STRING:\n attr_data.append(data[offset:offset + length].decode('utf-16'))\n elif attr_type == SZMAPI_STRING:\n attr_data.append(data[offset:offset + length].decode(codepage))\n else:\n attr_data.append(data[offset:offset + length])\n offset += length\n\n else:\n raise ValueError('Unknown MAPI type 0x%4.4x' % attr_type)\n return (attr_data, offset)\n\n\nclass TNEFMAPI_Attribute(object):\n \"\"\"represents a mapi attribute\n\n Property reference docs:\n\n https://docs.microsoft.com/en-us/office/client-developer/outlook/mapi/mapping-canonical-property-names-to-mapi-names#tagged-properties\n https://fossies.org/linux/libpst/xml/MAPI_definitions.pdf\n\n Most these properties represent PidTag properties\n \"\"\"\n MAPI_ACKNOWLEDGEMENT_MODE = 1\n MAPI_ALTERNATE_RECIPIENT_ALLOWED = 2\n MAPI_AUTHORIZING_USERS = 3\n MAPI_AUTO_FORWARD_COMMENT = 4\n MAPI_AUTO_FORWARDED = 5\n MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID = 6\n MAPI_CONTENT_CORRELATOR = 7\n MAPI_CONTENT_IDENTIFIER = 8\n MAPI_CONTENT_LENGTH = 9\n MAPI_CONTENT_RETURN_REQUESTED = 10\n MAPI_CONVERSATION_KEY = 11\n MAPI_CONVERSION_EITS = 12\n MAPI_CONVERSION_WITH_LOSS_PROHIBITED = 13\n MAPI_CONVERTED_EITS = 14\n MAPI_DEFERRED_DELIVERY_TIME = 15\n MAPI_DELIVER_TIME = 16\n MAPI_DISCARD_REASON = 17\n MAPI_DISCLOSURE_OF_RECIPIENTS = 18\n MAPI_DL_EXPANSION_HISTORY = 19\n MAPI_DL_EXPANSION_PROHIBITED = 20\n MAPI_EXPIRY_TIME = 21\n MAPI_IMPLICIT_CONVERSION_PROHIBITED = 22\n MAPI_IMPORTANCE = 23\n MAPI_IPM_ID = 24\n MAPI_LATEST_DELIVERY_TIME = 25\n MAPI_MESSAGE_CLASS = 26\n MAPI_MESSAGE_DELIVERY_ID = 27\n MAPI_MESSAGE_SECURITY_LABEL = 30\n MAPI_OBSOLETED_IPMS = 31\n MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME = 32\n MAPI_ORIGINAL_EITS = 33\n MAPI_ORIGINATOR_CERTIFICATE = 34\n MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED = 35\n MAPI_ORIGINATOR_RETURN_ADDRESS = 36\n MAPI_PARENT_KEY = 37\n MAPI_PRIORITY = 38\n MAPI_ORIGIN_CHECK = 39\n MAPI_PROOF_OF_SUBMISSION_REQUESTED = 40\n MAPI_READ_RECEIPT_REQUESTED = 41\n MAPI_RECEIPT_TIME = 42\n MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED = 43\n MAPI_REDIRECTION_HISTORY = 44\n MAPI_RELATED_IPMS = 45\n MAPI_ORIGINAL_SENSITIVITY = 46\n MAPI_LANGUAGES = 47\n MAPI_REPLY_TIME = 48\n MAPI_REPORT_TAG = 49\n MAPI_REPORT_TIME = 50\n MAPI_RETURNED_IPM = 51\n MAPI_SECURITY = 52\n MAPI_INCOMPLETE_COPY = 53\n MAPI_SENSITIVITY = 54\n MAPI_SUBJECT = 55\n MAPI_SUBJECT_IPM = 56\n MAPI_CLIENT_SUBMIT_TIME = 57\n MAPI_REPORT_NAME = 58\n MAPI_SENT_REPRESENTING_SEARCH_KEY = 59\n MAPI_X400_CONTENT_TYPE = 60\n MAPI_SUBJECT_PREFIX = 61\n 
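(On the tnefparse mapi.py entry above.) decode_mapi and parse_property walk a byte buffer with little-endian readers (uint16, uint32, imported from the package's util module) and round every length-prefixed field up to a 4-byte boundary before advancing the offset. Below is a self-contained sketch of that walking pattern using struct; unlike parse_property, which keeps the pad bytes and strips trailing NULs later in the data property, this version returns the unpadded payload directly. The buffer contents are fabricated for the demo.

```python
import struct

def uint16(buf, offset=0):
    # Little-endian readers in the style of the util helpers used above.
    return struct.unpack_from('<H', buf, offset)[0]

def uint32(buf, offset=0):
    return struct.unpack_from('<I', buf, offset)[0]

def read_padded_bytes(buf, offset):
    """Read a length-prefixed field padded to a 4-byte boundary, as in parse_property."""
    length = uint32(buf, offset)
    offset += 4
    padded = length + (-length % 4)    # round the length up to a multiple of 4
    return buf[offset:offset + length], offset + padded

# b'abcde' (5 bytes) is stored with 3 pad bytes so the next field stays aligned.
buf = struct.pack('<I', 5) + b'abcde' + b'\x00' * 3 + struct.pack('<H', 7)
data, off = read_padded_bytes(buf, 0)
assert data == b'abcde' and off == 12  # offset skipped the padding
assert uint16(buf, off) == 7           # next field reads cleanly
```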
MAPI_NON_RECEIPT_REASON = 62\n MAPI_RECEIVED_BY_ENTRYID = 63\n MAPI_RECEIVED_BY_NAME = 64\n MAPI_SENT_REPRESENTING_ENTRYID = 65\n MAPI_SENT_REPRESENTING_NAME = 66\n MAPI_RCVD_REPRESENTING_ENTRYID = 67\n MAPI_RCVD_REPRESENTING_NAME = 68\n MAPI_REPORT_ENTRYID = 69\n MAPI_READ_RECEIPT_ENTRYID = 70\n MAPI_MESSAGE_SUBMISSION_ID = 71\n MAPI_PROVIDER_SUBMIT_TIME = 72\n MAPI_ORIGINAL_SUBJECT = 73\n MAPI_DISC_VAL = 74\n MAPI_ORIG_MESSAGE_CLASS = 75\n MAPI_ORIGINAL_AUTHOR_ENTRYID = 76\n MAPI_ORIGINAL_AUTHOR_NAME = 77\n MAPI_ORIGINAL_SUBMIT_TIME = 78\n MAPI_REPLY_RECIPIENT_ENTRIES = 79\n MAPI_REPLY_RECIPIENT_NAMES = 80\n MAPI_RECEIVED_BY_SEARCH_KEY = 81\n MAPI_RCVD_REPRESENTING_SEARCH_KEY = 82\n MAPI_READ_RECEIPT_SEARCH_KEY = 83\n MAPI_REPORT_SEARCH_KEY = 84\n MAPI_ORIGINAL_DELIVERY_TIME = 85\n MAPI_ORIGINAL_AUTHOR_SEARCH_KEY = 86\n MAPI_MESSAGE_TO_ME = 87\n MAPI_MESSAGE_CC_ME = 88\n MAPI_MESSAGE_RECIP_ME = 89\n MAPI_ORIGINAL_SENDER_NAME = 90\n MAPI_ORIGINAL_SENDER_ENTRYID = 91\n MAPI_ORIGINAL_SENDER_SEARCH_KEY = 92\n MAPI_ORIGINAL_SENT_REPRESENTING_NAME = 93\n MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID = 94\n MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY = 95\n MAPI_START_DATE = 96\n MAPI_END_DATE = 97\n MAPI_OWNER_APPT_ID = 98\n MAPI_RESPONSE_REQUESTED = 99\n MAPI_SENT_REPRESENTING_ADDRTYPE = 100\n MAPI_SENT_REPRESENTING_EMAIL_ADDRESS = 101\n MAPI_ORIGINAL_SENDER_ADDRTYPE = 102\n MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS = 103\n MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE = 104\n MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS = 105\n MAPI_CONVERSATION_TOPIC = 112\n MAPI_CONVERSATION_INDEX = 113\n MAPI_ORIGINAL_DISPLAY_BCC = 114\n MAPI_ORIGINAL_DISPLAY_CC = 115\n MAPI_ORIGINAL_DISPLAY_TO = 116\n MAPI_RECEIVED_BY_ADDRTYPE = 117\n MAPI_RECEIVED_BY_EMAIL_ADDRESS = 118\n MAPI_RCVD_REPRESENTING_ADDRTYPE = 119\n MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS = 120\n MAPI_ORIGINAL_AUTHOR_ADDRTYPE = 121\n MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS = 122\n MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE = 123\n MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS = 124\n MAPI_TRANSPORT_MESSAGE_HEADERS = 125\n MAPI_DELEGATION = 126\n MAPI_TNEF_CORRELATION_KEY = 127\n MAPI_BODY = 4096\n MAPI_REPORT_TEXT = 4097\n MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY = 4098\n MAPI_REPORTING_DL_NAME = 4099\n MAPI_REPORTING_MTA_CERTIFICATE = 4100\n MAPI_RTF_SYNC_BODY_CRC = 4102\n MAPI_RTF_SYNC_BODY_COUNT = 4103\n MAPI_RTF_SYNC_BODY_TAG = 4104\n MAPI_RTF_COMPRESSED = 4105\n MAPI_RTF_SYNC_PREFIX_COUNT = 4112\n MAPI_RTF_SYNC_TRAILING_COUNT = 4113\n MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID = 4114\n MAPI_BODY_HTML = 4115\n MAPI_SMTP_MESSAGE_ID = 4149\n MAPI_CONTENT_INTEGRITY_CHECK = 3072\n MAPI_EXPLICIT_CONVERSION = 3073\n MAPI_IPM_RETURN_REQUESTED = 3074\n MAPI_MESSAGE_TOKEN = 3075\n MAPI_NDR_REASON_CODE = 3076\n MAPI_NDR_DIAG_CODE = 3077\n MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED = 3078\n MAPI_DELIVERY_POINT = 3079\n MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED = 3080\n MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT = 3081\n MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY = 3082\n MAPI_PHYSICAL_DELIVERY_MODE = 3083\n MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST = 3084\n MAPI_PHYSICAL_FORWARDING_ADDRESS = 3085\n MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED = 3086\n MAPI_PHYSICAL_FORWARDING_PROHIBITED = 3087\n MAPI_PHYSICAL_RENDITION_ATTRIBUTES = 3088\n MAPI_PROOF_OF_DELIVERY = 3089\n MAPI_PROOF_OF_DELIVERY_REQUESTED = 3090\n MAPI_RECIPIENT_CERTIFICATE = 3091\n MAPI_RECIPIENT_NUMBER_FOR_ADVICE = 3092\n MAPI_RECIPIENT_TYPE = 3093\n MAPI_REGISTERED_MAIL_TYPE = 3094\n MAPI_REPLY_REQUESTED = 3095\n 
MAPI_REQUESTED_DELIVERY_METHOD = 3096\n MAPI_SENDER_ENTRYID = 3097\n MAPI_SENDER_NAME = 3098\n MAPI_SUPPLEMENTARY_INFO = 3099\n MAPI_TYPE_OF_MTS_USER = 3100\n MAPI_SENDER_SEARCH_KEY = 3101\n MAPI_SENDER_ADDRTYPE = 3102\n MAPI_SENDER_EMAIL_ADDRESS = 3103\n MAPI_CURRENT_VERSION = 3584\n MAPI_DELETE_AFTER_SUBMIT = 3585\n MAPI_DISPLAY_BCC = 3586\n MAPI_DISPLAY_CC = 3587\n MAPI_DISPLAY_TO = 3588\n MAPI_PARENT_DISPLAY = 3589\n MAPI_MESSAGE_DELIVERY_TIME = 3590\n MAPI_MESSAGE_FLAGS = 3591\n MAPI_MESSAGE_SIZE = 3592\n MAPI_PARENT_ENTRYID = 3593\n MAPI_SENTMAIL_ENTRYID = 3594\n MAPI_CORRELATE = 3596\n MAPI_CORRELATE_MTSID = 3597\n MAPI_DISCRETE_VALUES = 3598\n MAPI_RESPONSIBILITY = 3599\n MAPI_SPOOLER_STATUS = 3600\n MAPI_TRANSPORT_STATUS = 3601\n MAPI_MESSAGE_RECIPIENTS = 3602\n MAPI_MESSAGE_ATTACHMENTS = 3603\n MAPI_SUBMIT_FLAGS = 3604\n MAPI_RECIPIENT_STATUS = 3605\n MAPI_TRANSPORT_KEY = 3606\n MAPI_MSG_STATUS = 3607\n MAPI_MESSAGE_DOWNLOAD_TIME = 3608\n MAPI_CREATION_VERSION = 3609\n MAPI_MODIFY_VERSION = 3610\n MAPI_HASATTACH = 3611\n MAPI_BODY_CRC = 3612\n MAPI_NORMALIZED_SUBJECT = 3613\n MAPI_RTF_IN_SYNC = 3615\n MAPI_ATTACH_SIZE = 3616\n MAPI_ATTACH_NUM = 3617\n MAPI_PREPROCESS = 3618\n MAPI_ORIGINATING_MTA_CERTIFICATE = 3621\n MAPI_PROOF_OF_SUBMISSION = 3622\n MAPI_ENTRYID = 4095\n MAPI_OBJECT_TYPE = 4094\n MAPI_ICON = 4093\n MAPI_MINI_ICON = 4092\n MAPI_STORE_ENTRYID = 4091\n MAPI_STORE_RECORD_KEY = 4090\n MAPI_RECORD_KEY = 4089\n MAPI_MAPPING_SIGNATURE = 4088\n MAPI_ACCESS_LEVEL = 4087\n MAPI_INSTANCE_KEY = 4086\n MAPI_ROW_TYPE = 4085\n MAPI_ACCESS = 4084\n MAPI_ROWID = 12288\n MAPI_DISPLAY_NAME = 12289\n MAPI_ADDRTYPE = 12290\n MAPI_EMAIL_ADDRESS = 12291\n MAPI_COMMENT = 12292\n MAPI_DEPTH = 12293\n MAPI_PROVIDER_DISPLAY = 12294\n MAPI_CREATION_TIME = 12295\n MAPI_LAST_MODIFICATION_TIME = 12296\n MAPI_RESOURCE_FLAGS = 12297\n MAPI_PROVIDER_DLL_NAME = 12298\n MAPI_SEARCH_KEY = 12299\n MAPI_PROVIDER_UID = 12300\n MAPI_PROVIDER_ORDINAL = 12301\n MAPI_FORM_VERSION = 13057\n MAPI_FORM_CLSID = 13058\n MAPI_FORM_CONTACT_NAME = 13059\n MAPI_FORM_CATEGORY = 13060\n MAPI_FORM_CATEGORY_SUB = 13061\n MAPI_FORM_HOST_MAP = 13062\n MAPI_FORM_HIDDEN = 13063\n MAPI_FORM_DESIGNER_NAME = 13064\n MAPI_FORM_DESIGNER_GUID = 13065\n MAPI_FORM_MESSAGE_BEHAVIOR = 13066\n MAPI_DEFAULT_STORE = 13312\n MAPI_STORE_SUPPORT_MASK = 13325\n MAPI_STORE_STATE = 13326\n MAPI_IPM_SUBTREE_SEARCH_KEY = 13328\n MAPI_IPM_OUTBOX_SEARCH_KEY = 13329\n MAPI_IPM_WASTEBASKET_SEARCH_KEY = 13330\n MAPI_IPM_SENTMAIL_SEARCH_KEY = 13331\n MAPI_MDB_PROVIDER = 13332\n MAPI_RECEIVE_FOLDER_SETTINGS = 13333\n MAPI_VALID_FOLDER_MASK = 13791\n MAPI_IPM_SUBTREE_ENTRYID = 13792\n MAPI_IPM_OUTBOX_ENTRYID = 13794\n MAPI_IPM_WASTEBASKET_ENTRYID = 13795\n MAPI_IPM_SENTMAIL_ENTRYID = 13796\n MAPI_VIEWS_ENTRYID = 13797\n MAPI_COMMON_VIEWS_ENTRYID = 13798\n MAPI_FINDER_ENTRYID = 13799\n MAPI_CONTAINER_FLAGS = 13824\n MAPI_FOLDER_TYPE = 13825\n MAPI_CONTENT_COUNT = 13826\n MAPI_CONTENT_UNREAD = 13827\n MAPI_CREATE_TEMPLATES = 13828\n MAPI_DETAILS_TABLE = 13829\n MAPI_SEARCH = 13831\n MAPI_SELECTABLE = 13833\n MAPI_SUBFOLDERS = 13834\n MAPI_STATUS = 13835\n MAPI_ANR = 13836\n MAPI_CONTENTS_SORT_ORDER = 13837\n MAPI_CONTAINER_HIERARCHY = 13838\n MAPI_CONTAINER_CONTENTS = 13839\n MAPI_FOLDER_ASSOCIATED_CONTENTS = 13840\n MAPI_DEF_CREATE_DL = 13841\n MAPI_DEF_CREATE_MAILUSER = 13842\n MAPI_CONTAINER_CLASS = 13843\n MAPI_CONTAINER_MODIFY_VERSION = 13844\n MAPI_AB_PROVIDER_ID = 13845\n MAPI_DEFAULT_VIEW_ENTRYID = 13846\n MAPI_ASSOC_CONTENT_COUNT = 13847\n 
MAPI_ATTACHMENT_X400_PARAMETERS = 14080\n MAPI_ATTACH_DATA_OBJ = 14081\n MAPI_ATTACH_ENCODING = 14082\n MAPI_ATTACH_EXTENSION = 14083\n MAPI_ATTACH_FILENAME = 14084\n MAPI_ATTACH_METHOD = 14085\n MAPI_ATTACH_LONG_FILENAME = 14087\n MAPI_ATTACH_PATHNAME = 14088\n MAPI_ATTACH_RENDERING = 14089\n MAPI_ATTACH_TAG = 14090\n MAPI_RENDERING_POSITION = 14091\n MAPI_ATTACH_TRANSPORT_NAME = 14092\n MAPI_ATTACH_LONG_PATHNAME = 14093\n MAPI_ATTACH_MIME_TAG = 14094\n MAPI_ATTACH_ADDITIONAL_INFO = 14095\n MAPI_ATTACH_MIME_SEQUENCE = 14096\n MAPI_ATTACH_CONTENT_ID = 14098\n MAPI_ATTACH_CONTENT_LOCATION = 14099\n MAPI_ATTACH_FLAGS = 14100\n MAPI_DISPLAY_TYPE = 14592\n MAPI_TEMPLATEID = 14594\n MAPI_PRIMARY_CAPABILITY = 14596\n MAPI_7BIT_DISPLAY_NAME = 14847\n MAPI_ACCOUNT = 14848\n MAPI_ALTERNATE_RECIPIENT = 14849\n MAPI_CALLBACK_TELEPHONE_NUMBER = 14850\n MAPI_CONVERSION_PROHIBITED = 14851\n MAPI_DISCLOSE_RECIPIENTS = 14852\n MAPI_GENERATION = 14853\n MAPI_GIVEN_NAME = 14854\n MAPI_GOVERNMENT_ID_NUMBER = 14855\n MAPI_BUSINESS_TELEPHONE_NUMBER = 14856\n MAPI_HOME_TELEPHONE_NUMBER = 14857\n MAPI_INITIALS = 14858\n MAPI_KEYWORD = 14859\n MAPI_LANGUAGE = 14860\n MAPI_LOCATION = 14861\n MAPI_MAIL_PERMISSION = 14862\n MAPI_MHS_COMMON_NAME = 14863\n MAPI_ORGANIZATIONAL_ID_NUMBER = 14864\n MAPI_SURNAME = 14865\n MAPI_ORIGINAL_ENTRYID = 14866\n MAPI_ORIGINAL_DISPLAY_NAME = 14867\n MAPI_ORIGINAL_SEARCH_KEY = 14868\n MAPI_POSTAL_ADDRESS = 14869\n MAPI_COMPANY_NAME = 14870\n MAPI_TITLE = 14871\n MAPI_DEPARTMENT_NAME = 14872\n MAPI_OFFICE_LOCATION = 14873\n MAPI_PRIMARY_TELEPHONE_NUMBER = 14874\n MAPI_BUSINESS2_TELEPHONE_NUMBER = 14875\n MAPI_MOBILE_TELEPHONE_NUMBER = 14876\n MAPI_RADIO_TELEPHONE_NUMBER = 14877\n MAPI_CAR_TELEPHONE_NUMBER = 14878\n MAPI_OTHER_TELEPHONE_NUMBER = 14879\n MAPI_TRANSMITABLE_DISPLAY_NAME = 14880\n MAPI_PAGER_TELEPHONE_NUMBER = 14881\n MAPI_USER_CERTIFICATE = 14882\n MAPI_PRIMARY_FAX_NUMBER = 14883\n MAPI_BUSINESS_FAX_NUMBER = 14884\n MAPI_HOME_FAX_NUMBER = 14885\n MAPI_COUNTRY = 14886\n MAPI_LOCALITY = 14887\n MAPI_STATE_OR_PROVINCE = 14888\n MAPI_STREET_ADDRESS = 14889\n MAPI_POSTAL_CODE = 14890\n MAPI_POST_OFFICE_BOX = 14891\n MAPI_TELEX_NUMBER = 14892\n MAPI_ISDN_NUMBER = 14893\n MAPI_ASSISTANT_TELEPHONE_NUMBER = 14894\n MAPI_HOME2_TELEPHONE_NUMBER = 14895\n MAPI_ASSISTANT = 14896\n MAPI_SEND_RICH_INFO = 14912\n MAPI_WEDDING_ANNIVERSARY = 14913\n MAPI_BIRTHDAY = 14914\n MAPI_HOBBIES = 14915\n MAPI_MIDDLE_NAME = 14916\n MAPI_DISPLAY_NAME_PREFIX = 14917\n MAPI_PROFESSION = 14918\n MAPI_PREFERRED_BY_NAME = 14919\n MAPI_SPOUSE_NAME = 14920\n MAPI_COMPUTER_NETWORK_NAME = 14921\n MAPI_CUSTOMER_ID = 14922\n MAPI_TTYTDD_PHONE_NUMBER = 14923\n MAPI_FTP_SITE = 14924\n MAPI_GENDER = 14925\n MAPI_MANAGER_NAME = 14926\n MAPI_NICKNAME = 14927\n MAPI_PERSONAL_HOME_PAGE = 14928\n MAPI_BUSINESS_HOME_PAGE = 14929\n MAPI_CONTACT_VERSION = 14930\n MAPI_CONTACT_ENTRYIDS = 14931\n MAPI_CONTACT_ADDRTYPES = 14932\n MAPI_CONTACT_DEFAULT_ADDRESS_INDEX = 14933\n MAPI_CONTACT_EMAIL_ADDRESSES = 14934\n MAPI_COMPANY_MAIN_PHONE_NUMBER = 14935\n MAPI_CHILDRENS_NAMES = 14936\n MAPI_HOME_ADDRESS_CITY = 14937\n MAPI_HOME_ADDRESS_COUNTRY = 14938\n MAPI_HOME_ADDRESS_POSTAL_CODE = 14939\n MAPI_HOME_ADDRESS_STATE_OR_PROVINCE = 14940\n MAPI_HOME_ADDRESS_STREET = 14941\n MAPI_HOME_ADDRESS_POST_OFFICE_BOX = 14942\n MAPI_OTHER_ADDRESS_CITY = 14943\n MAPI_OTHER_ADDRESS_COUNTRY = 14944\n MAPI_OTHER_ADDRESS_POSTAL_CODE = 14945\n MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE = 14946\n MAPI_OTHER_ADDRESS_STREET = 14947\n 
MAPI_OTHER_ADDRESS_POST_OFFICE_BOX = 14948\n MAPI_STORE_PROVIDERS = 15616\n MAPI_AB_PROVIDERS = 15617\n MAPI_TRANSPORT_PROVIDERS = 15618\n MAPI_DEFAULT_PROFILE = 15620\n MAPI_AB_SEARCH_PATH = 15621\n MAPI_AB_DEFAULT_DIR = 15622\n MAPI_AB_DEFAULT_PAB = 15623\n MAPI_FILTERING_HOOKS = 15624\n MAPI_SERVICE_NAME = 15625\n MAPI_SERVICE_DLL_NAME = 15626\n MAPI_SERVICE_ENTRY_NAME = 15627\n MAPI_SERVICE_UID = 15628\n MAPI_SERVICE_EXTRA_UIDS = 15629\n MAPI_SERVICES = 15630\n MAPI_SERVICE_SUPPORT_FILES = 15631\n MAPI_SERVICE_DELETE_FILES = 15632\n MAPI_AB_SEARCH_PATH_UPDATE = 15633\n MAPI_PROFILE_NAME = 15634\n MAPI_IDENTITY_DISPLAY = 15872\n MAPI_IDENTITY_ENTRYID = 15873\n MAPI_RESOURCE_METHODS = 15874\n MAPI_RESOURCE_TYPE = 15875\n MAPI_STATUS_CODE = 15876\n MAPI_IDENTITY_SEARCH_KEY = 15877\n MAPI_OWN_STORE_ENTRYID = 15878\n MAPI_RESOURCE_PATH = 15879\n MAPI_STATUS_STRING = 15880\n MAPI_X400_DEFERRED_DELIVERY_CANCEL = 15881\n MAPI_HEADER_FOLDER_ENTRYID = 15882\n MAPI_REMOTE_PROGRESS = 15883\n MAPI_REMOTE_PROGRESS_TEXT = 15884\n MAPI_REMOTE_VALIDATE_OK = 15885\n MAPI_CONTROL_FLAGS = 16128\n MAPI_CONTROL_STRUCTURE = 16129\n MAPI_CONTROL_TYPE = 16130\n MAPI_DELTAX = 16131\n MAPI_DELTAY = 16132\n MAPI_XPOS = 16133\n MAPI_YPOS = 16134\n MAPI_CONTROL_ID = 16135\n MAPI_INITIAL_DETAILS_PANE = 16136\n UNCOMPRESSED_BODY = 16345\n MAPI_PRIMARY_SEND_ACCOUNT = 3624\n MAPI_NEXT_SEND_ACCT = 3625\n MAPI_INTERNET_REFERENCES = 4153\n MAPI_IN_REPLY_TO_ID = 4162\n MAPI_INTERNET_RETURN_PATH = 4166\n MAPI_ICON_INDEX = 4224\n MAPI_TARGET_ENTRY_ID = 12304\n MAPI_CONVERSATION_ID = 12307\n MAPI_STORE_UNICODE_MASK = 13327\n MAPI_INTERNET_CODEPAGE = 16350\n MAPI_MESSAGE_LOCALE_ID = 16369\n MAPI_CREATOR_NAME = 16376\n MAPI_CREATOR_ENTRY_ID = 16377\n MAPI_LAST_MODIFIER_ENTRY_ID = 16379\n MAPI_MESSAGE_CODEPAGE = 16381\n MAPI_INTERNET_MAIL_OVERRIDE_FORMAT = 22786\n MAPI_MESSAGE_EDITOR_FORMAT = 22793\n MAPI_SENDER_SMTP_ADDRESS = 23809\n MAPI_SENT_REPRESENTING_SMTP_ADDRESS = 23810\n MAPI_READ_RECEIPT_SMTP_ADDRESS = 23813\n MAPI_RECEIVED_BY_SMTP_ADDRESS = 23815\n MAPI_RECEIVED_REPRESENTING_SMTP_ADDRESS = 23816\n MAPI_SIP_ADDRESS = 24549\n MAPI_ATTACHMENT_LINK_ID = 32762\n MAPI_EXCEPTION_START_TIME = 32763\n MAPI_EXCEPTION_END_TIME = 32764\n MAPI_ATTACHMENT_FLAGS = 32765\n MAPI_ATTACHMENT_HIDDEN = 32766\n MAPI_ATTACHMENT_CONTACT_PHOTO = 32767\n MAPI_ID_SECURE_MIN = 26608\n MAPI_ID_SECURE_MAX = 26623\n codes = {MAPI_ACKNOWLEDGEMENT_MODE: 'MAPI_ACKNOWLEDGEMENT_MODE', \n MAPI_ALTERNATE_RECIPIENT_ALLOWED: 'MAPI_ALTERNATE_RECIPIENT_ALLOWED', \n MAPI_AUTHORIZING_USERS: 'MAPI_AUTHORIZING_USERS', \n MAPI_AUTO_FORWARD_COMMENT: 'MAPI_AUTO_FORWARD_COMMENT', \n MAPI_AUTO_FORWARDED: 'MAPI_AUTO_FORWARDED', \n MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID: 'MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID', \n MAPI_CONTENT_CORRELATOR: 'MAPI_CONTENT_CORRELATOR', \n MAPI_CONTENT_IDENTIFIER: 'MAPI_CONTENT_IDENTIFIER', \n MAPI_CONTENT_LENGTH: 'MAPI_CONTENT_LENGTH', \n MAPI_CONTENT_RETURN_REQUESTED: 'MAPI_CONTENT_RETURN_REQUESTED', \n MAPI_CONVERSATION_KEY: 'MAPI_CONVERSATION_KEY', \n MAPI_CONVERSION_EITS: 'MAPI_CONVERSION_EITS', \n MAPI_CONVERSION_WITH_LOSS_PROHIBITED: 'MAPI_CONVERSION_WITH_LOSS_PROHIBITED', \n MAPI_CONVERTED_EITS: 'MAPI_CONVERTED_EITS', \n MAPI_DEFERRED_DELIVERY_TIME: 'MAPI_DEFERRED_DELIVERY_TIME', \n MAPI_DELIVER_TIME: 'MAPI_DELIVER_TIME', \n MAPI_DISCARD_REASON: 'MAPI_DISCARD_REASON', \n MAPI_DISCLOSURE_OF_RECIPIENTS: 'MAPI_DISCLOSURE_OF_RECIPIENTS', \n MAPI_DL_EXPANSION_HISTORY: 'MAPI_DL_EXPANSION_HISTORY', \n MAPI_DL_EXPANSION_PROHIBITED: 
'MAPI_DL_EXPANSION_PROHIBITED', \n MAPI_EXPIRY_TIME: 'MAPI_EXPIRY_TIME', \n MAPI_IMPLICIT_CONVERSION_PROHIBITED: 'MAPI_IMPLICIT_CONVERSION_PROHIBITED', \n MAPI_IMPORTANCE: 'MAPI_IMPORTANCE', \n MAPI_IPM_ID: 'MAPI_IPM_ID', \n MAPI_LATEST_DELIVERY_TIME: 'MAPI_LATEST_DELIVERY_TIME', \n MAPI_MESSAGE_CLASS: 'MAPI_MESSAGE_CLASS', \n MAPI_MESSAGE_DELIVERY_ID: 'MAPI_MESSAGE_DELIVERY_ID', \n MAPI_MESSAGE_SECURITY_LABEL: 'MAPI_MESSAGE_SECURITY_LABEL', \n MAPI_OBSOLETED_IPMS: 'MAPI_OBSOLETED_IPMS', \n MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME: 'MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME', \n MAPI_ORIGINAL_EITS: 'MAPI_ORIGINAL_EITS', \n MAPI_ORIGINATOR_CERTIFICATE: 'MAPI_ORIGINATOR_CERTIFICATE', \n MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED: 'MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED', \n MAPI_ORIGINATOR_RETURN_ADDRESS: 'MAPI_ORIGINATOR_RETURN_ADDRESS', \n MAPI_PARENT_KEY: 'MAPI_PARENT_KEY', \n MAPI_PRIORITY: 'MAPI_PRIORITY', \n MAPI_ORIGIN_CHECK: 'MAPI_ORIGIN_CHECK', \n MAPI_PROOF_OF_SUBMISSION_REQUESTED: 'MAPI_PROOF_OF_SUBMISSION_REQUESTED', \n MAPI_READ_RECEIPT_REQUESTED: 'MAPI_READ_RECEIPT_REQUESTED', \n MAPI_RECEIPT_TIME: 'MAPI_RECEIPT_TIME', \n MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED: 'MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED', \n MAPI_REDIRECTION_HISTORY: 'MAPI_REDIRECTION_HISTORY', \n MAPI_RELATED_IPMS: 'MAPI_RELATED_IPMS', \n MAPI_ORIGINAL_SENSITIVITY: 'MAPI_ORIGINAL_SENSITIVITY', \n MAPI_LANGUAGES: 'MAPI_LANGUAGES', \n MAPI_REPLY_TIME: 'MAPI_REPLY_TIME', \n MAPI_REPORT_TAG: 'MAPI_REPORT_TAG', \n MAPI_REPORT_TIME: 'MAPI_REPORT_TIME', \n MAPI_RETURNED_IPM: 'MAPI_RETURNED_IPM', \n MAPI_SECURITY: 'MAPI_SECURITY', \n MAPI_INCOMPLETE_COPY: 'MAPI_INCOMPLETE_COPY', \n MAPI_SENSITIVITY: 'MAPI_SENSITIVITY', \n MAPI_SUBJECT: 'MAPI_SUBJECT', \n MAPI_SUBJECT_IPM: 'MAPI_SUBJECT_IPM', \n MAPI_CLIENT_SUBMIT_TIME: 'MAPI_CLIENT_SUBMIT_TIME', \n MAPI_REPORT_NAME: 'MAPI_REPORT_NAME', \n MAPI_SENT_REPRESENTING_SEARCH_KEY: 'MAPI_SENT_REPRESENTING_SEARCH_KEY', \n MAPI_X400_CONTENT_TYPE: 'MAPI_X400_CONTENT_TYPE', \n MAPI_SUBJECT_PREFIX: 'MAPI_SUBJECT_PREFIX', \n MAPI_NON_RECEIPT_REASON: 'MAPI_NON_RECEIPT_REASON', \n MAPI_RECEIVED_BY_ENTRYID: 'MAPI_RECEIVED_BY_ENTRYID', \n MAPI_RECEIVED_BY_NAME: 'MAPI_RECEIVED_BY_NAME', \n MAPI_SENT_REPRESENTING_ENTRYID: 'MAPI_SENT_REPRESENTING_ENTRYID', \n MAPI_SENT_REPRESENTING_NAME: 'MAPI_SENT_REPRESENTING_NAME', \n MAPI_RCVD_REPRESENTING_ENTRYID: 'MAPI_RCVD_REPRESENTING_ENTRYID', \n MAPI_RCVD_REPRESENTING_NAME: 'MAPI_RCVD_REPRESENTING_NAME', \n MAPI_REPORT_ENTRYID: 'MAPI_REPORT_ENTRYID', \n MAPI_READ_RECEIPT_ENTRYID: 'MAPI_READ_RECEIPT_ENTRYID', \n MAPI_MESSAGE_SUBMISSION_ID: 'MAPI_MESSAGE_SUBMISSION_ID', \n MAPI_PROVIDER_SUBMIT_TIME: 'MAPI_PROVIDER_SUBMIT_TIME', \n MAPI_ORIGINAL_SUBJECT: 'MAPI_ORIGINAL_SUBJECT', \n MAPI_DISC_VAL: 'MAPI_DISC_VAL', \n MAPI_ORIG_MESSAGE_CLASS: 'MAPI_ORIG_MESSAGE_CLASS', \n MAPI_ORIGINAL_AUTHOR_ENTRYID: 'MAPI_ORIGINAL_AUTHOR_ENTRYID', \n MAPI_ORIGINAL_AUTHOR_NAME: 'MAPI_ORIGINAL_AUTHOR_NAME', \n MAPI_ORIGINAL_SUBMIT_TIME: 'MAPI_ORIGINAL_SUBMIT_TIME', \n MAPI_REPLY_RECIPIENT_ENTRIES: 'MAPI_REPLY_RECIPIENT_ENTRIES', \n MAPI_REPLY_RECIPIENT_NAMES: 'MAPI_REPLY_RECIPIENT_NAMES', \n MAPI_RECEIVED_BY_SEARCH_KEY: 'MAPI_RECEIVED_BY_SEARCH_KEY', \n MAPI_RCVD_REPRESENTING_SEARCH_KEY: 'MAPI_RCVD_REPRESENTING_SEARCH_KEY', \n MAPI_READ_RECEIPT_SEARCH_KEY: 'MAPI_READ_RECEIPT_SEARCH_KEY', \n MAPI_REPORT_SEARCH_KEY: 'MAPI_REPORT_SEARCH_KEY', \n MAPI_ORIGINAL_DELIVERY_TIME: 'MAPI_ORIGINAL_DELIVERY_TIME', \n MAPI_ORIGINAL_AUTHOR_SEARCH_KEY: 'MAPI_ORIGINAL_AUTHOR_SEARCH_KEY', 
\n MAPI_MESSAGE_TO_ME: 'MAPI_MESSAGE_TO_ME', \n MAPI_MESSAGE_CC_ME: 'MAPI_MESSAGE_CC_ME', \n MAPI_MESSAGE_RECIP_ME: 'MAPI_MESSAGE_RECIP_ME', \n MAPI_ORIGINAL_SENDER_NAME: 'MAPI_ORIGINAL_SENDER_NAME', \n MAPI_ORIGINAL_SENDER_ENTRYID: 'MAPI_ORIGINAL_SENDER_ENTRYID', \n MAPI_ORIGINAL_SENDER_SEARCH_KEY: 'MAPI_ORIGINAL_SENDER_SEARCH_KEY', \n MAPI_ORIGINAL_SENT_REPRESENTING_NAME: 'MAPI_ORIGINAL_SENT_REPRESENTING_NAME', \n MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID: 'MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID', \n MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY: 'MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY', \n MAPI_START_DATE: 'MAPI_START_DATE', \n MAPI_END_DATE: 'MAPI_END_DATE', \n MAPI_OWNER_APPT_ID: 'MAPI_OWNER_APPT_ID', \n MAPI_RESPONSE_REQUESTED: 'MAPI_RESPONSE_REQUESTED', \n MAPI_SENT_REPRESENTING_ADDRTYPE: 'MAPI_SENT_REPRESENTING_ADDRTYPE', \n MAPI_SENT_REPRESENTING_EMAIL_ADDRESS: 'MAPI_SENT_REPRESENTING_EMAIL_ADDRESS', \n MAPI_ORIGINAL_SENDER_ADDRTYPE: 'MAPI_ORIGINAL_SENDER_ADDRTYPE', \n MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS: 'MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS', \n MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE: 'MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE', \n MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS: 'MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS', \n MAPI_CONVERSATION_TOPIC: 'MAPI_CONVERSATION_TOPIC', \n MAPI_CONVERSATION_INDEX: 'MAPI_CONVERSATION_INDEX', \n MAPI_ORIGINAL_DISPLAY_BCC: 'MAPI_ORIGINAL_DISPLAY_BCC', \n MAPI_ORIGINAL_DISPLAY_CC: 'MAPI_ORIGINAL_DISPLAY_CC', \n MAPI_ORIGINAL_DISPLAY_TO: 'MAPI_ORIGINAL_DISPLAY_TO', \n MAPI_RECEIVED_BY_ADDRTYPE: 'MAPI_RECEIVED_BY_ADDRTYPE', \n MAPI_RECEIVED_BY_EMAIL_ADDRESS: 'MAPI_RECEIVED_BY_EMAIL_ADDRESS', \n MAPI_RCVD_REPRESENTING_ADDRTYPE: 'MAPI_RCVD_REPRESENTING_ADDRTYPE', \n MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS: 'MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS', \n MAPI_ORIGINAL_AUTHOR_ADDRTYPE: 'MAPI_ORIGINAL_AUTHOR_ADDRTYPE', \n MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS: 'MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS', \n MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE: 'MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE', \n MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS: 'MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS', \n MAPI_TRANSPORT_MESSAGE_HEADERS: 'MAPI_TRANSPORT_MESSAGE_HEADERS', \n MAPI_DELEGATION: 'MAPI_DELEGATION', \n MAPI_TNEF_CORRELATION_KEY: 'MAPI_TNEF_CORRELATION_KEY', \n MAPI_BODY: 'MAPI_BODY', \n MAPI_REPORT_TEXT: 'MAPI_REPORT_TEXT', \n MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY: 'MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY', \n MAPI_REPORTING_DL_NAME: 'MAPI_REPORTING_DL_NAME', \n MAPI_REPORTING_MTA_CERTIFICATE: 'MAPI_REPORTING_MTA_CERTIFICATE', \n MAPI_RTF_SYNC_BODY_CRC: 'MAPI_RTF_SYNC_BODY_CRC', \n MAPI_RTF_SYNC_BODY_COUNT: 'MAPI_RTF_SYNC_BODY_COUNT', \n MAPI_RTF_SYNC_BODY_TAG: 'MAPI_RTF_SYNC_BODY_TAG', \n MAPI_RTF_COMPRESSED: 'MAPI_RTF_COMPRESSED', \n MAPI_RTF_SYNC_PREFIX_COUNT: 'MAPI_RTF_SYNC_PREFIX_COUNT', \n MAPI_RTF_SYNC_TRAILING_COUNT: 'MAPI_RTF_SYNC_TRAILING_COUNT', \n MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID: 'MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID', \n MAPI_BODY_HTML: 'MAPI_BODY_HTML', \n MAPI_SMTP_MESSAGE_ID: 'MAPI_SMTP_MESSAGE_ID', \n MAPI_CONTENT_INTEGRITY_CHECK: 'MAPI_CONTENT_INTEGRITY_CHECK', \n MAPI_EXPLICIT_CONVERSION: 'MAPI_EXPLICIT_CONVERSION', \n MAPI_IPM_RETURN_REQUESTED: 'MAPI_IPM_RETURN_REQUESTED', \n MAPI_MESSAGE_TOKEN: 'MAPI_MESSAGE_TOKEN', \n MAPI_NDR_REASON_CODE: 'MAPI_NDR_REASON_CODE', \n MAPI_NDR_DIAG_CODE: 'MAPI_NDR_DIAG_CODE', \n MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED: 'MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED', \n MAPI_DELIVERY_POINT: 'MAPI_DELIVERY_POINT', 
\n MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED: 'MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED', \n MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT: 'MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT', \n MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY: 'MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY', \n MAPI_PHYSICAL_DELIVERY_MODE: 'MAPI_PHYSICAL_DELIVERY_MODE', \n MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST: 'MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST', \n MAPI_PHYSICAL_FORWARDING_ADDRESS: 'MAPI_PHYSICAL_FORWARDING_ADDRESS', \n MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED: 'MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED', \n MAPI_PHYSICAL_FORWARDING_PROHIBITED: 'MAPI_PHYSICAL_FORWARDING_PROHIBITED', \n MAPI_PHYSICAL_RENDITION_ATTRIBUTES: 'MAPI_PHYSICAL_RENDITION_ATTRIBUTES', \n MAPI_PROOF_OF_DELIVERY: 'MAPI_PROOF_OF_DELIVERY', \n MAPI_PROOF_OF_DELIVERY_REQUESTED: 'MAPI_PROOF_OF_DELIVERY_REQUESTED', \n MAPI_RECIPIENT_CERTIFICATE: 'MAPI_RECIPIENT_CERTIFICATE', \n MAPI_RECIPIENT_NUMBER_FOR_ADVICE: 'MAPI_RECIPIENT_NUMBER_FOR_ADVICE', \n MAPI_RECIPIENT_TYPE: 'MAPI_RECIPIENT_TYPE', \n MAPI_REGISTERED_MAIL_TYPE: 'MAPI_REGISTERED_MAIL_TYPE', \n MAPI_REPLY_REQUESTED: 'MAPI_REPLY_REQUESTED', \n MAPI_REQUESTED_DELIVERY_METHOD: 'MAPI_REQUESTED_DELIVERY_METHOD', \n MAPI_SENDER_ENTRYID: 'MAPI_SENDER_ENTRYID', \n MAPI_SENDER_NAME: 'MAPI_SENDER_NAME', \n MAPI_SUPPLEMENTARY_INFO: 'MAPI_SUPPLEMENTARY_INFO', \n MAPI_TYPE_OF_MTS_USER: 'MAPI_TYPE_OF_MTS_USER', \n MAPI_SENDER_SEARCH_KEY: 'MAPI_SENDER_SEARCH_KEY', \n MAPI_SENDER_ADDRTYPE: 'MAPI_SENDER_ADDRTYPE', \n MAPI_SENDER_EMAIL_ADDRESS: 'MAPI_SENDER_EMAIL_ADDRESS', \n MAPI_CURRENT_VERSION: 'MAPI_CURRENT_VERSION', \n MAPI_DELETE_AFTER_SUBMIT: 'MAPI_DELETE_AFTER_SUBMIT', \n MAPI_DISPLAY_BCC: 'MAPI_DISPLAY_BCC', \n MAPI_DISPLAY_CC: 'MAPI_DISPLAY_CC', \n MAPI_DISPLAY_TO: 'MAPI_DISPLAY_TO', \n MAPI_PARENT_DISPLAY: 'MAPI_PARENT_DISPLAY', \n MAPI_MESSAGE_DELIVERY_TIME: 'MAPI_MESSAGE_DELIVERY_TIME', \n MAPI_MESSAGE_FLAGS: 'MAPI_MESSAGE_FLAGS', \n MAPI_MESSAGE_SIZE: 'MAPI_MESSAGE_SIZE', \n MAPI_PARENT_ENTRYID: 'MAPI_PARENT_ENTRYID', \n MAPI_SENTMAIL_ENTRYID: 'MAPI_SENTMAIL_ENTRYID', \n MAPI_CORRELATE: 'MAPI_CORRELATE', \n MAPI_CORRELATE_MTSID: 'MAPI_CORRELATE_MTSID', \n MAPI_DISCRETE_VALUES: 'MAPI_DISCRETE_VALUES', \n MAPI_RESPONSIBILITY: 'MAPI_RESPONSIBILITY', \n MAPI_SPOOLER_STATUS: 'MAPI_SPOOLER_STATUS', \n MAPI_TRANSPORT_STATUS: 'MAPI_TRANSPORT_STATUS', \n MAPI_MESSAGE_RECIPIENTS: 'MAPI_MESSAGE_RECIPIENTS', \n MAPI_MESSAGE_ATTACHMENTS: 'MAPI_MESSAGE_ATTACHMENTS', \n MAPI_SUBMIT_FLAGS: 'MAPI_SUBMIT_FLAGS', \n MAPI_RECIPIENT_STATUS: 'MAPI_RECIPIENT_STATUS', \n MAPI_TRANSPORT_KEY: 'MAPI_TRANSPORT_KEY', \n MAPI_MSG_STATUS: 'MAPI_MSG_STATUS', \n MAPI_MESSAGE_DOWNLOAD_TIME: 'MAPI_MESSAGE_DOWNLOAD_TIME', \n MAPI_CREATION_VERSION: 'MAPI_CREATION_VERSION', \n MAPI_MODIFY_VERSION: 'MAPI_MODIFY_VERSION', \n MAPI_HASATTACH: 'MAPI_HASATTACH', \n MAPI_BODY_CRC: 'MAPI_BODY_CRC', \n MAPI_NORMALIZED_SUBJECT: 'MAPI_NORMALIZED_SUBJECT', \n MAPI_RTF_IN_SYNC: 'MAPI_RTF_IN_SYNC', \n MAPI_ATTACH_SIZE: 'MAPI_ATTACH_SIZE', \n MAPI_ATTACH_NUM: 'MAPI_ATTACH_NUM', \n MAPI_PREPROCESS: 'MAPI_PREPROCESS', \n MAPI_ORIGINATING_MTA_CERTIFICATE: 'MAPI_ORIGINATING_MTA_CERTIFICATE', \n MAPI_PROOF_OF_SUBMISSION: 'MAPI_PROOF_OF_SUBMISSION', \n MAPI_ENTRYID: 'MAPI_ENTRYID', \n MAPI_OBJECT_TYPE: 'MAPI_OBJECT_TYPE', \n MAPI_ICON: 'MAPI_ICON', \n MAPI_MINI_ICON: 'MAPI_MINI_ICON', \n MAPI_STORE_ENTRYID: 'MAPI_STORE_ENTRYID', \n MAPI_STORE_RECORD_KEY: 'MAPI_STORE_RECORD_KEY', \n MAPI_RECORD_KEY: 'MAPI_RECORD_KEY', \n 
MAPI_MAPPING_SIGNATURE: 'MAPI_MAPPING_SIGNATURE', \n MAPI_ACCESS_LEVEL: 'MAPI_ACCESS_LEVEL', \n MAPI_INSTANCE_KEY: 'MAPI_INSTANCE_KEY', \n MAPI_ROW_TYPE: 'MAPI_ROW_TYPE', \n MAPI_ACCESS: 'MAPI_ACCESS', \n MAPI_ROWID: 'MAPI_ROWID', \n MAPI_DISPLAY_NAME: 'MAPI_DISPLAY_NAME', \n MAPI_ADDRTYPE: 'MAPI_ADDRTYPE', \n MAPI_EMAIL_ADDRESS: 'MAPI_EMAIL_ADDRESS', \n MAPI_COMMENT: 'MAPI_COMMENT', \n MAPI_DEPTH: 'MAPI_DEPTH', \n MAPI_PROVIDER_DISPLAY: 'MAPI_PROVIDER_DISPLAY', \n MAPI_CREATION_TIME: 'MAPI_CREATION_TIME', \n MAPI_LAST_MODIFICATION_TIME: 'MAPI_LAST_MODIFICATION_TIME', \n MAPI_RESOURCE_FLAGS: 'MAPI_RESOURCE_FLAGS', \n MAPI_PROVIDER_DLL_NAME: 'MAPI_PROVIDER_DLL_NAME', \n MAPI_SEARCH_KEY: 'MAPI_SEARCH_KEY', \n MAPI_PROVIDER_UID: 'MAPI_PROVIDER_UID', \n MAPI_PROVIDER_ORDINAL: 'MAPI_PROVIDER_ORDINAL', \n MAPI_FORM_VERSION: 'MAPI_FORM_VERSION', \n MAPI_FORM_CLSID: 'MAPI_FORM_CLSID', \n MAPI_FORM_CONTACT_NAME: 'MAPI_FORM_CONTACT_NAME', \n MAPI_FORM_CATEGORY: 'MAPI_FORM_CATEGORY', \n MAPI_FORM_CATEGORY_SUB: 'MAPI_FORM_CATEGORY_SUB', \n MAPI_FORM_HOST_MAP: 'MAPI_FORM_HOST_MAP', \n MAPI_FORM_HIDDEN: 'MAPI_FORM_HIDDEN', \n MAPI_FORM_DESIGNER_NAME: 'MAPI_FORM_DESIGNER_NAME', \n MAPI_FORM_DESIGNER_GUID: 'MAPI_FORM_DESIGNER_GUID', \n MAPI_FORM_MESSAGE_BEHAVIOR: 'MAPI_FORM_MESSAGE_BEHAVIOR', \n MAPI_DEFAULT_STORE: 'MAPI_DEFAULT_STORE', \n MAPI_STORE_SUPPORT_MASK: 'MAPI_STORE_SUPPORT_MASK', \n MAPI_STORE_STATE: 'MAPI_STORE_STATE', \n MAPI_IPM_SUBTREE_SEARCH_KEY: 'MAPI_IPM_SUBTREE_SEARCH_KEY', \n MAPI_IPM_OUTBOX_SEARCH_KEY: 'MAPI_IPM_OUTBOX_SEARCH_KEY', \n MAPI_IPM_WASTEBASKET_SEARCH_KEY: 'MAPI_IPM_WASTEBASKET_SEARCH_KEY', \n MAPI_IPM_SENTMAIL_SEARCH_KEY: 'MAPI_IPM_SENTMAIL_SEARCH_KEY', \n MAPI_MDB_PROVIDER: 'MAPI_MDB_PROVIDER', \n MAPI_RECEIVE_FOLDER_SETTINGS: 'MAPI_RECEIVE_FOLDER_SETTINGS', \n MAPI_VALID_FOLDER_MASK: 'MAPI_VALID_FOLDER_MASK', \n MAPI_IPM_SUBTREE_ENTRYID: 'MAPI_IPM_SUBTREE_ENTRYID', \n MAPI_IPM_OUTBOX_ENTRYID: 'MAPI_IPM_OUTBOX_ENTRYID', \n MAPI_IPM_WASTEBASKET_ENTRYID: 'MAPI_IPM_WASTEBASKET_ENTRYID', \n MAPI_IPM_SENTMAIL_ENTRYID: 'MAPI_IPM_SENTMAIL_ENTRYID', \n MAPI_VIEWS_ENTRYID: 'MAPI_VIEWS_ENTRYID', \n MAPI_COMMON_VIEWS_ENTRYID: 'MAPI_COMMON_VIEWS_ENTRYID', \n MAPI_FINDER_ENTRYID: 'MAPI_FINDER_ENTRYID', \n MAPI_CONTAINER_FLAGS: 'MAPI_CONTAINER_FLAGS', \n MAPI_FOLDER_TYPE: 'MAPI_FOLDER_TYPE', \n MAPI_CONTENT_COUNT: 'MAPI_CONTENT_COUNT', \n MAPI_CONTENT_UNREAD: 'MAPI_CONTENT_UNREAD', \n MAPI_CREATE_TEMPLATES: 'MAPI_CREATE_TEMPLATES', \n MAPI_DETAILS_TABLE: 'MAPI_DETAILS_TABLE', \n MAPI_SEARCH: 'MAPI_SEARCH', \n MAPI_SELECTABLE: 'MAPI_SELECTABLE', \n MAPI_SUBFOLDERS: 'MAPI_SUBFOLDERS', \n MAPI_STATUS: 'MAPI_STATUS', \n MAPI_ANR: 'MAPI_ANR', \n MAPI_CONTENTS_SORT_ORDER: 'MAPI_CONTENTS_SORT_ORDER', \n MAPI_CONTAINER_HIERARCHY: 'MAPI_CONTAINER_HIERARCHY', \n MAPI_CONTAINER_CONTENTS: 'MAPI_CONTAINER_CONTENTS', \n MAPI_FOLDER_ASSOCIATED_CONTENTS: 'MAPI_FOLDER_ASSOCIATED_CONTENTS', \n MAPI_DEF_CREATE_DL: 'MAPI_DEF_CREATE_DL', \n MAPI_DEF_CREATE_MAILUSER: 'MAPI_DEF_CREATE_MAILUSER', \n MAPI_CONTAINER_CLASS: 'MAPI_CONTAINER_CLASS', \n MAPI_CONTAINER_MODIFY_VERSION: 'MAPI_CONTAINER_MODIFY_VERSION', \n MAPI_AB_PROVIDER_ID: 'MAPI_AB_PROVIDER_ID', \n MAPI_DEFAULT_VIEW_ENTRYID: 'MAPI_DEFAULT_VIEW_ENTRYID', \n MAPI_ASSOC_CONTENT_COUNT: 'MAPI_ASSOC_CONTENT_COUNT', \n MAPI_ATTACHMENT_X400_PARAMETERS: 'MAPI_ATTACHMENT_X400_PARAMETERS', \n MAPI_ATTACH_DATA_OBJ: 'MAPI_ATTACH_DATA_OBJ', \n MAPI_ATTACH_ENCODING: 'MAPI_ATTACH_ENCODING', \n MAPI_ATTACH_EXTENSION: 'MAPI_ATTACH_EXTENSION', \n 
MAPI_ATTACH_FILENAME: 'MAPI_ATTACH_FILENAME', \n MAPI_ATTACH_METHOD: 'MAPI_ATTACH_METHOD', \n MAPI_ATTACH_LONG_FILENAME: 'MAPI_ATTACH_LONG_FILENAME', \n MAPI_ATTACH_PATHNAME: 'MAPI_ATTACH_PATHNAME', \n MAPI_ATTACH_RENDERING: 'MAPI_ATTACH_RENDERING', \n MAPI_ATTACH_TAG: 'MAPI_ATTACH_TAG', \n MAPI_RENDERING_POSITION: 'MAPI_RENDERING_POSITION', \n MAPI_ATTACH_TRANSPORT_NAME: 'MAPI_ATTACH_TRANSPORT_NAME', \n MAPI_ATTACH_LONG_PATHNAME: 'MAPI_ATTACH_LONG_PATHNAME', \n MAPI_ATTACH_MIME_TAG: 'MAPI_ATTACH_MIME_TAG', \n MAPI_ATTACH_ADDITIONAL_INFO: 'MAPI_ATTACH_ADDITIONAL_INFO', \n MAPI_ATTACH_MIME_SEQUENCE: 'MAPI_ATTACH_MIME_SEQUENCE', \n MAPI_ATTACH_CONTENT_ID: 'MAPI_ATTACH_CONTENT_ID', \n MAPI_ATTACH_CONTENT_LOCATION: 'MAPI_ATTACH_CONTENT_LOCATION', \n MAPI_ATTACH_FLAGS: 'MAPI_ATTACH_FLAGS', \n MAPI_DISPLAY_TYPE: 'MAPI_DISPLAY_TYPE', \n MAPI_TEMPLATEID: 'MAPI_TEMPLATEID', \n MAPI_PRIMARY_CAPABILITY: 'MAPI_PRIMARY_CAPABILITY', \n MAPI_7BIT_DISPLAY_NAME: 'MAPI_7BIT_DISPLAY_NAME', \n MAPI_ACCOUNT: 'MAPI_ACCOUNT', \n MAPI_ALTERNATE_RECIPIENT: 'MAPI_ALTERNATE_RECIPIENT', \n MAPI_CALLBACK_TELEPHONE_NUMBER: 'MAPI_CALLBACK_TELEPHONE_NUMBER', \n MAPI_CONVERSION_PROHIBITED: 'MAPI_CONVERSION_PROHIBITED', \n MAPI_DISCLOSE_RECIPIENTS: 'MAPI_DISCLOSE_RECIPIENTS', \n MAPI_GENERATION: 'MAPI_GENERATION', \n MAPI_GIVEN_NAME: 'MAPI_GIVEN_NAME', \n MAPI_GOVERNMENT_ID_NUMBER: 'MAPI_GOVERNMENT_ID_NUMBER', \n MAPI_BUSINESS_TELEPHONE_NUMBER: 'MAPI_BUSINESS_TELEPHONE_NUMBER', \n MAPI_HOME_TELEPHONE_NUMBER: 'MAPI_HOME_TELEPHONE_NUMBER', \n MAPI_INITIALS: 'MAPI_INITIALS', \n MAPI_KEYWORD: 'MAPI_KEYWORD', \n MAPI_LANGUAGE: 'MAPI_LANGUAGE', \n MAPI_LOCATION: 'MAPI_LOCATION', \n MAPI_MAIL_PERMISSION: 'MAPI_MAIL_PERMISSION', \n MAPI_MHS_COMMON_NAME: 'MAPI_MHS_COMMON_NAME', \n MAPI_ORGANIZATIONAL_ID_NUMBER: 'MAPI_ORGANIZATIONAL_ID_NUMBER', \n MAPI_SURNAME: 'MAPI_SURNAME', \n MAPI_ORIGINAL_ENTRYID: 'MAPI_ORIGINAL_ENTRYID', \n MAPI_ORIGINAL_DISPLAY_NAME: 'MAPI_ORIGINAL_DISPLAY_NAME', \n MAPI_ORIGINAL_SEARCH_KEY: 'MAPI_ORIGINAL_SEARCH_KEY', \n MAPI_POSTAL_ADDRESS: 'MAPI_POSTAL_ADDRESS', \n MAPI_COMPANY_NAME: 'MAPI_COMPANY_NAME', \n MAPI_TITLE: 'MAPI_TITLE', \n MAPI_DEPARTMENT_NAME: 'MAPI_DEPARTMENT_NAME', \n MAPI_OFFICE_LOCATION: 'MAPI_OFFICE_LOCATION', \n MAPI_PRIMARY_TELEPHONE_NUMBER: 'MAPI_PRIMARY_TELEPHONE_NUMBER', \n MAPI_BUSINESS2_TELEPHONE_NUMBER: 'MAPI_BUSINESS2_TELEPHONE_NUMBER', \n MAPI_MOBILE_TELEPHONE_NUMBER: 'MAPI_MOBILE_TELEPHONE_NUMBER', \n MAPI_RADIO_TELEPHONE_NUMBER: 'MAPI_RADIO_TELEPHONE_NUMBER', \n MAPI_CAR_TELEPHONE_NUMBER: 'MAPI_CAR_TELEPHONE_NUMBER', \n MAPI_OTHER_TELEPHONE_NUMBER: 'MAPI_OTHER_TELEPHONE_NUMBER', \n MAPI_TRANSMITABLE_DISPLAY_NAME: 'MAPI_TRANSMITABLE_DISPLAY_NAME', \n MAPI_PAGER_TELEPHONE_NUMBER: 'MAPI_PAGER_TELEPHONE_NUMBER', \n MAPI_USER_CERTIFICATE: 'MAPI_USER_CERTIFICATE', \n MAPI_PRIMARY_FAX_NUMBER: 'MAPI_PRIMARY_FAX_NUMBER', \n MAPI_BUSINESS_FAX_NUMBER: 'MAPI_BUSINESS_FAX_NUMBER', \n MAPI_HOME_FAX_NUMBER: 'MAPI_HOME_FAX_NUMBER', \n MAPI_COUNTRY: 'MAPI_COUNTRY', \n MAPI_LOCALITY: 'MAPI_LOCALITY', \n MAPI_STATE_OR_PROVINCE: 'MAPI_STATE_OR_PROVINCE', \n MAPI_STREET_ADDRESS: 'MAPI_STREET_ADDRESS', \n MAPI_POSTAL_CODE: 'MAPI_POSTAL_CODE', \n MAPI_POST_OFFICE_BOX: 'MAPI_POST_OFFICE_BOX', \n MAPI_TELEX_NUMBER: 'MAPI_TELEX_NUMBER', \n MAPI_ISDN_NUMBER: 'MAPI_ISDN_NUMBER', \n MAPI_ASSISTANT_TELEPHONE_NUMBER: 'MAPI_ASSISTANT_TELEPHONE_NUMBER', \n MAPI_HOME2_TELEPHONE_NUMBER: 'MAPI_HOME2_TELEPHONE_NUMBER', \n MAPI_ASSISTANT: 'MAPI_ASSISTANT', \n MAPI_SEND_RICH_INFO: 'MAPI_SEND_RICH_INFO', \n 
MAPI_WEDDING_ANNIVERSARY: 'MAPI_WEDDING_ANNIVERSARY', \n MAPI_BIRTHDAY: 'MAPI_BIRTHDAY', \n MAPI_HOBBIES: 'MAPI_HOBBIES', \n MAPI_MIDDLE_NAME: 'MAPI_MIDDLE_NAME', \n MAPI_DISPLAY_NAME_PREFIX: 'MAPI_DISPLAY_NAME_PREFIX', \n MAPI_PROFESSION: 'MAPI_PROFESSION', \n MAPI_PREFERRED_BY_NAME: 'MAPI_PREFERRED_BY_NAME', \n MAPI_SPOUSE_NAME: 'MAPI_SPOUSE_NAME', \n MAPI_COMPUTER_NETWORK_NAME: 'MAPI_COMPUTER_NETWORK_NAME', \n MAPI_CUSTOMER_ID: 'MAPI_CUSTOMER_ID', \n MAPI_TTYTDD_PHONE_NUMBER: 'MAPI_TTYTDD_PHONE_NUMBER', \n MAPI_FTP_SITE: 'MAPI_FTP_SITE', \n MAPI_GENDER: 'MAPI_GENDER', \n MAPI_MANAGER_NAME: 'MAPI_MANAGER_NAME', \n MAPI_NICKNAME: 'MAPI_NICKNAME', \n MAPI_PERSONAL_HOME_PAGE: 'MAPI_PERSONAL_HOME_PAGE', \n MAPI_BUSINESS_HOME_PAGE: 'MAPI_BUSINESS_HOME_PAGE', \n MAPI_CONTACT_VERSION: 'MAPI_CONTACT_VERSION', \n MAPI_CONTACT_ENTRYIDS: 'MAPI_CONTACT_ENTRYIDS', \n MAPI_CONTACT_ADDRTYPES: 'MAPI_CONTACT_ADDRTYPES', \n MAPI_CONTACT_DEFAULT_ADDRESS_INDEX: 'MAPI_CONTACT_DEFAULT_ADDRESS_INDEX', \n MAPI_CONTACT_EMAIL_ADDRESSES: 'MAPI_CONTACT_EMAIL_ADDRESSES', \n MAPI_COMPANY_MAIN_PHONE_NUMBER: 'MAPI_COMPANY_MAIN_PHONE_NUMBER', \n MAPI_CHILDRENS_NAMES: 'MAPI_CHILDRENS_NAMES', \n MAPI_HOME_ADDRESS_CITY: 'MAPI_HOME_ADDRESS_CITY', \n MAPI_HOME_ADDRESS_COUNTRY: 'MAPI_HOME_ADDRESS_COUNTRY', \n MAPI_HOME_ADDRESS_POSTAL_CODE: 'MAPI_HOME_ADDRESS_POSTAL_CODE', \n MAPI_HOME_ADDRESS_STATE_OR_PROVINCE: 'MAPI_HOME_ADDRESS_STATE_OR_PROVINCE', \n MAPI_HOME_ADDRESS_STREET: 'MAPI_HOME_ADDRESS_STREET', \n MAPI_HOME_ADDRESS_POST_OFFICE_BOX: 'MAPI_HOME_ADDRESS_POST_OFFICE_BOX', \n MAPI_OTHER_ADDRESS_CITY: 'MAPI_OTHER_ADDRESS_CITY', \n MAPI_OTHER_ADDRESS_COUNTRY: 'MAPI_OTHER_ADDRESS_COUNTRY', \n MAPI_OTHER_ADDRESS_POSTAL_CODE: 'MAPI_OTHER_ADDRESS_POSTAL_CODE', \n MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE: 'MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE', \n MAPI_OTHER_ADDRESS_STREET: 'MAPI_OTHER_ADDRESS_STREET', \n MAPI_OTHER_ADDRESS_POST_OFFICE_BOX: 'MAPI_OTHER_ADDRESS_POST_OFFICE_BOX', \n MAPI_STORE_PROVIDERS: 'MAPI_STORE_PROVIDERS', \n MAPI_AB_PROVIDERS: 'MAPI_AB_PROVIDERS', \n MAPI_TRANSPORT_PROVIDERS: 'MAPI_TRANSPORT_PROVIDERS', \n MAPI_DEFAULT_PROFILE: 'MAPI_DEFAULT_PROFILE', \n MAPI_AB_SEARCH_PATH: 'MAPI_AB_SEARCH_PATH', \n MAPI_AB_DEFAULT_DIR: 'MAPI_AB_DEFAULT_DIR', \n MAPI_AB_DEFAULT_PAB: 'MAPI_AB_DEFAULT_PAB', \n MAPI_FILTERING_HOOKS: 'MAPI_FILTERING_HOOKS', \n MAPI_SERVICE_NAME: 'MAPI_SERVICE_NAME', \n MAPI_SERVICE_DLL_NAME: 'MAPI_SERVICE_DLL_NAME', \n MAPI_SERVICE_ENTRY_NAME: 'MAPI_SERVICE_ENTRY_NAME', \n MAPI_SERVICE_UID: 'MAPI_SERVICE_UID', \n MAPI_SERVICE_EXTRA_UIDS: 'MAPI_SERVICE_EXTRA_UIDS', \n MAPI_SERVICES: 'MAPI_SERVICES', \n MAPI_SERVICE_SUPPORT_FILES: 'MAPI_SERVICE_SUPPORT_FILES', \n MAPI_SERVICE_DELETE_FILES: 'MAPI_SERVICE_DELETE_FILES', \n MAPI_AB_SEARCH_PATH_UPDATE: 'MAPI_AB_SEARCH_PATH_UPDATE', \n MAPI_PROFILE_NAME: 'MAPI_PROFILE_NAME', \n MAPI_IDENTITY_DISPLAY: 'MAPI_IDENTITY_DISPLAY', \n MAPI_IDENTITY_ENTRYID: 'MAPI_IDENTITY_ENTRYID', \n MAPI_RESOURCE_METHODS: 'MAPI_RESOURCE_METHODS', \n MAPI_RESOURCE_TYPE: 'MAPI_RESOURCE_TYPE', \n MAPI_STATUS_CODE: 'MAPI_STATUS_CODE', \n MAPI_IDENTITY_SEARCH_KEY: 'MAPI_IDENTITY_SEARCH_KEY', \n MAPI_OWN_STORE_ENTRYID: 'MAPI_OWN_STORE_ENTRYID', \n MAPI_RESOURCE_PATH: 'MAPI_RESOURCE_PATH', \n MAPI_STATUS_STRING: 'MAPI_STATUS_STRING', \n MAPI_X400_DEFERRED_DELIVERY_CANCEL: 'MAPI_X400_DEFERRED_DELIVERY_CANCEL', \n MAPI_HEADER_FOLDER_ENTRYID: 'MAPI_HEADER_FOLDER_ENTRYID', \n MAPI_REMOTE_PROGRESS: 'MAPI_REMOTE_PROGRESS', \n MAPI_REMOTE_PROGRESS_TEXT: 'MAPI_REMOTE_PROGRESS_TEXT', \n 
MAPI_REMOTE_VALIDATE_OK: 'MAPI_REMOTE_VALIDATE_OK', \n MAPI_CONTROL_FLAGS: 'MAPI_CONTROL_FLAGS', \n MAPI_CONTROL_STRUCTURE: 'MAPI_CONTROL_STRUCTURE', \n MAPI_CONTROL_TYPE: 'MAPI_CONTROL_TYPE', \n MAPI_DELTAX: 'MAPI_DELTAX', \n MAPI_DELTAY: 'MAPI_DELTAY', \n MAPI_XPOS: 'MAPI_XPOS', \n MAPI_YPOS: 'MAPI_YPOS', \n MAPI_CONTROL_ID: 'MAPI_CONTROL_ID', \n MAPI_INITIAL_DETAILS_PANE: 'MAPI_INITIAL_DETAILS_PANE', \n UNCOMPRESSED_BODY: 'UNCOMPRESSED_BODY', \n MAPI_PRIMARY_SEND_ACCOUNT: 'MAPI_PRIMARY_SEND_ACCOUNT', \n MAPI_NEXT_SEND_ACCT: 'MAPI_NEXT_SEND_ACCT', \n MAPI_INTERNET_REFERENCES: 'MAPI_INTERNET_REFERENCES', \n MAPI_IN_REPLY_TO_ID: 'MAPI_IN_REPLY_TO_ID', \n MAPI_INTERNET_RETURN_PATH: 'MAPI_INTERNET_RETURN_PATH', \n MAPI_ICON_INDEX: 'MAPI_ICON_INDEX', \n MAPI_TARGET_ENTRY_ID: 'MAPI_TARGET_ENTRY_ID', \n MAPI_CONVERSATION_ID: 'MAPI_CONVERSATION_ID', \n MAPI_STORE_UNICODE_MASK: 'MAPI_STORE_UNICODE_MASK', \n MAPI_INTERNET_CODEPAGE: 'MAPI_INTERNET_CODEPAGE', \n MAPI_MESSAGE_LOCALE_ID: 'MAPI_MESSAGE_LOCALE_ID', \n MAPI_CREATOR_NAME: 'MAPI_CREATOR_NAME', \n MAPI_CREATOR_ENTRY_ID: 'MAPI_CREATOR_ENTRY_ID', \n MAPI_LAST_MODIFIER_ENTRY_ID: 'MAPI_LAST_MODIFIER_ENTRY_ID', \n MAPI_MESSAGE_CODEPAGE: 'MAPI_MESSAGE_CODEPAGE', \n MAPI_INTERNET_MAIL_OVERRIDE_FORMAT: 'MAPI_INTERNET_MAIL_OVERRIDE_FORMAT', \n MAPI_MESSAGE_EDITOR_FORMAT: 'MAPI_MESSAGE_EDITOR_FORMAT', \n MAPI_SENDER_SMTP_ADDRESS: 'MAPI_SENDER_SMTP_ADDRESS', \n MAPI_SENT_REPRESENTING_SMTP_ADDRESS: 'MAPI_SENT_REPRESENTING_SMTP_ADDRESS', \n MAPI_READ_RECEIPT_SMTP_ADDRESS: 'MAPI_READ_RECEIPT_SMTP_ADDRESS', \n MAPI_RECEIVED_BY_SMTP_ADDRESS: 'MAPI_RECEIVED_BY_SMTP_ADDRESS', \n MAPI_RECEIVED_REPRESENTING_SMTP_ADDRESS: 'MAPI_RECEIVED_REPRESENTING_SMTP_ADDRESS', \n MAPI_SIP_ADDRESS: 'MAPI_SIP_ADDRESS', \n MAPI_ATTACHMENT_LINK_ID: 'MAPI_ATTACHMENT_LINK_ID', \n MAPI_EXCEPTION_START_TIME: 'MAPI_EXCEPTION_START_TIME', \n MAPI_EXCEPTION_END_TIME: 'MAPI_EXCEPTION_END_TIME', \n MAPI_ATTACHMENT_FLAGS: 'MAPI_ATTACHMENT_FLAGS', \n MAPI_ATTACHMENT_HIDDEN: 'MAPI_ATTACHMENT_HIDDEN', \n MAPI_ATTACHMENT_CONTACT_PHOTO: 'MAPI_ATTACHMENT_CONTACT_PHOTO', \n MAPI_ID_SECURE_MIN: 'MAPI_ID_SECURE_MIN', \n MAPI_ID_SECURE_MAX: 'MAPI_ID_SECURE_MAX'}\n OutlookGuid = '05133f00aa00da98101b450b6ed8da90'\n AppointmentGuid = '46000000000000c00000000000062002'\n\n def __init__(self, attr_type, name, data, guid_id, guid_name=None, guid_prop=None):\n self.attr_type = attr_type\n self.name = name\n self.raw_data = data\n self.guid = guid_id\n self.guid_name = guid_name\n self.guid_prop = guid_prop\n if self.guid_name:\n self.guid_name = self.guid_name.rstrip('\\x00')\n\n @property\n def data(self):\n if self.attr_type == SZMAPI_NULL:\n return\n else:\n if self.attr_type == SZMAPI_SHORT:\n return int16(self.raw_data)\n else:\n if self.attr_type == SZMAPI_INT:\n return int32(self.raw_data)\n if self.attr_type == SZMAPI_FLOAT:\n return float32(self.raw_data)\n if self.attr_type == SZMAPI_DOUBLE:\n return dbl64(self.raw_data)\n if self.attr_type == SZMAPI_CURRENCY:\n return Decimal(int64(self.raw_data)) / Decimal(10000)\n if self.attr_type == SZMAPI_APPTIME:\n return apptime(self.raw_data)\n if self.attr_type == SZMAPI_ERROR:\n return uint32(self.raw_data)\n if self.attr_type == SZMAPI_BOOLEAN:\n return bool(uint16(self.raw_data))\n if self.attr_type == SZMAPI_INT8BYTE:\n return int8(self.raw_data)\n if self.attr_type == SZMAPI_SYSTIME:\n return systime(self.raw_data)\n if self.attr_type == SZMAPI_CLSID:\n return guid(self.raw_data)\n if self.attr_type == SZMAPI_BINARY:\n return ('').join([ 
s.rstrip('\\x00') for s in self.raw_data ])\n                if self.attr_type in (SZMAPI_STRING, SZMAPI_UNICODE_STRING):\n                    return ('').join([ s.rstrip('\\x00') for s in self.raw_data ])\n                return self.raw_data\n\n    @property\n    def name_str(self):\n        return self.guid_name or TNEFMAPI_Attribute.codes.get(self.name or self.guid_prop, hex(self.guid_prop or self.name))\n\n    def __str__(self):\n        # the format literal was lost to markup stripping; '<%s>' is a minimal reconstruction\n        return '<%s>' % self.name_str","sub_path":"pycfiles/tnefparse-1.3.0-py2.7/mapi.py","file_name":"mapi.py","file_ext":"py","file_size_in_byte":50388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"138253409","text":"import math\n\n# Will perform a binary search for the value on the provided (sorted) dataset\ndef binarySearch(pDataset, pValue):\n    _output = \"\"\n    _processedDataset = pDataset\n    _iteration = 0\n    _found = False\n\n    if len(_processedDataset) > 0:\n\n        while not _found and len(_processedDataset) > 0:\n            _iteration += 1\n            theMiddlePoint = math.floor(len(_processedDataset) / 2) # 0 based array\n            theValue = _processedDataset[theMiddlePoint]\n\n            if theValue == pValue:\n                _output = \"found \" + str(pValue) + \" on the iteration #\" + str(_iteration)\n                _found = True\n            elif theValue < pValue:\n                # the middle point was already tested, so skip past it\n                _processedDataset = _processedDataset[slice(theMiddlePoint + 1, len(_processedDataset))]\n            else:\n                # the slice end is exclusive, so the tested middle point is dropped here too\n                _processedDataset = _processedDataset[slice(0, theMiddlePoint)]\n\n        if not _found:\n            _output = \"Couldn't find the result\"\n\n    else:\n        _output = \"dataset is empty\"\n\n    return _output","sub_path":"src/binary-search/binarySearch.py","file_name":"binarySearch.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
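# A quick usage sketch for the binarySearch record above (inputs assumed for
# illustration, not part of the original file); the list must already be sorted:
#   binarySearch([1, 3, 5, 7, 9], 7)  -> "found 7 on the iteration #3"
#   binarySearch([1, 3, 5, 7, 9], 4)  -> "Couldn't find the result"
#   binarySearch([], 4)               -> "dataset is empty"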
+{"seq_id":"77672744","text":"import flask\nfrom flask import request, jsonify\nfrom default_ner_example import DefaultNerEx\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\ndataFile = 'data/data1.txt'\n\n# A route to return all of the available entries from NER search.\n@app.route('/api/v1/resources/entities/all', methods=['GET'])\ndef get_all_entities():\n    lst = DefaultNerEx().ner_search(dataFile)\n    keys = ['entity_name', 'label']\n    entity_list = []\n    for ent in lst:\n        ent1 = []\n        ent1.append(str(ent[0]))\n        ent1.append(ent[1])\n        entity_list.append(dict(zip(keys, ent1)))\n\n    return jsonify(entity_list)\n\n# A route to return only the NER search entries that match a given label.\n@app.route('/api/v1/resources/entities', methods=['GET'])\ndef get_entities_by_label():\n    # Check if a label was provided as part of the URL.\n    # If a label is provided, assign it to a variable.\n    # If no label is provided, display an error in the browser.\n    if 'label' in request.args:\n        label = str(request.args['label'])\n    else:\n        return \"Error: No label field provided. Please specify a label.\"\n    lst = DefaultNerEx().ner_search(dataFile)\n    keys = ['entity_name', 'label']\n    entity_list = []\n    for ent in lst:\n        if ent[1] == label.upper():\n            ent1 = []\n            ent1.append(str(ent[0]))\n            ent1.append(ent[1])\n            entity_list.append(dict(zip(keys, ent1)))\n    return jsonify(entity_list)\n\napp.run()\n","sub_path":"spacynlp/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"208795878","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\n\nclass BankRef(models.Model):\n    _name = 'bank.ref'\n    _rec_name = 'bank_ref'\n\n    # Bank reference\n    bank_ref = fields.Char(string=\"Bank Reference\", size=25)\n\n    _sql_constraints = [('bank_ref_constrain',\n                         'unique(bank_ref)',\n                         'This reference already exists, please enter a new reference.')]","sub_path":"extrasGDL/cajas_municipal/models/bank_ref.py","file_name":"bank_ref.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"357762910","text":"class Node:\n    def __init__(self,k,v):\n        self.key = k\n        self.val = v\n        self.prev = None\n        self.next = None\nclass LRUCache:\n    def __init__(self,capacity):\n        self.capacity = capacity\n        self.dic = {}\n        self.head = Node(0,0)\n        self.tail = Node(0,0)\n        self.head.next = self.tail\n        self.tail.prev = self.head\n    def _add(self,node):\n        # link the node in just before the tail sentinel (the most-recently-used end)\n        p = self.tail.prev\n        p.next = node\n        self.tail.prev = node\n        node.prev = p\n        node.next = self.tail\n    def _remove(self,node):\n        # unlink the node from the doubly linked list\n        p = node.prev\n        q = node.next\n        p.next = q\n        q.prev = p\n    def put(self,k,v):\n        if k in self.dic:\n            self._remove(self.dic[k])\n        n = Node(k,v)\n        self._add(n)\n        self.dic[k] = n\n        if len(self.dic) > self.capacity:\n            n = self.head.next  # the least-recently-used node sits right after the head sentinel\n            self._remove(n)\n            del self.dic[n.key]\n    def get(self,k):\n        if k in self.dic:\n            n = self.dic[k]\n            self._remove(n)\n            self._add(n)\n            return n.val\n        return -1\ntest = LRUCache(2)\ntest.put(1,1)\ntest.put(2,2)\nprint(test.get(1))  # -> 1\ntest.put(3,3)       # evicts key 2\nprint(test.get(2))  # -> -1\ntest.put(4,4)       # evicts key 1\nprint(test.get(1))  # -> -1\nprint(test.get(3))  # -> 3\nprint(test.get(4))  # -> 4\n","sub_path":"LRUCache.py","file_name":"LRUCache.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"267766358","text":"from datetime import datetime\n\ndef check_birthdate(year, month, day):\n\tbirthdate = datetime(year, month, day)\n\ttoday = datetime.now()\n\tif birthdate > today:\n\t\treturn False\n\telse:\n\t\treturn True\n\n\ndef calculate_age(year, month, day):\n\tbirthdate = datetime(year, month, day)\n\ttoday = datetime.now()\n\tage = (today - birthdate).days\n\n\tprint(f\"You are {age/365} years old\")\n\ndef main():\n\tday = int(input(\"What day were you born? \"))\n\tmonth = int(input(\"On which month? \"))\n\tyear = int(input(\"In which year? 
\"))\n\n\tif check_birthdate(year, month, day):\n\t\tcalculate_age(year, month, day)\n\telse:\n\t\tprint(\"The birthdate you entered is invalid\")\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"age_calculator.py","file_name":"age_calculator.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183736743","text":"import os, glob, datetime\nfrom shutil import copyfile\nimport undistort\nimport cv2\nfrom undistort import FisheyeUndistorter, PerspectiveUndistorter\nfrom visionpilot import VisionPilot, get_high_contrast_image\nimport augmentation\nfrom clize import run\n\nclass VisionTrainer(object):\n\n\tdef __init__(self, dpath, outpath=\"\", shrink = None, hueshifts = 0, savepreview = False):\n\t\tself.dpath = dpath\n\t\tself.outpath = outpath\n\t\tself.fisheye = None\n\t\tself.warper = None\n\t\tself.pilot = None\n\t\tself.seqnum = 1\n\t\tself.hue_shifts = hueshifts\n\t\tself.save_preview = savepreview\n\t\tif shrink is not None:\n\t\t\tif shrink[0] <= 0 or shrink[1] <= 0:\n\t\t\t\tshrink = None\n\t\tself.shrink = shrink\n\n\tdef process_dir(self):\n\t\tallpaths = self.dpath.split(';')\n\t\tdc = len(allpaths)\n\t\tdi = 0\n\t\tfor singlepath in allpaths:\n\t\t\tdi += 1\n\t\t\tif self.outpath is None or len(self.outpath) <= 0:\n\t\t\t\tself.outpath_now = singlepath.strip(\"/\\\\\") + \"_out\"\n\t\t\telse:\n\t\t\t\tself.outpath_now = self.outpath\n\t\t\ttry:\n\t\t\t\tos.makedirs(self.outpath_now)\n\t\t\texcept FileExistsError:\n\t\t\t\tpass\n\t\t\tg = glob.glob(os.path.join(singlepath, \"*.jpg\"))\n\t\t\tfc = len(g)\n\t\t\tfi = 0\n\t\t\tfor imgfile in g:\n\t\t\t\tfi += 1\n\t\t\t\tself.process_file(imgfile)\n\t\t\t\tif dc > 1:\n\t\t\t\t\tprint(\"Progress: %u / %u ( %u / %u ) \" % (fi, fc, di, dc), end=\"\\r\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Progress: %u / %u \" % (fi, fc), end=\"\\r\")\n\t\tprint(\"\\r\\nDone!\")\n\n\tdef process_file(self, imgfile):\n\t\timg = cv2.imread(imgfile, -1)\n\t\torig_img = img.copy()\n\t\tif self.fisheye is None:\n\t\t\tfK, fD = undistort.get_fisheye(img.shape[1], img.shape[0])\n\t\t\tself.fisheye = FisheyeUndistorter((img.shape[1], img.shape[0]), fK, fD, bal = 0.0)\n\t\timg2 = self.fisheye.undistort_image(img)\n\t\timg3 = get_high_contrast_image(img2)\n\t\tif self.warper is None:\n\t\t\tself.warper = PerspectiveUndistorter(img.shape[1], img.shape[0])\n\t\timg4 = self.warper.undistort_image(img3)\n\t\tif self.pilot is None:\n\t\t\tself.pilot = VisionPilot(edge_mask = self.warper.get_warp_edge_mask())\n\t\tsteering, throttle = self.pilot.process(img4)\n\t\tsteering = int(round(steering + 127))\n\t\tthrottle = int(round(throttle + 127))\n\t\t#now = datetime.datetime.now()\n\t\t#fname = \"%04u%02u%02u%02u%02u%02u_%08u\" % (now.year, now.month, now.day, now.hour, now.minute, now.second, self.seqnum)\n\t\t#self.seqnum += 1\n\t\tfilename_w_ext = os.path.basename(imgfile)\n\t\tfilename, file_extension = os.path.splitext(filename_w_ext)\n\t\tfname = filename[0:(4 + 2 + 2 + 2 + 2 + 2 + 1 + 8)]\n\t\tfname += \"_%03u%03u\" % (throttle, steering)\n\t\tfpath = os.path.join(self.outpath_now, fname)\n\t\tif self.save_preview:\n\t\t\tpreviewdir = os.path.join(os.path.dirname(imgfile), \"preview\")\n\t\t\ttry:\n\t\t\t\tos.makedirs(previewdir)\n\t\t\texcept FileExistsError:\n\t\t\t\tpass\n\t\t\tpreviewpath = os.path.join(previewdir, fname) + \".jpg\"\n\t\t\tif self.pilot.save_visualization(previewpath):\n\t\t\t\t#print(\"Preview: \" + previewpath)\n\t\t\t\tpass\n\t\tif 
self.hue_shifts <= 0:\n\t\t\tif self.shrink is None:\n\t\t\t\tcopyfile(imgfile, fpath + \".jpg\")\n\t\t\telse:\n\t\t\t\timg = cv2.resize(orig_img, self.shrink)\n\t\t\t\tcv2.imwrite(fpath + \".jpg\", img)\n\t\telse:\n\t\t\thue_divider = self.hue_shifts + 1\n\t\t\thue_spacing = 180.0 / float(hue_divider)\n\t\t\tif self.shrink is None:\n\t\t\t\tshrunk_img = orig_img\n\t\t\telse:\n\t\t\t\tshrunk_img = cv2.resize(orig_img, self.shrink)\n\t\t\th = 0\n\t\t\twhile h < hue_divider:\n\t\t\t\thue_shift = int(round(hue_spacing * float(h)))\n\t\t\t\tshifted_img = augmentation.img_hue_shift(shrunk_img, hue_shift)\n\t\t\t\tcv2.imwrite(fpath + (\"_h%u\" % hue_shift) + \".jpg\", shifted_img)\n\t\t\t\th += 1\n\t\t#print(\"Saved: \" + fpath)\n\ndef train(dpath, hueshifts=8, outwidth=160, outheight=120, savepreview=False):\n\tx = VisionTrainer(dpath, hueshifts = hueshifts, shrink=(outwidth, outheight), savepreview=savepreview)\n\tx.process_dir()\n\nif __name__ == \"__main__\":\n\trun(train)","sub_path":"sloth/visiontrainer.py","file_name":"visiontrainer.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"97775018","text":"import re\nimport datetime\n\ndef parse_time(s):\n \"\"\"\n Like datetime.datetime.strptime(s, \"%w %Y/%m/%d %H:%M:%S\") but 5x faster.\n \"\"\"\n _, date_part, time_part = s.split(' ')\n year, mon, day = date_part.split('/')\n hour, minute, sec = time_part.split(':')\n return datetime.datetime(*map(int, (year, mon, day, hour, minute, sec)))\n\nclass IscDhcpLeases(object):\n def __init__(self, filename):\n self.filename = filename\n self.last_leases = {}\n\n self.regex_leaseblock = re.compile(r\"lease (?P\\d+\\.\\d+\\.\\d+\\.\\d+) {(?P[\\s\\S]+?)\\n}\")\n self.regex_properties = re.compile(r\"\\s+(?P\\S+) (?P[\\s\\S]+?);\")\n\n def get(self):\n leases = []\n for match in self.regex_leaseblock.finditer(open(self.filename).read()):\n block = match.groupdict()\n\n properties = self.regex_properties.findall(block['config'])\n properties = {key: value for (key, value) in properties}\n if 'hardware' not in properties:\n # E.g. 
rows like {'binding': 'state abandoned', ...}\n                continue\n            lease = Lease(block['ip'], properties)\n            leases.append(lease)\n        return leases\n\n    def get_current(self):\n        all_leases = self.get()\n        leases = {}\n        for lease in all_leases:\n            leases[lease.ethernet] = lease\n        return leases\n\n\nclass Lease(object):\n    def __init__(self, ip, data):\n        self.data = data\n        self.ip = ip\n        self.start = parse_time(data['starts'])\n        if data['ends'] == 'never':\n            self.end = None\n        else:\n            self.end = parse_time(data['ends'])\n\n        self._hardware = data['hardware'].split(' ')\n        self.ethernet = self._hardware[1]\n        self.hardware = self._hardware[0]\n        self.hostname = data.get('client-hostname', '').replace(\"\\\"\", \"\")\n\n    @property\n    def valid(self):\n        if self.end is None:\n            return self.start <= datetime.datetime.now()\n        else:\n            return self.start <= datetime.datetime.now() <= self.end\n\n    def __repr__(self):\n        # format string reconstructed; the angle-bracket literal was lost to markup stripping\n        return \"<Lease {} ethernet={} hostname={}>\".format(self.ip, self.ethernet, self.hostname)\n\n\nif __name__ == \"__main__\":\n    leases = IscDhcpLeases('dhcpd.leases')\n    print(leases.get_current())\n","sub_path":"isc_dhcp_leases/iscdhcpleases.py","file_name":"iscdhcpleases.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"511736163","text":"import random\n\nclass Item:\n    def __init__(self, manu, name, categ = \"\", price = \"\"):\n        self.manufacturer = manu\n        self.name = name\n        self.category = categ\n        self.price = price\n        self.UPC = random.randint(1000000000,9999999999)\n    def __str__(self):\n        return \"Item Info:\\nManufacturer: \" + self.manufacturer + \"\\nName: \" + self.name + \"\\nCategory: \" + self.category + \"\\nPrice: \" + self.price + \"\\nUPC: \" + str(self.UPC)\n\ndef main():\n    name = input(\"Enter the name of the item: \")\n    manufacturer = input(\"Enter the manufacturer: \")\n\n    ask = input(\"Will you enter the category and price information? (y or n): \")\n\n    if ask == \"y\":\n        category = input(\"Enter the category of the item(s): \")\n        price = input(\"Enter the price of item(s): $\")\n        # Item expects the manufacturer first, then the name\n        Item1 = Item(manufacturer, name, category, price)\n    elif ask == \"n\":\n        Item1 = Item(manufacturer, name)\n\n    print(Item1)\n\nmain()\n","sub_path":"cabulio_jake-master/Pylesson_11/Lesson_11.2/InventoryItems.py","file_name":"InventoryItems.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"344968880","text":"# Filename: Program4-09.py\r\n# Author: N. Anim\r\n# Date: Feb. 
22, 2016\r\n# Purpose: To demonstrate the use and functioning of\r\n# the logical operator AND in an if statement\r\n# (or decision).\r\n# The algorithm is on page 159.\r\n\r\n# Get the input and do numeric conversions\r\nsalary = int(input(\"Enter your annual salary: \"))\r\nyearsOnJob = int(input(\"Enter the number of years on the current job: \"))\r\n\r\n# Determine whether the user qualifies for the loan.\r\nif (salary >= 30000) and (yearsOnJob >= 2):\r\n print(\"You qualify for the loan.\")\r\nelse:\r\n print(\"You do not qualify for the loan.\")\r\n","sub_path":"Programs/Program4-09.py","file_name":"Program4-09.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"164834124","text":"import tensorflow as tensorflow\nfrom tensorflow.python.platform import gfile as directory_handler\nimport numpy as math_library\nfrom PIL import Image as image_library\n\nNUMBER_OF_THREADS = 4\nIMAGE_HEIGHT = 228\nIMAGE_WIDTH = 304\nTARGET_HEIGHT = 55\nTARGET_WIDTH = 74\n \ndef load_test_image(file_path):\n \n # original Images\n image_original_file = tensorflow.read_file(file_path)\n image_original = tensorflow.image.decode_jpeg(image_original_file, channels=3) # in rgb\n image_original = tensorflow.cast(image_original, tensorflow.float32) \n \n # resize\n image_original = tensorflow.image.resize_images(image_original, (IMAGE_HEIGHT, IMAGE_WIDTH))\n \n original_images = tensorflow.train.batch(\n [image_original],\n batch_size=1,\n num_threads=NUMBER_OF_THREADS,\n capacity=50 + 3 * 1,\n )\n \n return original_images\n \n \ndef output_test_predictions_into_images(predict_test_image, test_image, output_dir):\n print(\"output test into %s\" % output_dir)\n create_output_directory(output_dir)\n \n for i, (prediction, original) in enumerate(zip(predict_test_image, test_image,)):\n \n # original \n original_pil = image_library.fromarray(math_library.uint8(original))\n original_name = \"%s/testPicture.png\" % (output_dir)\n original_pil.save(original_name)\n \n # prediction \n prediction_transposed = prediction.transpose(2, 0, 1)\n prediction_transposed = (prediction_transposed / math_library.max(prediction_transposed)) * 255.0\n prediction_pil = image_library.fromarray(math_library.uint8(prediction_transposed[0]), mode=\"L\")\n prediction_name = \"%s/prediction.png\" % (output_dir)\n prediction_pil.save(prediction_name)\n\n\ndef create_output_directory(output_dir):\n if not directory_handler.Exists(output_dir):\n directory_handler.MakeDirs(output_dir)\n","sub_path":"testdata.py","file_name":"testdata.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457314576","text":"# TOOL hfst-lookup-swedish.py: \"HFST Lookup Swedish\" (Looks up Swedish morphological analyses of tokens, each in the first field of a line. 
Output format depends on the underlying combination of lookup tool, its options, and lexical transducer.)\n# INPUT tokens.tsv TYPE GENERIC\n# OUTPUT readings.txt\n# OUTPUT OPTIONAL error.log\n# PARAMETER Encoding TYPE [utf8: \"UTF-8\"] DEFAULT utf8 (Character encoding, UTF-8)\n# PARAMETER Version TYPE [v383: \"3.8.3\", v390: \"3.9.0\"] DEFAULT v383 (HFST Version)\n# RUNTIME python3\n\nimport os\nimport sys\n\nsys.path.append(os.path.join(chipster_module_path, \"python\"))\nfrom lib_pipeline import hfst_lookup\nfrom lib_errorlog import consolidate\n\ndef lookup_3_8_3():\n    home = \"/homeappl/appl_taito/ling/hfst/3.8.3\"\n    processor = os.path.join(home, \"bin\", \"hfst-optimized-lookup\")\n    transducer = os.path.join(home, \"share/hfst/sv\", \"sv-analysis.hfst.ol\")\n\n    hfst_lookup(processor, transducer)\n\ndef lookup_3_9_0():\n    home = \"/homeappl/appl_taito/ling/hfst/3.9.0\"\n    processor = os.path.join(home, \"bin\", \"hfst-optimized-lookup\")\n    transducer = os.path.join(home, \"share/hfst/sv\", \"sv-analysis.hfst.ol\")\n\n    hfst_lookup(processor, transducer)\n\ndict(v383 = lookup_3_8_3, v390 = lookup_3_9_0)[Version]()\n\nconsolidate()\n","sub_path":"tools/kielipankki/python/hfst-lookup-swedish.py","file_name":"hfst-lookup-swedish.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"522309295","text":"class Solution(object):\n    def nthUglyNumber(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        # classic three-pointer DP: every ugly number after 1 is a previous\n        # ugly number multiplied by 2, 3 or 5 (the original brute-force scan\n        # miscounted values such as 14, which is even but not ugly)\n        ugly = [1]\n        i2 = i3 = i5 = 0\n        while len(ugly) < n:\n            nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)\n            ugly.append(nxt)\n            if nxt == ugly[i2] * 2:\n                i2 += 1\n            if nxt == ugly[i3] * 3:\n                i3 += 1\n            if nxt == ugly[i5] * 5:\n                i5 += 1\n        return ugly[n - 1]\n\n\nif __name__ == '__main__':\n    n = 10\n    print(Solution().nthUglyNumber(n))  # -> 12\n","sub_path":"UglyNumberII.py","file_name":"UglyNumberII.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"143597271","text":"from flask import Flask, redirect, url_for, session, request, render_template, render_template_string, abort\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_user import login_required, UserManager, UserMixin, current_user, SQLAlchemyAdapter\nfrom forms import AddForm\nfrom models import User, Video, LearningTrack, LearningTrackCourses, LearningTrackVideos\nimport database\nfrom collections import defaultdict\n\n\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\napp.config.from_pyfile('configs/config.py')\ndb = SQLAlchemy(app)\n\n\n# db_adapter = SQLAlchemyAdapter(db, User)\n# user_manager = UserManager(db_adapter, app)\nuser_manager = UserManager(app, db, User)\n\n\n@app.route(\"/\", methods = ['GET'])\ndef index():\n    logged_in = False\n    if current_user.is_authenticated:\n        print ('autho!!')\n\n        print (current_user.username)\n\n    else:\n        print ('notho')\n    return render_template(\"homepage.html\", logged_in = logged_in)\n\n@app.route(\"/dashboard\", methods = ['GET'])\ndef dashboard():\n    \"\"\"\n    - Redirects to login page if not logged in.\n    - Displays dashboard page if logged in.\n    \"\"\"\n    if request.method == 'GET':\n        if not current_user.is_authenticated:\n            return redirect(url_for(\"user.login\"))\n        else:\n            # TODO: Add in admin\n            # is_admin = (requests.get(url='http://127.0.0.1:8080/is-admin/{}'.format(session['user'])).content).decode(\"utf-8\") # Done\n            # if is_admin == \"True\":\n            # user_count = (requests.get(url='http://127.0.0.1:8080/user-count').content).decode(\"utf-8\") # Done\n            # video_count = 
(requests.get(url='http://127.0.0.1:8080/video-count').content).decode(\"utf-8\") # Done\n # view_count = (requests.get(url='http://127.0.0.1:8080/view-count').content).decode(\"utf-8\") # Done\n # flag_count = (requests.get(url='http://127.0.0.1:8080/flag-count').content).decode(\"utf-8\") # Done\n # admin_dashboard = ((requests.get(url='http://127.0.0.1:8080/html/{}'.format('administrator_dashboard.html'))).content).decode(\"utf-8\") # Done\n # return render_template_string(admin_dashboard, user_count = user_count, video_count = video_count, view_count = view_count, flag_count = flag_count)\n # else:\n print (current_user)\n\n print(current_user)\n\n # video_count = (requests.get(url='http://127.0.0.1:8080/user-video-count/{}'.format(username)).content).decode(\"utf-8\") # Done\n # view_count = (requests.get(url='http://127.0.0.1:8080/user-view-count/{}'.format(username)).content).decode(\"utf-8\") # Done\n # best_vid_ID = (requests.get(url='http://127.0.0.1:8080/user-best-video/{}'.format(username)).content).decode(\"utf-8\") # Done\n # best_vid_title = ((requests.get(url='http://127.0.0.1:8080/title/{}'.format(best_vid_ID))).content).decode(\"utf-8\") # Done\n # fav_vid_ID = (requests.get(url='http://127.0.0.1:8080/user-fav-video/{}'.format(username)).content).decode(\"utf-8\") # Done\n # fav_vid_title = ((requests.get(url='http://127.0.0.1:8080/title/{}'.format(fav_vid_ID))).content).decode(\"utf-8\") # Done\n # user_dashboard = ((requests.get(url='http://127.0.0.1:8080/html/{}'.format('user_dashboard.html'))).content).decode(\"utf-8\") # Done\n\n view_count = database.get_most_viewed()\n print (view_count)\n\n return render_template_string(user_dashboard, username = session['user'], view_count = view_count, video_count = video_count, high_video_ID = best_vid_ID, high_title = best_vid_title, fav_video_ID = fav_vid_ID, fav_title = fav_vid_title)\n\n@app.route(\"/add_video\", methods = ['GET', 'POST'])\ndef add_video():\n add_form = AddForm()\n if request.method == \"GET\":\n\n\n return render_template('add_video.html', add_form=add_form)\n\n elif request.method == \"POST\":\n print ('submitted')\n\n database.add_video(add_form)\n\n\n add_form = AddForm()\n\n return render_template('add_video.html', add_form=add_form)\n\n@app.route(\"/watch\", methods = ['GET'])\ndef watch_video():\n \"\"\"\n In GET request\n - Plays the video with the corresponding video ID.\n \"\"\"\n if request.method == 'GET':\n video = request.args.get('v', None)\n if video == None:\n return redirect(url_for('dashboard'))\n requested_vid = Video.query.filter_by(video_id=video).first()\n if requested_vid == \"False\":\n abort(404)\n\n url = requested_vid.video_url.replace(\"watch?v=\", \"embed/\")\n vid_title = requested_vid.video_title\n vid_views = requested_vid.view_count\n vid_uploader = requested_vid.uploader\n vid_upload_date = requested_vid.upload_date\n\n print ('and ur lis ', url)\n\n\n\n return render_template(\"video.html\", url=url, video_ID = video, title = vid_title,\n uploader = vid_uploader, views = vid_views, vid_upload_date = vid_upload_date)\n\n\n\n@app.route(\"/my-videos\", methods = ['GET'])\ndef my_videos():\n \"\"\"\n In GET request\n - Returns a page of videos uploaded by the logged in user.\n \"\"\"\n if request.method == 'GET':\n\n username = current_user.username\n\n user_videos = Video.query.filter_by(uploader=username).all()\n\n print (username)\n\n print (user_videos)\n\n\n\n uploaded_dictionary = {}\n\n for video in user_videos:\n video_id = video.video_id\n print (video_id)\n url = 
\"http://img.youtube.com/vi/\" + video.video_url.split(\"watch?v=\")[1] + \"/0.jpg\"\n title = video.video_title\n views = video.view_count\n uploaded_dictionary.update({video_id : [url, title, views]})\n\n print (uploaded_dictionary)\n\n return render_template('my_videos.html', user_videos=uploaded_dictionary)\n\n@app.route(\"/playlist-test\", methods = ['GET'])\ndef playlist_test():\n # playlists = db.session.query(LearningTrack, LearningTrackVideos).outerjoin(LearningTrack.track_id ==\n # LearningTrackVideos.track_id).all()\n\n # playlists = db.session.query(LearningTrackVideos).join(LearningTrack).join(Video).all()\n\n # playlists = LearningTrackVideos.track_id.query.join(LearningTrack.track_id).all()\n\n\n\n\n\n\n\n\n #\n # playlists = db.session.query(Video, LearningTrackVideos, LearningTrack).join(LearningTrackVideos,\n # Video.video_id ==\n # LearningTrackVideos.video_id).join(\n # LearningTrack, LearningTrack.track_id == LearningTrackVideos.track_id).all()\n #\n #\n\n playlists = db.session.query(LearningTrack, LearningTrackVideos, Video).join(LearningTrackVideos,\n LearningTrackVideos.track_id ==\n LearningTrack.track_id).join(\n Video, Video.video_id == LearningTrackVideos.video_id).all()\n\n\n # playlists = db.session.query(LearningTrack, LearningTrackVideos, Video).join(LearningTrackVideos).join(Video)\n\n playlist_dict = defaultdict(list)\n print (playlists)\n for playlist in playlists:\n playlist_dict[playlist[0].track_id].append(playlist[2])\n\n\n print (playlist_dict[1][0].video_title)\n\n\n\n\n\n #\n # playlist_dict = defaultdict(list)\n #\n # for video in playlists:\n # print (video)\n # print (video.track_id)\n # playlist_dict[video.track_id] = [video.video_title, video.video_url, video.view_count]\n # for k,v in playlist_dict:\n # print (k,v)\n\n return render_template(\"playlist-test.html\", playlist_dict=playlist_dict)\n\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True)\n#\n# def create_app():\n# \"\"\" Flask application factory \"\"\"\n#\n#\n# # Setup Flask-User and specify the User data-model\n#\n# # The Home page is accessible to anyone\n# @app.route('/')\n# def home_page():\n# # String-based templates\n# return render_template_string(\"\"\"\n# {% extends \"flask_user_layout.html\" %}\n# {% block content %}\n#

<h2>Home page</h2>\n#         <p><a href={{ url_for('user.register') }}>Register</a></p>\n#         <p><a href={{ url_for('user.login') }}>Sign in</a></p>\n#         <p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p>\n#         <p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p>\n#         <p><a href={{ url_for('user.logout') }}>Sign out</a></p>\n#     {% endblock %}\n#     \"\"\")\n#\n# # The Members page is only accessible to authenticated users via the @login_required decorator\n# @app.route('/members')\n# @login_required  # User must be authenticated\n# def member_page():\n#     # String-based templates\n#     return render_template_string(\"\"\"\n#     {% extends \"flask_user_layout.html\" %}\n#     {% block content %}\n#         <h2>Members page</h2>\n#         <p><a href={{ url_for('user.register') }}>Register</a></p>\n#         <p><a href={{ url_for('user.login') }}>Sign in</a></p>\n#         <p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p>\n#         <p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p>\n#         <p><a href={{ url_for('user.logout') }}>Sign out</a></p>

\n#     {% endblock %}\n#     \"\"\")\n#\n#     return app\n#\n#\n# # Start development web server\n# if __name__ == '__main__':\n#     app = create_app()\n#     app.run(host='0.0.0.0', port=5000, debug=True)","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"204418210","text":"# Write a program that, for a list of integers, determines how many\n# positive elements lie between its maximum and minimum elements.\n\nimport sys\n\n# Read the list as a single line\na = list(map(int, input().split()))\n# If the list is empty, terminate the program\nif not a:\n    print(\"The given list is empty\", file=sys.stderr)\n    exit(1)\n\n# Find the indices of the minimum and maximum elements\na_min = a_max = a[0]\ni_min = i_max = 0\nfor i, item in enumerate(a):\n    if item < a_min:\n        i_min, a_min = i, item\n    if item >= a_max:\n        i_max, a_max = i, item\n\n# Order the indices, swapping them if necessary\nif i_min > i_max:\n    i_min, i_max = i_max, i_min\n\n# Count the positive elements between them\ncount = 0\n\nfor item in a[i_min+1 : i_max]:\n    if item > 0:\n        count += 1\n\nprint(count)\n","sub_path":"examples/example 2.py","file_name":"example 2.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
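# Illustrative run of the record above (input assumed, not from the original):
# for "2 -3 5 1 -3 8" the first minimum is at i_min = 1 and the last maximum at
# i_max = 5 (the >= test keeps the last maximum), so the slice a[2:5] = [5, 1, -3]
# is scanned and the program prints 2.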
+{"seq_id":"569075505","text":"from django.db import IntegrityError\nfrom django.contrib.auth import authenticate, login, logout\n# from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, JsonResponse, HttpResponse\n\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom .models import Appointment, User\nfrom .forms import CreateForm\nimport xlwt\nimport datetime\n\n\ndef login_view(request):\n\n    if request.method == \"POST\":\n\n        # Attempt to sign user in\n        username = request.POST[\"username\"]\n        user = User.objects.filter(username=username).exists()\n\n        # Check if authentication successful\n        if user:\n            user_name = User.objects.get(username=username)\n            login(request, user_name)\n            return HttpResponseRedirect(reverse(\"index\"))\n        else:\n            return render(request, \"gas_q/login.html\", {\n                \"message\": \"Invalid username.\"\n            })\n    else:\n        return render(request, \"gas_q/login.html\")\n\n\ndef logout_view(request):\n    logout(request)\n    return HttpResponseRedirect(reverse(\"login\"))\n\ndef register(request):\n    if request.method == \"POST\":\n        username = request.POST[\"username\"]\n\n        # Attempt to create new user\n        try:\n            user = User.objects.create_user(username)\n            user.save()\n        except IntegrityError:\n            return render(request, \"gas_q/register.html\", {\n                \"message\": \"Username already taken.\"\n            })\n        login(request, user)\n        return HttpResponseRedirect(reverse(\"index\"))\n    else:\n        return render(request, \"gas_q/register.html\")\n\n\ndef index(request):\n\n    form = CreateForm()\n\n    if request.method == 'POST':\n\n        form = CreateForm(request.POST or None, request.FILES or None)\n\n        if form.is_valid():\n            new_appointment = form.save(commit=False)\n            car_num = request.POST['car_num']\n            day = request.POST['day']\n            timeslot = request.POST['timeslot']\n            user = request.POST['user']\n            place = request.POST['place']\n            car_num_exists = Appointment.objects.filter(car_num=car_num).exists()\n            user_has_app = Appointment.objects.filter(user=user).exists()\n            time_taken = Appointment.objects.filter(day=day, timeslot=timeslot).count() >= 6\n\n            if car_num_exists or user_has_app or time_taken:\n                return render(request, 'gas_q/error.html')\n            else:\n                new_appointment.user = user\n                new_appointment.place = place\n                new_appointment.save()\n                return render(request, 'gas_q/done.html')\n\n    return render(request, 'gas_q/index.html', {\n        'form': form\n    })\n\n\ndef json(request):\n    if request.method == 'GET':\n        try:\n            all_apps = Appointment.objects.all()\n\n        except:\n            return JsonResponse({\"error\": \"Invalid.\"}, status=400)\n\n        return JsonResponse([post.serialize() for post in all_apps], safe=False)\n\n\ndef export_data(request):\n\n    response = HttpResponse(content_type='application/ms-excel')\n    response['Content-Disposition'] = 'attachment; filename=Appointment' + str(datetime.datetime.now()) + '.xls'\n\n    wb = xlwt.Workbook(encoding='utf-8')\n    ws = wb.add_sheet('Appointment')\n    row_num = 0\n    font_style = xlwt.XFStyle()\n    font_style.font.bold = True\n\n    columns = ['user', 'car_make', 'car_num', 'day', 'timeslot', 'place']\n\n    for col_num in range(len(columns)):\n        ws.write(row_num, col_num, columns[col_num], font_style)\n\n    font_style = xlwt.XFStyle()\n    appointments = Appointment.objects.all()\n\n    for appointment in appointments:\n        row_num += 1\n\n        for col_num in range(6):\n            if col_num == 0:\n                ws.write(row_num, col_num, str(appointment.user), font_style)\n            elif col_num == 1:\n                ws.write(row_num, col_num, str(appointment.car_make), font_style)\n            elif col_num == 2:\n                ws.write(row_num, col_num, str(appointment.car_num), font_style)\n            elif col_num == 3:\n                for slot in range(len(appointment.DAYS)):\n                    if appointment.DAYS[slot][0] == appointment.day:\n                        ws.write(row_num, col_num, str(appointment.DAYS[slot][1]), font_style)\n            elif col_num == 4:\n                for slot in range(len(appointment.TIMESLOT_LIST)):\n                    if appointment.TIMESLOT_LIST[slot][0] == appointment.timeslot:\n                        ws.write(row_num, col_num, str(appointment.TIMESLOT_LIST[slot][1]), font_style)\n            elif col_num == 5:\n                ws.write(row_num, col_num, str(appointment.place), font_style)\n\n    wb.save(response)\n\n    return response\n\ndef excel(request):\n    return render(request, \"gas_q/excel.html\")\n","sub_path":"gas_q/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"319702428","text":"\"\"\"empty message\n\nRevision ID: eba0789c0614\nRevises: \nCreate Date: 2020-12-06 15:56:16.601770\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'eba0789c0614'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('auditoriums',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('number_of_auditorium', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('number_of_auditorium')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=70), nullable=False),\n sa.Column('email', sa.String(length=70), nullable=False),\n sa.Column('password', sa.String(length=70), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('name')\n )\n op.create_table('bookings',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('auditorium_id', sa.Integer(), nullable=False),\n sa.Column('booking_date_time', sa.DateTime(), nullable=False),\n sa.Column('expire_date_time', sa.DateTime(), nullable=False),\n sa.ForeignKeyConstraint(['auditorium_id'], ['auditoriums.id'], ondelete='cascade'),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='cascade'),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('bookings')\n op.drop_table('users')\n op.drop_table('auditoriums')\n # ### end Alembic commands ###\n","sub_path":"lab_4/migrations/versions/eba0789c0614_.py","file_name":"eba0789c0614_.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"393033765","text":"\"\"\"\nhttps://leetcode.com/problems/minimum-domino-rotations-for-equal-row/\nAlgorithm:\n\nTime Complexity: O(n)\nSpace Complexity: O(1)\n\n\"\"\"\n\n\nclass Solution:\n def minDominoRotations(self, A: List[int], B: List[int]) -> int:\n def check(x):\n rotationsA, rotationsB = 0, 0\n\n for i in range(n):\n # Rotation not possible item at ith index of either of arrays need to be same as\n # first item of array in consideration.\n if A[i] != x and B[i] != x:\n return -1\n # A[i] != x and B[i] == x\n if A[i] != x:\n rotationsA += 1\n # A[i] == x and B[i] != x\n if B[i] != x:\n rotationsB += 1\n # min number of rotations to have all elements equal to x in A or B\n return min(rotationsA, rotationsB)\n\n n = len(A)\n\n rotations = check(A[0])\n # 1. If one could make all elements in A or B equal to A[0];\n # 2. 
Don't run the check if the first items of both arrays are the same,\n            #    as that would just repeat the first check.\n        if rotations != -1 or A[0] == B[0]:\n            return rotations\n        # If one could make all elements in A or B equal to B[0];\n        else:\n            return check(B[0])\n","sub_path":"minimumDominoRotationsForEqualRow.py","file_name":"minimumDominoRotationsForEqualRow.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"210474541","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport time\nimport os\n\ndef duplicate(img):\n    return os.path.exists(\"./img/\" + img)\n\ndef get(max_count=1):\n    start = time.time()\n    base_url = \"http://10000img.com/\"\n    url = \"http://10000img.com/ran.php\"\n\n    count = 1\n    while count <= max_count:\n        print(\"+--------[ image #%d ]---------+\" % count)\n\n        html = urllib.request.urlopen(url)\n        source = html.read()\n\n        soup = BeautifulSoup(source, \"html.parser\")\n\n        img = soup.find(\"img\")\n        img_src = img.get(\"src\")\n        img_url = base_url + img_src\n        img_name = img_src.replace(\"/\", \"\")\n\n        if not duplicate(img_name):\n            urllib.request.urlretrieve(img_url, \"./img/\" + img_name)\n        else:\n            print(\"Duplicate image!\")\n\n        print(\"image src:\", img_src)\n        print(\"image url:\", img_url)\n        print(\"image name:\", img_name)\n        print(\"\\n\")\n\n        count += 1\n    else:\n        print(\"Crawling finished\")\n        print(\"Crawling took:\", round(time.time() - start, 6), \"seconds\")\n\n\nnum = int(input(\"Number of images: \"))\nget(num)","sub_path":"crawling_ex1.py","file_name":"crawling_ex1.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"207669946","text":"from django.db import models\n\nclass TimeStampedModel(models.Model):\n    '''\n    An abstract base class that provides self-updating 'created' and 'updated' fields.\n    '''\n    created = models.DateTimeField(auto_now_add = True)\n    updated = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        abstract = True\n\nclass Industry(TimeStampedModel):\n    '''\n    This is the class that holds the industry classification used to classify companies.\n    '''\n    industryId = models.AutoField(primary_key=True, db_column='IndustryId')\n    industryName = models.CharField(db_column='IndustryName', max_length=100, verbose_name='Industry Name', null=True)\n\n    def __unicode__(self):\n        return self.industryName\n\n    class Meta:\n        verbose_name_plural = 'Industries'\n        db_table = 'industries'\n        ordering = ['industryName']\n\nclass CompanyRelationshipType(TimeStampedModel):\n    '''\n    This is the class that holds the company relationship types, e.g. 
Client, vendor, freelancer etc.\n '''\n coRelId = models.AutoField(primary_key=True, db_column='companyRelId')\n companyTypeName = models.CharField(db_column='companyType',max_length=100,verbose_name='Relationship Type', null=True)\n\n def __unicode__(self):\n return self.companyTypeName\n\n class Meta:\n verbose_name_plural ='Company Type'\n db_table = 'companyType'\n ordering =['companyTypeName']\n\n\nclass Company(TimeStampedModel):\n companyId = models.AutoField(primary_key=True, db_column='companyId')\n companyName = models.CharField(db_column='companyName',max_length=100,verbose_name='Company Name', null=True)\n streetAdress = models.CharField(db_column='streetAddress', max_length=200, null=True,blank=True,verbose_name='Street Address')\n city = models.CharField(db_column='city', max_length=50, null=True,blank=True,verbose_name='city')\n state = models.CharField(db_column='state', max_length=2, null=True,blank=True,verbose_name='State')\n zip = models.CharField(db_column='zip', max_length=15, null=True,blank=True,verbose_name='zip')\n website = models.URLField(db_column='website',null=True,blank=True)\n industry = models.ForeignKey('Industry', verbose_name='Industry', db_column='industry',null=True,blank=True,related_name='coIndustry')\n companyRelationshipType = models.ManyToManyField('CompanyRelationshipType',\n db_table ='companyToRelationshipType',verbose_name='Company Type', null=True, blank =True, related_name='CompanyToRelationship')\n\n def __unicode__(self):\n return self.companyName\n\n class Meta:\n verbose_name_plural='Companies'\n db_table ='companies'\n ordering =['companyName']\n\n\nclass Contacts(TimeStampedModel):\n contactId = models.AutoField(primary_key=True, db_column='contactId')\n contactName = models.CharField(db_column='contactName',max_length=100,verbose_name='Name', null=True)\n company = models.ForeignKey('Company', verbose_name='Company', db_column='relatedCompany',null=True,blank=True,related_name='coIndustry')\n email = models.EmailField(db_column='email',verbose_name='Email',null=True,blank=True, unique=True)\n officePhone = models.CharField(max_length=15,db_column='officePhone',verbose_name='Office Phone', null=True, blank=True, default=None)\n cellPhone = models.CharField(max_length=15,db_column='cellPhone',verbose_name='cell Phone', null=True, blank=True, default=None)\n contactNote = models.TextField(verbose_name='Contact Note', db_column='contactNote', null=True, blank=True)\n #linkedInId\n #twitter handle\n\n\n def __unicode__(self):\n return self.contactName\n\n class Meta:\n verbose_name_plural='Contacts'\n db_table = 'contacts'\n ordering = ['contactName']\n","sub_path":"crm/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614304374","text":"# Med 1 20ms\nfrom collections import Counter\nclass Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n dic = {}\n for i in s:\n dic[i] = dic.get(i, 0)+ 1\n double, single = 0, 0\n for val in dic.values():\n if val % 2 == 0:\n double += val\n else:\n double += val-1\n single = 1\n return(double+single)\n \n \n \n \n \n \n \n# Med 2 24ms\nfrom collections import Counter\nclass Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n res = 0\n count = Counter(s)\n for idx, val in count.items():\n if val % 2 == 0 or res % 2 == 0:\n res += val\n else:\n res += val - 1\n return(res)\n\n \n 
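# Worked example for the two approaches above (input assumed for illustration,
# not part of the original post): s = "abccccdd" gives counts
# {a: 1, b: 1, c: 4, d: 2}; the even counts contribute 4 + 2 = 6 characters and
# one odd-count letter can take the middle slot, so longestPalindrome returns 7
# (e.g. "dccaccd").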
\n \n \n \n \n# Med 3 20ms\nfrom collections import Counter\nclass Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n res = set()\n for i in s:\n if i in res:\n res.remove(i)\n else:\n res.add(i)\n if len(res) > 0:\n return(len(s) - len(res) + 1)\n else:\n return(len(s))\n","sub_path":"409. Longest Palindrome.py","file_name":"409. Longest Palindrome.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477151947","text":"#!/usr/bin/env python\n\nimport os, sys\nimport urllib\nimport urllib.request\nimport time\nimport subprocess\n\nUSERNAME = 'admin'\nPASSWORD = 'admin'\nURL = \"http://192.168.0.2\"\nCMD = {'form_id':'firmware_update', 'reboot_value':'', 'tftp_server_ip_address':'192.168.0.2', 'submit': 'Start', 'reboot': 'Reboot'}\n\nclass Firmware_Downloader(object):\n def __init__(self, url=None):\n if url:\n self.url = url\n else:\n self.url = self.guess_ip()\n\n def auth(self, url):\n p = urllib.request.HTTPPasswordMgrWithDefaultRealm()\n p.add_password(None, url, USERNAME, PASSWORD)\n handler = urllib.request.HTTPBasicAuthHandler(p)\n opener = urllib.request.build_opener(handler)\n urllib.request.install_opener(opener)\n\n def post(self, posturl, params):\n '''\n do the POST action of form in html\n\n :param posturl: POST url\n :param params: dict to orgnize the parameters that needed in post action\n :return:\n '''\n # self.auth(posturl)\n data = urllib.parse.urlencode(params)\n binary_data = data.encode('utf-8')\n req = urllib.request.Request(posturl, binary_data)\n try:\n response = urllib.request.urlopen(req, timeout=3)\n except:\n # after reboot, controller will lost response\n print(\"rebooting...\")\n\n def get(self, url):\n '''\n do the GET action of form in html\n\n :param url: GET url\n :return:\n '''\n self.auth(url)\n return urllib.request.urlopen(url).read()\n\n def guess_ip(self):\n found = False\n for i in range(0, 256):\n if found:\n break\n url = \"http://192.168.0.\" + str(i) + \"/firmware_update.html\"\n self.auth(url)\n try:\n result = urllib.request.urlopen(url, timeout=1).read()\n found = True\n except:\n found = False\n if found:\n url = \"http://192.168.0.\" + str(i-1)\n print(\"found correct ip: %s\" % url)\n else:\n print(\"can't find controller's ip\")\n raise TypeError\n return url\n\n def wait(self, timeout=600):\n '''\n wait the download procedure complete\n\n :param timeout: timeout of download\n :return:\n '''\n start_time = time.time()\n geturl = self.url + \"/get.cgi?firmware_update=status\"\n current_time = time.time()\n result = self.get(geturl)\n print(result)\n while b'SUCCESS' not in result and (current_time-start_time) < timeout:\n if b'ERROR' in result:\n print(\"something error, can't download firmware!!!\")\n raise TypeError\n break\n time.sleep(5)\n current_time = time.time()\n result = self.get(geturl)\n print(result)\n\n if (current_time-start_time) >= timeout:\n print(\"Timeout!!!\")\n raise TypeError\n\n print(\"Download complete, elapsed time: %ds\" % (current_time-start_time))\n\n def download(self):\n # auth this url to connect firstly, otherwise post timeout\n print(self.url)\n url = self.url + \"/firmware_update.html\"\n self.auth(url)\n posturl = self.url + \"/post.cgi\"\n download_cmd = CMD\n self.post(posturl, download_cmd)\n\n def reboot(self):\n posturl = self.url + \"/post.cgi\"\n reboot_cmd = CMD\n reboot_cmd['reboot_value'] = 'do_it'\n self.post(posturl, reboot_cmd)\n\n def 
confirm(self):\n # reboot, wait 2 min to get status to confirm download complete\n time.sleep(120)\n geturl = self.url + \"/get.cgi?firmware_update=status\"\n result = self.get(geturl)\n if b\"idle\" in result.lower():\n print(\"download successful!!!\")\n else:\n print(\"download fail!!!\")\n raise TypeError\n\nif __name__ == '__main__':\n # os.system(\"taskkill /F /im tftpd64.exe\") # kill any tftpd64.exe that maybe already opened\n # time.sleep(10)\n # child = subprocess.Popen(['c:\\\\local\\\\tftpd64\\\\tftpd64.exe'],stdout=subprocess.PIPE) # open tftpd64.exe\n # time.sleep(10)\n f = Firmware_Downloader(URL)\n f.download()\n f.wait()\n f.reboot()\n f.confirm()\n # child.kill() # close tftpd64.exe\n\n","sub_path":"firmware.py","file_name":"firmware.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248789884","text":"#! /usr/bin/python\n# -*- encoding=utf-8 -*-\n'''\nPlot the contour map of C_J(Jacobi Integral) circular restricted three-body problem\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.figure()\n\nmu = 0.1\nxs = np.linspace(-2,2,400)\nX, Y = np.meshgrid(xs, xs)\n\ndef Cj(x, y):\n r1 = np.sqrt((x + mu)**2 + y**2)\n r2 = np.sqrt((x + mu - 1)**2 + y**2)\n omega = (x*x+y*y)/2+(1-mu)/r1+mu/r2\n return 2 * omega\n\nzdata = Cj(X, Y)\nlevels = np.linspace(2.8, 3.7, 40)\nplt.contourf(X, Y, zdata, levels)\nplt.title('$C_J\\ contour\\ at\\ \\mu=0.1$')\nplt.colorbar(ticks=np.linspace(2.8, 3.7, 11))\n\nplt.savefig('contourCJ.png')\nplt.close()\n","sub_path":"chap3/plotcontour.py","file_name":"plotcontour.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443984207","text":"import math\n\ndef segment_length(x1, y1, x2, y2):\n\n return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n\n\ndef get_perimeter(x1, y1, x2, y2, x3, y3, x4, y4):\n seg1, seg2, seg3, seg4 = segment_length(x1, y1, x2, y2), segment_length(x2, y2, x3, y3), segment_length(x3, y3, x4, y4), segment_length(x4, y4, x1, y1)\n per = seg1 + seg2 + seg3 + seg4\n\n return per\n\n\nx1, y1 = [int(i) for i in input(\"Enter (x1, y1) coordinates of rectangle: \").split(\",\")]\nx2, y2 = [int(i) for i in input(\"Enter (x2, y2) coordinates of rectangle: \").split(\",\")]\nx3, y3 = [int(i) for i in input(\"Enter (x3, y3) coordinates of rectangle: \").split(\",\")]\nx4, y4 = [int(i) for i in input(\"Enter (x4, y4) coordinates of rectangle: \").split(\",\")]\n\nrect_per = round(get_perimeter(x1, y1, x2, y2, x3, y3, x4, y4), 2)\n\nprint(\"The perimeter of rectangle is {0}\".format(rect_per))\n\n","sub_path":"Homework_3/rectangle_perimeter.py","file_name":"rectangle_perimeter.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447250855","text":"### https://ide.geeksforgeeks.org/K74MoW4p81\n### Maximum length of consecutive 1’s in a binary string in Python \n\ndef maxConsecutive1(input):\n count = 0 \n res = 0\n for i in range(len(input)):\n if input[i] == '0':\n count = 0\n else:\n count += 1\n res = max(res,count)\n \n print(res)\n\n\n\n# Driver program \nif __name__ == \"__main__\": \n input = '11000111101010111'\n 
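    # for '11000111101010111' the longest run of 1s is the '1111' at indices 5-8,
    # so the call below prints 4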
maxConsecutive1(input)\n","sub_path":"Maximum_length_of_consecutive_1's.py","file_name":"Maximum_length_of_consecutive_1's.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585584650","text":"import cv2\r\nimport numpy as np\r\nfrom keras.models import load_model\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.utils import get_file\r\nimport argparse\r\nimport os\r\nimport cvlib as cv\r\nfrom statistics import mode\r\nfrom utils.datasets import get_labels\r\nfrom utils.inference import detect_faces\r\nfrom utils.inference import draw_text\r\nfrom utils.inference import draw_bounding_box\r\nfrom utils.inference import apply_offsets\r\nfrom utils.inference import load_detection_model\r\nfrom utils.preprocessor import preprocess_input\r\n\r\nUSE_WEBCAM = True # If false, loads video file source\r\n\r\n# parameters for loading data and images\r\nemotion_model_path = './models/emotion_model.hdf5'\r\nemotion_labels = get_labels('fer2013')\r\n# download pre-trained model file (one-time download)\r\ndwnld_link = \"https://github.com/arunponnusamy/cvlib/releases/download/v0.2.0/gender_detection.model\"\r\nmodel_path = get_file(\"gender_detection.model\", dwnld_link,\r\n cache_subdir=\"pre-trained\", cache_dir=os.getcwd())\r\n\r\n# load model\r\nmodel = load_model(model_path)\r\n# hyper-parameters for bounding boxes shape\r\nframe_window = 10\r\nemotion_offsets = (20, 40)\r\n\r\n# loading models\r\nface_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')\r\nemotion_classifier = load_model(emotion_model_path)\r\n\r\n# getting input model shapes for inference\r\nemotion_target_size = emotion_classifier.input_shape[1:3]\r\n\r\n# starting lists for calculating modes\r\nemotion_window = []\r\n\r\n# starting video streaming\r\n\r\ncv2.namedWindow('window_frame')\r\nvideo_capture = cv2.VideoCapture(0)\r\nclasses = ['man','woman']\r\n# Select video or webcam feed\r\nleft_counter = 0 # counter for left movement\r\nright_counter = 0 # counter for right movement\r\n\r\nth_value = 5 # changeable threshold value\r\n\r\n\r\ndef thresholding(value): # function to threshold and give either left or right\r\n global left_counter\r\n global right_counter\r\n\r\n if (value <= 54): # check the parameter is less than equal or greater than range to\r\n left_counter = left_counter + 1 # increment left counter\r\n\r\n if (left_counter > th_value): # if left counter is greater than threshold value\r\n print('RIGHT') # the eye is left\r\n left_counter = 0 # reset the counter\r\n\r\n elif (value >= 54): # same procedure for right eye\r\n right_counter = right_counter + 1\r\n\r\n if (right_counter > th_value):\r\n print('LEFT')\r\n right_counter = 0\r\ncap = None\r\nif (USE_WEBCAM == True):\r\n cap = cv2.VideoCapture(0) # Webcam source\r\nelse:\r\n cap = cv2.VideoCapture('./demo/dinner.mp4') # Video file source\r\n\r\nwhile cap.isOpened(): # True:\r\n ret, bgr_image = cap.read()\r\n # apply face detection\r\n face, confidence = cv.detect_face(bgr_image)\r\n\r\n print(face)\r\n print(confidence)\r\n # loop through detected faces\r\n frame=bgr_image\r\n cv2.line(frame, (320, 0), (320, 480), (0, 200, 0), 2)\r\n cv2.line(frame, (0, 200), (640, 200), (0, 200, 0), 2)\r\n col = frame\r\n\r\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n pupilFrame = frame\r\n clahe = frame\r\n blur = frame\r\n edges = frame\r\n eyes = cv2.CascadeClassifier('haarcascade_eye.xml')\r\n detected = 
eyes.detectMultiScale(frame, 1.3, 5)\r\n for (x, y, w, h) in detected: # similar to face detection\r\n cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (0, 0, 255), 1) # draw rectangle around eyes\r\n cv2.line(frame, (x, y), ((x + w, y + h)), (0, 0, 255), 1) # draw cross\r\n cv2.line(frame, (x + w, y), ((x, y + h)), (0, 0, 255), 1)\r\n pupilFrame = cv2.equalizeHist(\r\n frame[y + int(h * .25):(y + h), x:(x + w)]) # using histogram equalization of better image.\r\n cl1 = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) # set grid size\r\n clahe = cl1.apply(pupilFrame) # clahe\r\n blur = cv2.medianBlur(clahe, 7) # median blur\r\n circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=7,\r\n maxRadius=21) # houghcircles\r\n if circles is not None: # if atleast 1 is detected\r\n circles = np.round(circles[0, :]).astype(\"int\") # change float to integer\r\n print('integer', circles)\r\n for (x, y, r) in circles:\r\n cv2.circle(pupilFrame, (x, y), r, (0, 255, 255), 2)\r\n cv2.rectangle(pupilFrame, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)\r\n # set thresholds\r\n thresholding(x)\r\n for idx, f in enumerate(face):\r\n\r\n # get corner points of face rectangle\r\n (startX, startY) = f[0], f[1]\r\n (endX, endY) = f[2], f[3]\r\n\r\n # draw rectangle over face\r\n cv2.rectangle(bgr_image, (startX, startY), (endX, endY), (0, 255, 0), 2)\r\n\r\n # crop the detected face region\r\n face_crop = np.copy(bgr_image[startY:endY, startX:endX])\r\n\r\n if (face_crop.shape[0]) < 10 or (face_crop.shape[1]) < 10:\r\n continue\r\n\r\n # preprocessing for gender detection model\r\n face_crop = cv2.resize(face_crop, (96, 96))\r\n face_crop = face_crop.astype(\"float\") / 255.0\r\n face_crop = img_to_array(face_crop)\r\n face_crop = np.expand_dims(face_crop, axis=0)\r\n\r\n # apply gender detection on face\r\n conf = model.predict(face_crop)[0]\r\n print(conf)\r\n print(classes)\r\n\r\n # get label with max accuracy\r\n idx = np.argmax(conf)\r\n label = classes[idx]\r\n\r\n label = \"{}: {:.2f}%\".format(label, conf[idx] * 100)\r\n\r\n Y = startY - 10 if startY - 10 > 10 else startY + 10\r\n\r\n # write label and confidence above face rectangle\r\n cv2.putText(bgr_image, label, (startX, Y), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.7, (0, 255, 0), 2)\r\n #bgr_image = video_capture.read()[1]\r\n\r\n gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)\r\n rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)\r\n\r\n faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,\r\n\t\t\tminSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\r\n\r\n for face_coordinates in faces:\r\n\r\n x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)\r\n gray_face = gray_image[y1:y2, x1:x2]\r\n try:\r\n gray_face = cv2.resize(gray_face, (emotion_target_size))\r\n except:\r\n continue\r\n\r\n gray_face = preprocess_input(gray_face, True)\r\n gray_face = np.expand_dims(gray_face, 0)\r\n gray_face = np.expand_dims(gray_face, -1)\r\n emotion_prediction = emotion_classifier.predict(gray_face)\r\n emotion_probability = np.max(emotion_prediction)\r\n emotion_label_arg = np.argmax(emotion_prediction)\r\n emotion_text = emotion_labels[emotion_label_arg]\r\n emotion_window.append(emotion_text)\r\n\r\n if len(emotion_window) > frame_window:\r\n emotion_window.pop(0)\r\n try:\r\n emotion_mode = mode(emotion_window)\r\n except:\r\n continue\r\n\r\n if emotion_text == 'angry':\r\n color = emotion_probability * np.asarray((255, 0, 0))\r\n elif emotion_text == 'sad':\r\n 
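            # the chosen base color triple is scaled by the predicted emotion
            # probability, e.g. probability 0.8 for 'sad' gives
            # 0.8 * (0, 0, 255) = (0, 0, 204) before the int cast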
color = emotion_probability * np.asarray((0, 0, 255))\r\n elif emotion_text == 'happy':\r\n color = emotion_probability * np.asarray((255, 255, 0))\r\n elif emotion_text == 'surprise':\r\n color = emotion_probability * np.asarray((0, 255, 255))\r\n else:\r\n color = emotion_probability * np.asarray((0, 255, 0))\r\n\r\n color = color.astype(int)\r\n color = color.tolist()\r\n\r\n draw_bounding_box(face_coordinates, rgb_image,color)\r\n draw_text(face_coordinates, rgb_image, emotion_mode,\r\n color, 0, -45,1,3)\r\n\r\n bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\r\n cv2.imshow('window_frame', bgr_image)\r\n cv2.imshow('image4', pupilFrame)\r\n cv2.imshow('clahe', clahe)\r\n cv2.imshow('blur', blur)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"emotions.py","file_name":"emotions.py","file_ext":"py","file_size_in_byte":8102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330668274","text":"\"\"\"\nscript outputs measurement results on SDI-12 bus to an H4161 which converts\nthem to 4-20mA output\n\nbased on K1IntWel.txt basic program for\nKaweah No 1 Intake Gage data conversion to Analog Output\n\nassociated with a setup file that has the following measurements setup:\nM1 is the H331 Gage Height at the 201 River gage shelter\nM2 is the output from the Panametrics AVM for canal flow gage no. 202\nM3 is the output from the Panametrics AVM for fish release gage no. 201a\nM4 is the output from the H377 that measures the outside air temperature\nM5 is the output from the H377 that measures the water temperature\nS1 is the script task that runs this script\n\"\"\"\n\nimport re\nimport utime\nfrom sl3 import *\n\n\nclass Sdi12Error(Exception):\n pass\n\n\ndef sdi_bus_valid(sdi_bus):\n \"\"\"\n Routine checks whether the provided parameter is a SDI-12 bus\n \n :param sdi_bus: string indicating bus: \"Port1\", \"Port2\", or \"RS485\"\n :return: True if provided parameter is a valid bus\n :rtype: Boolean\n \"\"\"\n bus_upper = sdi_bus.upper()\n if (\"PORT1\" in bus_upper) or (\"PORT2\" in bus_upper) or (\"RS485\" in bus_upper):\n return True\n else:\n return False\n\n\ndef sdi_send_command_get_reply(cmd_to_send, sdi_bus=\"Port1\"):\n \"\"\"\n Sends provided command out on the specified SDI-12 bus, gets reply from the sensor.\n \n :param cmd_to_send: the command to send on the SDI-12 bus, e.g. 
\"0M!\"\n :param sdi_bus: string indicating bus: \"Port1\", \"Port2\", or \"RS485\"\n :return: sensor reply, or \"No reply\"\n :rtype: str\n \"\"\"\n\n if sdi_bus_valid(sdi_bus):\n reply = command_line('!SDI {} {}'.format(sdi_bus, cmd_to_send), 128)\n if \"Got reply: \" in reply:\n reply = reply.replace(\"Got reply:\", \"\")\n else:\n raise Sdi12Error(\"No such bus\", sdi_bus)\n\n reply = reply.strip()\n return reply\n\n\n@TASK\ndef sdi_4_20():\n \"\"\"\n Converts measurement results to SDI-12 commands for H4161\n \"\"\"\n # 201 River ght to analog output (0-16 feet)\n f = measure(1).value / 16 * 16 + 4 # yes, f/16*16 == f; kept for consistency with the other scalings\n\n # 202 AVM canal flow to analog output (0-30 cfs)\n g = measure(2).value / 30 * 16 + 4\n\n # 201a AVM fish release flow to analog output (0-20 cfs)\n h = measure(3).value / 20 * 16 + 4\n\n # Air Temp to analog output (0-130 deg F)\n i = measure(4).value / 130 * 16 + 4\n\n # Stilling Well Water Temp to analog output (0-100 deg F)\n j = measure(5).value / 100 * 16 + 4\n\n \"\"\"\n use extended command to set analog output values \n The sdi12 address of the river (201) H4161 is set to \"2\"\n The sdi12 address of the Canal AVM (202) H4161 is set to \"3\"\n The sdi12 address of the Fish Release AVM (201a) H4161 is set to \"4\"\n The sdi12 address of the Air temp H4161 is set to \"5\"\n The sdi12 address of the Water Temp H4161 is set to \"6\"\n \n delays of 100 milliseconds are used to allow adequate time for writing to the SDI12 bus\n \"\"\"\n right_digits = 1 # as per H4161 manual examples\n\n cmd = '2XSM{0:.{1}f}!'.format(f, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '3XSM{0:.{1}f}!'.format(g, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '4XSM{0:.{1}f}!'.format(h, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '5XSM{0:.{1}f}!'.format(i, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n\n cmd = '6XSM{0:.{1}f}!'.format(j, right_digits)\n sdi_send_command_get_reply(cmd)\n utime.sleep(0.1)\n","sub_path":"projects/sdi_H4161/sdi_H4161.py","file_name":"sdi_H4161.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"579379055","text":"# get kth largest element\ndef quickselect(a, k, start=0):\n if type(k) != int:\n raise TypeError('\\'{}\\' is not an integer'.format(k))\n if k <= 0:\n raise ValueError('{} is not > 0'.format(k))\n if k > len(a):\n raise ValueError('{} is not <= {}'.format(k, len(a)))\n\n pivot = a[start]\n left = list(filter(lambda x: x < pivot, a))\n right = list(filter(lambda x: x > pivot, a))\n new_a = left + [pivot] + right\n if len(right) == k - 1:\n return pivot\n elif len(right) > k - 1: # more than k-1 larger elements: the answer lies in right\n return quickselect(new_a, k, len(left) + 1)\n else: # fewer than k-1 larger elements: the answer lies in left\n return quickselect(new_a, k, 0)\n\nif __name__=='__main__':\n a = [5,4,6,3,7,2,8,1,9]\n cases = [1, 2, 3, 'hi', 0, 9, 10] \n for k in cases:\n try:\n print('k = {}; r = {}'.format(k, quickselect(a, k)))\n except TypeError as e:\n print(e)\n except ValueError as e:\n print(e)\n\n","sub_path":"quickselect.py","file_name":"quickselect.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27359435","text":"import numpy as np\nfrom sklearn.neural_network import MLPClassifier\n\n\nclass NeuralNet:\n def __init__(self, nn_type='reg', 
hidden_layer_sizes=(5,), activation='relu'):\n self.hidden_layer_size = hidden_layer_sizes\n self.nn_type = nn_type\n self.learning_rate = 1e-3\n self.regularization_rate = 1e-4\n self.activation = activation\n self.tol = 1e-3\n self.max_iter = 300\n self.weighs = []\n self.fitted = False\n\n def fit(self, X, y, learning_rate=1e-3, reg='l2', regularization_rate=1e-4, tol=1e-3, max_iter=300, verbose=True):\n self.fitted = True\n self.tol = tol\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n self.regularization_rate = regularization_rate\n # begin\n m, n = X.shape\n out_size = 1\n if y.ndim == 2:\n out_size = y.shape[1]\n self.init_weighs(input_size=n, output_size=out_size)\n error_calculator = self.avg_squared_error\n if 'c' in self.nn_type:\n error_calculator = self.avg_cross_entropy\n for k in range(max_iter):\n history = [X.T] + self.feed_forward(X, return_history=1)\n y_p = history[-1]\n if verbose:\n print(f'iteration {k}, {error_calculator.__name__} = {error_calculator(y, y_p, reg)}')\n n_lay = len(history)\n for i in range(n_lay - 1):\n act_i = np.insert(history[i], [0], 1, axis=0) # add bias line (1)\n sigma_nxt = history[i + 1] - y_p[0]\n grad = np.dot(sigma_nxt, act_i.T) / m\n grad[:, 1:] += (regularization_rate * (self.weighs[i])[:, 1:]) / m\n self.weighs[i] -= (learning_rate * grad)\n\n def predict(self, X, prob=0, thresh=0.5):\n self.check_for_error()\n y = self.feed_forward(X, return_history=0)[0].T\n if prob:\n return y\n return np.int32(y >= thresh)\n\n def score(self, X, y):\n # stub: scoring is not implemented yet\n self.check_for_error()\n\n def feed_forward(self, X, return_history=0):\n # returns the list of layers after feed forward\n self.check_for_error()\n y_p = X.T\n history = []\n for i, w in enumerate(self.weighs):\n y_p = np.dot(w, np.insert(y_p, [0], 1, axis=0))\n if i < (len(self.weighs) - 1):\n y_p = self.activate(y_p)\n if return_history:\n history.append(y_p)\n if 'c' in self.nn_type: # classifier\n if y_p.shape[0] == 1:\n y_p = self.sig(y_p)\n if return_history:\n history.append(y_p)\n return history\n return y_p\n\n def init_weighs(self, input_size, output_size):\n # Xavier weight init for 'tanh' activation, He init otherwise (e.g. ReLU)\n layer_sizes = [input_size] + list(self.hidden_layer_size) + [output_size]\n n_layer = len(layer_sizes)\n # activation-dependent scale factor\n c = 2 # He init\n if self.activation == 'tanh': # Xavier init\n c = 1\n cf = lambda x: np.sqrt(c / x)\n for i in range(n_layer - 1):\n n_rows = layer_sizes[i + 1]\n n_cols = layer_sizes[i] + 1\n w = np.random.randn(n_rows, n_cols - 1) * cf(n_cols - 1)\n w = np.insert(w, [0], 1, axis=1)\n self.weighs.append(w)\n\n def avg_cross_entropy(self, y, y_p, reg='l2'):\n # compute cross entropy error with regularization rate (l1 or l2)\n ce = -np.sum(y * np.log(y_p)) - np.sum((1 - y) * np.log(1 - y_p)) # summed cross entropy (averaged on return)\n regularizator = self.compute_regularization_rate(reg)\n return (ce + regularizator) / len(y)\n\n def avg_squared_error(self, y, y_p, reg='l2'):\n sqr_error = np.sum((y_p - y) ** 2)\n regularizator = self.compute_regularization_rate(reg)\n return (sqr_error + regularizator) / len(y)\n\n def compute_regularization_rate(self, reg='l2'):\n regularizator = 0\n if reg == 'l2':\n for w in self.weighs:\n regularizator += np.sum(w[:, 1:] ** 2)\n regularizator *= (self.regularization_rate / 2)\n else: # l1\n for w in self.weighs:\n regularizator += np.sum(np.abs(w[:, 1:]))\n regularizator *= self.regularization_rate\n return regularizator\n\n def check_for_error(self):\n # guard: the model must be fitted before predicting or scoring\n if not 
self.fitted:\n raise RuntimeError('Model not fitted yet!')\n\n def activate(self, layer):\n f = getattr(self, self.activation)\n return f(layer)\n\n @staticmethod\n def sig(matrix):\n return 1 / (1 + np.exp(-matrix))\n\n @staticmethod\n def relu(matrix):\n matrix[matrix <= 0] = 0\n return matrix\n\n @staticmethod\n def tanh(matrix):\n tmp = np.exp(-2 * matrix)\n return (1 - tmp) / (1 + tmp)\n\n\nX = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\ny = np.array([0, 1, 1, 0])\n\nmodel = NeuralNet(nn_type='class', hidden_layer_sizes=(1, 1), activation='sig')\nmodel.fit(X, y, learning_rate=0.0001, regularization_rate=0, verbose=1, max_iter=500000)\n\nprint(model.predict(X, prob=1))\n","sub_path":"NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590728066","text":"#coding:utf-8\r\n\r\nimport requests\r\nimport chardet\r\nimport urllib.parse\r\nimport sys\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport threading\r\nimport time\r\n#import tesserocr\r\n#from PIL import Image\r\n\r\n#cookie\r\ncookie =''\r\n\r\n\r\n# user agent\r\nagent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4620.400 QQBrowser/9.7.13014.400'\r\n\r\n# assemble the request headers\r\nheader = {'user-agent':agent,'cookie':cookie,'referer':'http://google.com.hk'}\r\n\r\n\r\n# province\r\nprovince = input('Enter province: ')\r\n\r\n# city\r\ncity = input('Enter city: ')\r\n\r\n# keyword\r\nkeywords = input('Enter keyword: ')\r\n\r\n# build the search URL\r\n# params: province -> province name\r\n# city -> city name\r\n# keywords -> search keyword\r\n# page -> page number\r\ndef createURL(province,city,keywords,page=0):\r\n # base search URL (variant without a location filter)\r\n url = 'https://s.1688.com/company/company_search.htm?keywords='\r\n\r\n # a keyword is required\r\n if keywords == '' :\r\n print('A keyword is required')\r\n exit()\r\n else:\r\n keywords = bytes(keywords, encoding = \"utf8\") \r\n keywords = keywords.decode('utf8').encode('gb2312')\r\n keywords = urllib.parse.quote(keywords)\r\n url = url + keywords\r\n\r\n # append the city if given\r\n if city != '':\r\n city = bytes(city, encoding = \"utf8\") \r\n city = city.decode('utf8').encode('gb2312')\r\n city = urllib.parse.quote(city)\r\n url = url + '&city=' + city\r\n\r\n # append the province if given\r\n if province != '':\r\n province = bytes(province, encoding = \"utf8\") \r\n province = province.decode('utf8').encode('gb2312')\r\n province = urllib.parse.quote(province)\r\n url = url + '&province=' + province\r\n\r\n # append the page number if given\r\n if page >= 1:\r\n url = url + '&offset=3&beginPage=' + str(page)\r\n\r\n\r\n # finally, append the remaining (undocumented) query parameters\r\n url = url+'&n=y&filt=y'\r\n return url\r\n\r\n# build the full list of paginated URLs\r\n# returns a list\r\ndef createPageHref(url,allpage):\r\n pageList = []\r\n allpage = int(allpage)\r\n i = 1\r\n while i <= allpage:\r\n newurl = url + '&offset=3&beginPage=' + str(i)\r\n pageList.append(newurl)\r\n i+=1\r\n return pageList\r\n\r\n# thread-safe counter\r\nclass Num:\r\n def __init__(self):\r\n self.num = 0\r\n self.lock = threading.Lock()\r\n def add(self):\r\n self.lock.acquire()# acquire the lock to guard the counter\r\n self.num += 1\r\n num = self.num\r\n self.lock.release()# release the lock\r\n return num\r\n \r\nn = Num()\r\n\r\n# worker thread: scrape one contact page\r\nclass myThread (threading.Thread):\r\n def __init__(self,href):\r\n threading.Thread.__init__(self)\r\n self.href = href\r\n def run(self):\r\n href = self.href\r\n # fetch the contact page\r\n html =\tvisitHref(href)\r\n # parse the HTML\r\n htmlpop = BeautifulSoup(html,'html.parser')\r\n # extract the contact name\r\n lianxiren = htmlpop.find_all('a',class_=\"membername\")[0].get_text()\r\n lianxiren = 
lianxiren.replace('\\n','')\r\n # extract the company name\r\n companyname = htmlpop.find(class_=\"contact-info\").get_text()\r\n companyname = companyname.replace('\\xa0',u'')\r\n companyname = companyname.replace('\\n','')\r\n # detailed contact info\r\n miinfo = htmlpop.find(class_=\"contcat-desc\").get_text()\r\n miinfo = miinfo.replace('\\xa0',u'')\r\n # strip redundant spaces and line breaks\r\n miinfo = miinfo.replace('\\n','')\r\n miinfo = miinfo.replace(' ','')\r\n\r\n # assemble the output record (labels and replace patterns kept in Chinese: they match the scraped page text)\r\n pin = '公司名称:' + companyname + '\\n姓名:' + lianxiren + '\\n详细信息:' + miinfo + '\\n————————————\\n'\r\n pin = pin.replace('查看公司介绍','')\r\n pin = pin.replace('查看信用状况','')\r\n pin = pin.replace('免费电话','')\r\n pin = pin.replace('传真','\\n传真')\r\n pin = pin.replace('移动电话','\\n移动电话')\r\n pin = pin.replace('地址','\\n地址')\r\n pin = pin.replace('邮编','\\n邮编')\r\n pin = pin.replace('公司主页','\\n公司主页')\r\n pin = pin.replace('联系人','\\n联系人')\r\n pin = pin.replace('查看旺铺介绍','\\n查看旺铺介绍')\r\n \r\n whiteTXT(pin,'a.txt')\r\n #nums += 1\r\n return lianxiren\r\n\r\n \r\n# fetch a URL and return its HTML\r\ndef visitHref(href):\r\n visit \t\t=\trequests.get(href,headers=header)\r\n visitHTML \t=\tvisit.text\r\n\r\n # detect whether a login page was returned (pattern kept in Chinese: it matches page text)\r\n hasLogin = re.search('1688/淘宝会员(仅限会员名)请在此登录',visitHTML)\r\n if hasLogin is not None :\r\n exit('Login expired; please log in again') \r\n\r\n return visitHTML\r\n\r\n# extract company contact-page links from the listing HTML \r\ndef getCompanyHref(html):\r\n companyLink =\t[]\r\n pop \t =\tBeautifulSoup(html,'html.parser')\r\n\r\n findDiv \t =\tpop.find_all(\"a\", class_=\"list-item-title-text\")\r\n for clinks in findDiv :\r\n nowLink = clinks['href']\r\n searchdy = re.search('tracelog=p4p',nowLink)\r\n if searchdy is not None :\r\n nowLink = nowLink.replace('?tracelog=p4p','')\r\n\r\n nowLink = nowLink + '/page/contactinfo.htm'\r\n companyLink.append(nowLink)\r\n\r\n # warn if nothing was found\r\n if not companyLink:\r\n print('No company data captured')\r\n \r\n return companyLink\r\n\r\n# append content to a text file\r\ndef whiteTXT(content,filename):\r\n f= open(filename,'a')\r\n f.write(content)\r\n f.close()\r\n\r\n# get the company's main industry (stub)\r\ndef getIndustry(html):\r\n return 1\r\n\r\n# build the search URL\r\nfwurl = createURL(province,city,keywords)\r\n\r\n# fetch the first results page\r\nhtml = requests.get(fwurl,headers=header)\r\nhtml = html.text\r\n\r\n# parse the HTML\r\npop = BeautifulSoup(html,'html.parser')\r\n\r\n# find the total page count, defaulting to 1 (the regex matches the site's Chinese \"N pages in total\" label)\r\nallPagePatten = '共(\\d+)页'\r\nallPageStatus = re.search(allPagePatten,html)\r\nallPage = re.findall(allPagePatten,html)\r\n\r\nif allPageStatus is not None :\r\n allPage = allPage[0]\r\n\r\nelse:\r\n allPage = 1\r\n\r\n# build the list of all page URLs\r\nallPageList = createPageHref(fwurl,allPage)\r\n# record counter\r\nnums = 0\r\n# compose the output file name\r\n#filename = provice + city + keywords + '.txt'\r\n# loop over result pages and collect contact pages\r\nfor links in allPageList :\r\n #print(links)\r\n # fetch each page's HTML\r\n getHTML = visitHref(links)\r\n #print(getHTML)\r\n # extract the company contact links\r\n getCompanyLink = getCompanyHref(getHTML)\r\n #print(getCompanyLink)\r\n for blink in getCompanyLink:\r\n # stop at the first product-detail link\r\n detail = re.search('detail',blink)\r\n if detail is not None :\r\n break\r\n print(blink + ' captured OK\\n')\r\n nums += 1\r\n ak = myThread(blink)\r\n ak.start()\r\n #print(ak)\r\n #exit()\r\n \r\nprint('Captured ' + str(nums) + ' records')\r\n \r\n\r\n\r\n \r\n\r\n","sub_path":"1688capture.py","file_name":"1688capture.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430870429","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\ntriX=[0,300,1000,1300,700,0]\ntriY=[1000,0,0,1000,1500,1000]\n\n## co-ordinates of the starting points\nstartX=[500]\nstartY=[500]\n\n## to randomly choose from the shuffled 
dice array\nind=[0,1,2,3,4]\n## Dice values\ndice=[1,2,3,4,5]\n\n## equivalent to shuffling the dice\ndef get_random():\n\trandom.shuffle(dice)\n\trandom.shuffle(ind)\n\treturn dice[ind[3]]\n\nplt.figure()\n## plotting the triangle\nplt.plot(triX, triY)\n\n## starting the iterations\nfor i in range(10000):\n\t\n\t## rolling the dice\n\ttemp=get_random()\n\t## verbose for cli\n\tprint(i, '-->', temp)\n\t\n\t## getting index of the current point on the co-ordinate system\n\tindex=len(startX)-1\n\ttempX=startX[index]\n\ttempY=startY[index]\n\n\t## updating the current point and storing it in the array to plot later\n\tif temp == 1:\n\t\tstartX.append((tempX+triX[0])/2)\n\t\tstartY.append((tempY+triY[0])/2)\n\n\tif temp == 2:\n\t\tstartX.append((tempX+triX[2])/2)\n\t\tstartY.append((tempY+triY[2])/2)\n\n\tif temp == 3:\n\t\tstartX.append((tempX+triX[1])/2)\n\t\tstartY.append((tempY+triY[1])/2)\t\n\n\n\tif temp == 4:\n\t\tstartX.append((tempX+triX[3])/2)\n\t\tstartY.append((tempY+triY[3])/2)\t\n\n\tif temp == 5:\n\t\tstartX.append((tempX+triX[4])/2)\n\t\tstartY.append((tempY+triY[4])/2)\t\n\n\n## plotting the graph of all the locations of the point traversed on the co-ordinate plane\nplt.scatter(startX, startY)\nplt.show()\n","sub_path":"pentfrac.py","file_name":"pentfrac.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"7327888","text":"def alphabet_position_versao_1(text):\n print(text)\n alfabeto = list(\"abcdefghijklmnopqrstuvwxyz\")\n result = \"\"\n for word in text:\n if word.lower() in alfabeto:\n result += str(alfabeto.index(word.lower())+1) + \" \"\n return result.strip()\n \n\ndef alphabet_position_versao_2(text):\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n if type(text) == str:\n text = text.lower()\n result = ''\n for letter in text:\n if letter.isalpha() == True:\n result = result + ' ' + str(alphabet.index(letter) + 1)\n return result.lstrip(' ')\n\ndef alphabet_position_original(text):\n alphabet = {\"a\":\"1\",\n \"b\":\"2\",\n \"c\":\"3\",\n \"d\":\"4\",\n \"e\":\"5\",\n \"f\":\"6\",\n \"g\":\"7\",\n \"h\":\"8\",\n \"i\":\"9\",\n \"j\":\"10\",\n \"k\":\"11\",\n \"l\":\"12\",\n \"m\":\"13\",\n \"n\":\"14\",\n \"o\":\"15\",\n \"p\":\"16\",\n \"q\":\"17\",\n \"r\":\"18\",\n \"s\":\"19\",\n \"t\":\"20\",\n \"u\":\"21\",\n \"v\":\"22\",\n \"w\":\"23\",\n \"x\":\"24\",\n \"y\":\"25\",\n \"z\":\"26\"}\n \n new_text = \"\"\n text = text.lower().strip() \n for word in text:\n if word in alphabet:\n new_text+=\" \"+(alphabet.get(word))\n\n return new_text.lstrip()\n \ndef main():\n print(alphabet_position_original(\".The sunset sets at twelve o' clock.\"))\n \nmain()","sub_path":"semana6/alphabet.py","file_name":"alphabet.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316807086","text":"# Approach 1: recursion\n# Recurse from the head; each call swaps one pair of nodes, referred to as first and second\n# Each call returns second, since it becomes the new head after the swap\n\n# TC: O(n), n: nodes in linked-list\n# SC: O(n), n: stack space used by the recursion\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution(object):\n def swapPairs(self, head: ListNode) -> ListNode:\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head or not head.next:\n return head\n\n first, second = head, head.next\n\n first.next = self.swapPairs(second.next)\n second.next = first\n\n return 
second\n","sub_path":"02-DataStructure/03-Array-Linkedlist/Linkedlist/24-swap-nodes-in-pairs/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610057434","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom FangTianXiaSingle.items import OldHouseItem, NewHouseItem\n\n\nclass SoufangSpider(scrapy.Spider):\n name = 'soufang'\n allowed_domains = ['fang.com']\n start_urls = ['https://www.fang.com/SoufunFamily.htm']\n\n def parse(self, response):\n trs = response.xpath(\"//div[@id='c02']//tr\")\n province = None\n for tr in trs:\n tds = tr.xpath(\".//td[not(@class)]\")\n province_td = tds[0].xpath(\".//text()\").get().strip()\n if province_td:\n province = province_td\n city_links = tds[1].xpath(\".//a\")\n # skip the overseas section ('其它' = \"Other\", kept in Chinese: it matches site text)\n if province == '其它':\n continue\n for city_link in city_links:\n city_name = city_link.xpath(\".//text()\").get()\n city_url = city_link.xpath(\".//@href\").get()\n scheme, domain = city_url.split(\"//\")\n # Beijing's new-house and second-hand-house links need special handling\n if 'bj.' in domain:\n newHouseLink = 'http://newhouse.fang.com/house/s'\n oldHouseLink = 'http://esf.fang.com/'\n else:\n newHouseLink = scheme + \"//\" + \"newhouse.\" + domain + \"house/s\"\n oldHouseLink = scheme + \"//\" + \"esf.\" + domain\n\n yield scrapy.Request(url=newHouseLink, callback=self.parse_newhouse,\n meta={'info': (province, city_name)})\n\n yield scrapy.Request(url=oldHouseLink, callback=self.parse_oldhouse,\n meta={'info': (province, city_name)})\n\n def parse_newhouse(self, response):\n province, city = response.meta.get(\"info\")\n lis = response.xpath(\".//div[contains(@class,'nl_con')]/ul//li\")\n for li in lis:\n # house name\n name = li.xpath(\".//div[@class='nlcd_name']//text()\").getall()\n if name:\n name = re.sub(r'[\\s\\n]', '', ''.join(name))\n # price\n price = li.xpath(\".//div[@class='nhouse_price']//text()\").getall()\n price = re.sub(r'[\\s\\n广告]', '', ''.join(price))\n # room layout ('一共' = \"in total\", kept in Chinese: it is written into the scraped data)\n rooms = li.xpath(\".//div[contains(@class,'house_type')]//text()\").getall()\n rooms = re.sub('-', '一共', re.sub(r'[\\s\\n]', '', ''.join(rooms)))\n # address\n address = li.xpath('.//div[@class=\"address\"]/a/@title').get()\n address = re.sub(r'\\[.+\\]', '', address)\n # district\n district = li.xpath(\".//div[@class='address']//text()\").getall()\n try:\n district = re.search(r'(\\[.+\\])', ''.join(district)).group(1)\n except Exception:\n district = ''\n # sale status\n is_sale = li.xpath(\".//div[contains(@class,'fangyuan')]/span/text()\").get()\n # listing URL\n orgin_url = response.urljoin(li.xpath(\".//div[@class='nlcd_name']/a/@href\").get())\n\n items = NewHouseItem(province=province, city=city, name=name, price=price, rooms=rooms, address=address,\n district=district,\n is_sale=is_sale, orgin_url=orgin_url)\n yield items\n\n next_url = response.xpath(\"//div[@class='page']//a[@class='next']/@href\").get()\n if next_url:\n next_url = response.urljoin(next_url)\n yield scrapy.Request(next_url, callback=self.parse_newhouse, meta={'info': (province, city)})\n\n def parse_oldhouse(self, response):\n province, city = response.meta.get(\"info\")\n lis = response.xpath(\"//div[contains(@class,'shop_list')]//dl[@dataflag='bg']\")\n\n for li in lis:\n try:\n name = li.xpath(\".//p[@class='add_shop']/a/@title\").get()\n address = li.xpath(\".//p[@class='add_shop']//span/text()\").get()\n house_info = li.xpath(\".//p[@class='tel_shop']//text()\").getall()\n house_info = ''.join(house_info).split('|')\n house_info = list(map(lambda x: 
re.sub(r'[\\r\\n\\s]', '', x), house_info))\n rooms, areas, floor, toward, year, *ags = house_info\n unit_price = li.xpath(\".//dd[@class='price_right']/span[not(@class)]/text()\").get()\n total_price = li.xpath(\".//dd[@class='price_right']/span[@class='red']//text()\").getall()\n total_price = ''.join(total_price)\n item = OldHouseItem(province=province, city=city, name=name, address=address, rooms=rooms, floor=floor,\n toward=toward, year=year, area=areas, unit_price=unit_price,\n total_price=total_price)\n yield item\n except:\n continue\n\n next_url = response.xpath(\"//div[@class='page_al']//span/following-sibling::p//a[text()='下一页']/@href\").get()\n if next_url:\n yield scrapy.Request(response.urljoin(next_url), callback=self.parse_oldhouse,\n meta={'info': (province, city)})\n","sub_path":"FangTianXiaSingle/FangTianXiaSingle/spiders/soufang.py","file_name":"soufang.py","file_ext":"py","file_size_in_byte":5242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308399813","text":"#!/usr/bin/env python3\n# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\n\nimport torch\nimport poptorch\nfrom torch import nn\n\nimport pytest\n\n# Convolutions.\n\nconvolutions = [\n torch.nn.Conv1d,\n torch.nn.Conv2d,\n torch.nn.Conv3d,\n torch.nn.ConvTranspose1d,\n torch.nn.ConvTranspose2d,\n torch.nn.ConvTranspose3d,\n torch.nn.Unfold,\n torch.nn.Fold,\n]\n\npadding_modes = ['zeros', 'reflect', 'replicate', 'circular']\n\n# Unsupported\nfolds = [] # torch.nn.Unfold, torch.nn.Fold,\n\n# Supported.\nconv_1D = [torch.nn.Conv1d, torch.nn.ConvTranspose1d]\nconv_2D = [torch.nn.Conv2d, torch.nn.ConvTranspose2d]\nconv_3D = [torch.nn.Conv3d, torch.nn.ConvTranspose3d]\n\n\ndef execute_and_check_wrapper(model, input):\n # Run on CPU.\n nativeOut = model(input)\n\n # Run on IPU.\n poptorch_model = poptorch.inferenceModel(model)\n poptorch_out = poptorch_model(input)\n\n torch.testing.assert_allclose(poptorch_out, nativeOut)\n\n\n@pytest.mark.parametrize(\"op\", conv_1D)\n@pytest.mark.parametrize(\"padding_mode\", padding_modes)\ndef test_conv1D(op, padding_mode):\n if op is torch.nn.ConvTranspose1d and padding_mode != 'zeros':\n pytest.skip('skipping unsupported padding_mode')\n torch.manual_seed(42)\n\n input = torch.randn(20, 4, 10)\n\n # With square kernels and equal stride\n model = op(4, 10, 3, stride=2, padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n # Grouped convolutions.\n model = op(4, 8, 3, stride=2, groups=2, padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n if op is not torch.nn.ConvTranspose1d:\n # # non-square kernels and unequal stride and with padding and dilation\n model = op(4,\n 33, (3),\n stride=(2),\n padding=(4),\n dilation=(3),\n padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n\n@pytest.mark.parametrize(\"op\", conv_2D)\n@pytest.mark.parametrize(\"padding_mode\", padding_modes)\ndef test_conv2D(op, padding_mode):\n if op is torch.nn.ConvTranspose2d and padding_mode != 'zeros':\n pytest.skip('skipping unsupported padding_mode')\n torch.manual_seed(42)\n\n input = torch.randn(20, 16, 50, 10)\n\n # With square kernels and equal stride\n model = op(16, 4, 3, stride=2, padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n # Grouped convolutions.\n model = op(16, 4, (3, 5), stride=2, groups=2, padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n # Rectangular padding/stride\n if op is not torch.nn.ConvTranspose2d:\n # 
non-square kernels and unequal stride and with padding\n model = op(16, 4, (3, 5), stride=(2, 1), padding=(4, 2))\n execute_and_check_wrapper(model, input)\n\n # non-square kernels and unequal stride and with padding and dilation\n model = op(16,\n 4, (3, 5),\n stride=(2, 1),\n padding=(4, 2),\n dilation=(3),\n padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n\n@pytest.mark.parametrize(\"op\", conv_3D)\n@pytest.mark.parametrize(\"padding_mode\", padding_modes)\ndef test_conv3D(op, padding_mode):\n if (op is torch.nn.ConvTranspose3d and padding_mode != 'zeros') or \\\n (op is torch.nn.Conv3d and padding_mode == 'reflect'):\n pytest.skip('skipping unsupported padding_mode')\n\n torch.manual_seed(42)\n input = torch.randn(2, 4, 3, 5, 10)\n\n # With square kernels and equal stride\n model = op(4, 6, 3, stride=2, padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n # Grouped convolutions.\n model = op(4, 6, 3, stride=2, groups=2, padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n if op is not torch.nn.ConvTranspose3d:\n # non-square kernels and unequal stride and with padding\n model = op(4,\n 6, (3, 2, 2),\n stride=(2, 1, 1),\n padding=(4, 2, 0),\n padding_mode=padding_mode)\n execute_and_check_wrapper(model, input)\n\n # non-square kernels and unequal stride and with padding and dilation\n model = op(4,\n 6, (3, 4, 2),\n stride=(2, 1, 1),\n padding=(4, 2, 0),\n dilation=(3, 1, 1))\n\n execute_and_check_wrapper(model, input)\n\n\ndef test_available_memory():\n torch.manual_seed(42)\n input = torch.randn(2, 4, 3, 10)\n\n class BasicNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(4, 4, 3, stride=2)\n\n def forward(self, x):\n out = self.conv(x)\n out = poptorch.set_available_memory(out, 0.6)\n return out\n\n # Just check we don't explode when the value is set.\n model = BasicNetwork()\n execute_and_check_wrapper(model, input)\n\n\n@pytest.mark.parametrize(\"mode\", poptorch.MatMulSerializationMode)\ndef test_matmul_serialization(mode):\n torch.manual_seed(42)\n\n input_channels = 6\n reducing_dim = 2\n output_channels = 4\n lhs = torch.randn(input_channels, reducing_dim)\n rhs = torch.randn(reducing_dim, output_channels)\n if mode == poptorch.MatMulSerializationMode.Disabled:\n factor = 0\n elif mode == poptorch.MatMulSerializationMode.InputChannels:\n factor = 2\n elif mode == poptorch.MatMulSerializationMode.ReducingDim:\n factor = 2\n elif mode == poptorch.MatMulSerializationMode.OutputChannels:\n factor = 4\n else:\n assert False, \"Invalid mode\"\n\n class BasicNetwork(nn.Module):\n def forward(self, x, y):\n out = poptorch.serializedMatMul(x,\n y,\n mode,\n factor,\n keep_precision=True)\n return out\n\n # Just check we don't explode when the value is set.\n model = BasicNetwork()\n nativeOut = model(lhs, rhs)\n poptorch_model = poptorch.inferenceModel(model)\n poptorch_out = poptorch_model(lhs, rhs)\n\n torch.testing.assert_allclose(poptorch_out, nativeOut)\n\n\ndef test_available_memory_automatic():\n torch.manual_seed(42)\n\n # Just check we don't explode when the value is set.\n class Network(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.layer1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.MaxPool2d(2),\n nn.ReLU())\n self.layer2 = nn.Sequential(nn.Conv2d(10, 20, 5), nn.MaxPool2d(2),\n nn.ReLU())\n self.layer3 = nn.Linear(320, 256)\n self.layer3_act = nn.ReLU()\n self.layer4 = nn.Linear(256, 10)\n\n self.softmax = nn.LogSoftmax(1)\n\n def forward(self, x):\n x = 
self.layer1(x)\n x = self.layer2(x)\n x = x.view(-1, 320)\n\n x = self.layer3_act(self.layer3(x))\n x = self.layer4(x)\n x = self.softmax(x)\n return x\n\n model = Network()\n # Run on CPU.\n input = torch.randn(2, 1, 28, 28)\n nativeOut = model(input)\n\n # Run on IPU.\n opts = poptorch.Options()\n opts.setAvailableMemoryProportion(available_memory_proportion={\n \"IPU0\": 0.7,\n \"IPU1\": 0.2\n })\n\n poptorch_model = poptorch.inferenceModel(model, opts)\n poptorch_out = poptorch_model(input)\n\n torch.testing.assert_allclose(poptorch_out, nativeOut)\n\n\n@pytest.mark.parametrize(\"dim\", range(-3, 3))\ndef test_cumsum(dim):\n class Model(torch.nn.Module):\n def forward(self, x):\n return torch.cumsum(x, dim=dim)\n\n model = Model()\n torch.manual_seed(0)\n input = torch.randn(1, 5, 6, dtype=torch.float32)\n\n execute_and_check_wrapper(model, input)\n","sub_path":"tests/convs_test.py","file_name":"convs_test.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34265988","text":"from unittest import TestCase\nfrom py_cloud_backup.service_dropbox import ServiceDropbox\nfrom credentials import credentials as c\nfrom os import remove\nfrom dropbox.exceptions import (\n ApiError,\n)\n\nfrom py_cloud_backup.data import (\n File,\n Folder\n)\n\nTEST_FOLDER = \"/py_cloud_backup_test\"\n\n\nclass TestServiceDropbox(TestCase):\n\n def setUp(self):\n self._dbx = ServiceDropbox(c[\"dropbox\"][\"token\"])\n\n def file_exists(self, path):\n try:\n self._dbx._dbx.files_get_metadata(path)\n return True\n except ApiError as e:\n if e.error.get_path().is_not_found():\n return False\n else:\n raise\n\n def test_delete(self):\n delete_file = TEST_FOLDER + \"delete_test.txt\"\n self._dbx._dbx.files_upload(b'', delete_file) # create file to delete\n self.assertTrue(self.file_exists(delete_file), \"TESTFILE DOESN'T EXIST! 
DELETE TEST NOT POSSIBLE.\")\n self._dbx.delete(delete_file)\n self.assertFalse(self.file_exists(delete_file))\n\n def test_exists(self):\n exist_path = TEST_FOLDER+\"/exist_test.txt\"\n no_exist_path = TEST_FOLDER+\"/doesnt.txt\"\n self.assertEqual(self.file_exists(exist_path), self._dbx.exists(exist_path))\n self.assertEqual(self.file_exists(no_exist_path), self._dbx.exists(no_exist_path))\n\n def test_dirs(self):\n data = self._dbx.dirs(TEST_FOLDER+\"/dirs_test/\")\n # should be equal with data in test folder\n self.assertEqual(sum(isinstance(x, File) for x in data), 17)\n self.assertEqual(sum(isinstance(x, Folder) for x in data), 2)\n\n def test_chunk(self):\n file, content = self._dbx.chunk(TEST_FOLDER, \"chunk_test.txt\", 20, 456)\n self.assertEqual(\"chunk_test.txt\", file.get_name())\n self.assertEqual(b'r f\\xfcr Tabelle `analy', content) # test string which is in file\n\n def test_open_file(self):\n testfile_name = \"creation_test.txt\"\n testfile_path = TEST_FOLDER+\"/\"+testfile_name\n test_payload = b'ABCDEFGHIJKLMNOP'\n with self._dbx.file(testfile_path) as f:\n f.write(test_payload)\n # file was successfully created?\n self.assertTrue(self.file_exists(testfile_path))\n self._dbx._dbx.files_download_to_file(testfile_name, testfile_path)\n with open(testfile_name, \"rb\") as f:\n # matching content?\n self.assertEqual(test_payload, f.read())\n # ok, delete it!\n self._dbx.delete(testfile_path)\n remove(testfile_name)\n\n def test_create_dir(self):\n testfolder = TEST_FOLDER+\"/testfolder\"\n self._dbx.create_dir(testfolder)\n self.assertTrue(self.file_exists(testfolder))\n self._dbx._dbx.files_delete_v2(testfolder)\n","sub_path":"test/test_service_dropbox.py","file_name":"test_service_dropbox.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89962573","text":"from flask import Flask\nfrom flask_cors import CORS\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_redis import Redis\nfrom adapter.adapter import CephAdapter\nfrom flask_iniconfig import INIConfig\nfrom sqlalchemy_utils.functions import database_exists\n\ndb = SQLAlchemy()\nredis = Redis()\nceph = CephAdapter()\n\nDEFAULT_CONFIG_FILENAME = \"config.ini\"\nDEFAULT_KEY_FILENAME = \"key.json\"\nDEFAULT_CONFIG_DIR = \"config\"\n\ndef create_app(**config_overrides):\n app = Flask(__name__)\n INIConfig(app)\n CORS(app, supports_credentials=True)\n\n config_dir = DEFAULT_CONFIG_DIR\n\n if \"CONFIG_DIR\" in config_overrides:\n config_dir = config_overrides[\"CONFIG_DIR\"]\n\n config_path = config_dir + \"/\" + DEFAULT_CONFIG_FILENAME\n key_path = config_dir + \"/\" + DEFAULT_KEY_FILENAME\n\n app.config.from_inifile(config_path, objectify=True)\n\n app.config.update(app.config.AmoStorageConfig)\n app.config.update(app.config.CephConfig)\n\n db.init_app(app)\n redis.init_app(app)\n\n if not database_exists(app.config[\"SQLALCHEMY_DATABASE_URI\"]):\n with app.app_context():\n from models.metadata import MetaData\n from models.ownership import Ownership\n db.create_all()\n\n ceph.connect(\n host=app.config[\"HOST\"],\n port=app.config[\"PORT\"],\n keyfile_path=key_path,\n default_bucket_name=app.config[\"BUCKET_NAME\"]\n )\n\n from auth.views import auth_app\n from parcels.views import parcels_app\n\n app.register_blueprint(auth_app)\n app.register_blueprint(parcels_app)\n\n return 
app\n","sub_path":"amo_storage.py","file_name":"amo_storage.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462801576","text":"from util.notify_changes import get_crossed_thresholds, values_crossed, checks\nimport unittest\n\nclass NotifyChangesTest(unittest.TestCase):\n\t\"\"\"Tests the notify_changes module for correctness.\"\"\"\n\n\tdef test_values_crossed(self):\n\t\t\"\"\"Ensures that the values_crossed method gives the expected output.\"\"\"\n\n\t\told = {\n\t\t\t'id': 0,\n\t\t\t'age': 'old',\n\t\t\t'values': [1,2,3]\n\t\t}\n\n\t\tnew = {\n\t\t\t'id': 0,\n\t\t\t'age': 'young',\n\t\t\t'values': [4,5,6]\n\t\t}\n\n\t\tthreshold = {\n\t\t\t'id': (checks['is_not'], 0),\n\t\t\t'age': (checks['is'], 'old'),\n\t\t\t'values': (checks['is_above'], 5)\n\t\t}\n\n\t\texpected = {\n\t\t\t'id': False,\n\t\t\t'age': False,\n\t\t\t'values': True\n\t\t}\n\n\t\tfor key, value in list(old.items()):\n\t\t\tself.assertEqual(expected[key], values_crossed(old, new, key, threshold[key][1], threshold[key][0]),\n\t\t\t\t\"The new value of `{0}` wasn't handled correctly.\".format(key))\n\n\tdef test_get_crossed_thresholds(self):\n\t\t\"\"\"Ensures that the get_crossed_thresholds method gives the expected output.\"\"\"\n\n\t\told = {\n\t\t\t'id': 0,\n\t\t\t'ages': ('old', 'young', 'middle-aged'),\n\t\t\t'value': 25\n\t\t}\n\n\t\tnew = {\n\t\t\t'id': 0,\n\t\t\t'ages': ('young', 'young', 'young'),\n\t\t\t'value': 75\n\t\t}\n\n\t\trules = {\n\t\t\t'is_above': {\n\t\t\t\t'value': 50\n\t\t\t},\n\t\t\t'is_not': {\n\t\t\t\t'id': 0\n\t\t\t},\n\t\t\t'is': {\n\t\t\t\t'age': 'old'\n\t\t\t}\n\t\t}\n\n\t\texpected = ['value went above 50.']\n\n\t\tself.assertEqual(expected, get_crossed_thresholds(old, new, rules))\n","sub_path":"tests/test_notify_changes.py","file_name":"test_notify_changes.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14286952","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 18 19:37:45 2020\n\n@author: kisch\n\"\"\"\n\n\nimport unittest\n\nfrom triangle_maximum import TriangleMaximum as TM\n\n\nclass TestTriangleMaximum(unittest.TestCase):\n \n def setUp(self):\n self.tm = TM()\n self.tm.import_triangle(\"test_triangle.txt\")\n\n \n def test_import_triangle_data(self):\n ex = [[3],\n [7,4],\n [2,4,6],\n [8,5,9,3]]\n \n\n self.assertEqual(ex, self.tm.__tr_data__)\n \n def test_find_triangle_maximum(self): \n ex = 23\n self.assertEqual(ex, self.tm.find_triangle_maximum(3))\n \n def test_find_triangle_maximum_p18(self): \n self.tm.import_triangle(\"the_triangle_18.txt\")\n ex = 1074\n self.assertEqual(ex, self.tm.find_triangle_maximum(3))\n \n def test_get_all_sums(self):\n ex = [20, 17, 19, 23, 16, 20, 22, 16]\n sums = []\n self.tm.__add_poss_sums__(sums,0,0,4)\n self.assertEqual(ex, sums)\n \n \n \n\n\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=0)\n","sub_path":"p67-tests.py","file_name":"p67-tests.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265809253","text":"\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import LinearSVC\n\n# load the heart data set\nheart = pd.read_csv('heart2.csv')\nx = heart[['sex','slope','ca','thal']]\ny = heart['target']\n\n# Split the data into training and testing parts\nX_train, X_test, y_train, 
y_test = train_test_split(x, y, test_size=0.3, random_state=0)\nprint(X_train.shape, y_train.shape)\nprint(X_test.shape, y_test.shape)\n\n# Implement linear SVM method using scikit library\nsvm = LinearSVC(random_state=0, tol=1e-5)\nsvm.fit(X_train, y_train)\nprint('Accuracy of SVM classifier on training set: {:.2f}'.format(svm.score(X_train, y_train)))\n# test set accuracy\nprint('Accuracy of SVM classifier on test set: {:.2f}'.format(svm.score(X_test, y_test)))\n","sub_path":"Lab1/Source/Question5/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"386179341","text":"# -*- coding: utf-8 -*-\n# author:lyh\n# datetime:2020/6/4 20:08\n\"\"\"\n238. Product of Array Except Self\n\nGiven an integer array nums of length n (n > 1), return an array output where output[i] equals the product of all the elements of nums except nums[i].\n\n\n\nExample:\n\nInput: [1,2,3,4]\nOutput: [24,12,8,6]\n\n\n\nNote: it is guaranteed that the product of all prefix and suffix elements of any element (and even of the whole array) fits in a 32-bit integer.\n\nConstraint: do not use division, and finish in O(n) time.\n\nFollow-up:\nCan you do it in constant space? (For the purposes of space-complexity analysis, the output array does not count as extra space.)\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n res = [1]\n for i in range(1, len(nums)):\n res.append(res[-1] * nums[i - 1])\n r = 1\n for i in range(len(nums) - 2, -1, -1):\n r *= nums[i + 1]\n res[i] *= r\n return res\n\n\nif __name__ == '__main__':\n print(\n Solution().productExceptSelf([1, 2, 3, 4]),\n )\n","sub_path":"Solutions/0238.productExceptSelf.py","file_name":"0238.productExceptSelf.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"355573216","text":"import math\nimport Similarity as similarity\n\ndef Datasets(db, entrada):\n list_datasets = []\n cursor = db.cursor()\n sql = \"SELECT DISTINCT a.id_dataset FROM `Dataset` AS a INNER JOIN `Entrada` AS b INNER JOIN `Features'` c ON a.nome_dataset = c.nome_dataset AND a.qE >= b.Entites AND a.qC >= b.classpartition AND a.qP >= b.propretypartition AND a.qLS >= b.Linkset AND a.qType >= b.type AND b.id_entrada = \" + str(entrada) + \"\"\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n list_datasets.append(str(row[0]))\n except:\n print (\"Error fetching Datasets\")\n \n cursor.close()\n return list_datasets\n\n\ndef CheckLS(entrada, db):\n cursor = db.cursor()\n sql = \"SELECT LinkSet FROM `Entrada` WHERE id_entrada = '\"+str(entrada)+\"' \" \n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n num_linkset = row[0]\n \n except:\n print (\"Error Checking LinkSet\")\n \n cursor.close()\n return num_linkset\n\ndef CheckCLASS(entrada, db):\n cursor = db.cursor()\n sql = \"SELECT classpartition FROM `Entrada` WHERE id_entrada = '\"+str(entrada)+\"' \" \n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n classpartition = row[0]\n \n except:\n print (\"Error Checking Classpartition\")\n \n cursor.close()\n return classpartition\n\ndef CheckPROPRETY(entrada, db):\n cursor = db.cursor()\n sql = \"SELECT propretypartition FROM `Entrada` WHERE id_entrada = '\"+str(entrada)+\"' \" \n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n propretypartition = row[0]\n \n except:\n print (\"Error Checking PropretyPartition\")\n \n cursor.close()\n return propretypartition\n\ndef CheckCATEGORIES(entrada, db):\n cursor = db.cursor()\n sql = \"SELECT Entites FROM `Entrada` WHERE id_entrada = 
'\"+str(entrada)+\"' \" \n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n categories = row[0]\n \n except:\n print (\"Error Checking Categories\")\n \n cursor.close()\n return categories\n\n\ndef GetNameDataset(id, db):\n cursor = db.cursor()\n sql = \"SELECT nome_dataset FROM Dataset WHERE id_dataset = '\"+str(id)+\"'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n name_dataset = row[0]\n except:\n print (\"Error fetching the Dataset name\")\n \n cursor.close()\n return name_dataset\n\ndef GetGroup(name_dataset, db):\n cursor = db.cursor()\n sql = \"SELECT group_dataset FROM Groups WHERE nome_dataset = '\"+str(name_dataset)+\"'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n group = row[0]\n except:\n print (\"Error while selecting group\")\n \n cursor.close()\n return group\n\ndef AmountLS(name_dataset, db):\n cursor = db.cursor()\n sql = \"SELECT qLS FROM Dataset WHERE nome_dataset = '\"+str(name_dataset)+\"'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n qLS = row[0]\n except:\n print (\"Error fetching LS amount from a dataset\")\n \n cursor.close()\n return qLS\n\ndef AmountCL(name_dataset, db):\n cursor = db.cursor()\n sql = \"SELECT qC FROM Dataset WHERE nome_dataset = '\"+str(name_dataset)+\"'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n qC = row[0]\n except:\n print (\"Error fetching QC amount from a dataset\")\n \n cursor.close()\n return qC\n\ndef AmountqP(name_dataset, db):\n cursor = db.cursor()\n sql = \"SELECT qP FROM Dataset WHERE nome_dataset = '\"+str(name_dataset)+\"'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n qP = row[0]\n except:\n print (\"Error fetching qP amount from a dataset\")\n \n cursor.close()\n return qP\n\ndef AmountqType(name_dataset, db):\n cursor = db.cursor()\n sql = \"SELECT qType FROM Dataset WHERE nome_dataset = '\"+str(name_dataset)+\"'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n qType = row[0]\n except:\n print (\"Error fetching qType amount from a dataset\")\n \n cursor.close()\n return qType\n\ndef GetLinkSet(name_dataset, db):\n void = \"void\"\n rkbexplorer = \"rkbexplorer\"\n id = \"id\"\n aux = \"http://www.johngoodwin.me.uk/family/\"\n list_ls = []\n cursor = db.cursor()\n sql = \"SELECT features FROM Features WHERE nome_dataset = '\"+str(name_dataset)+\"' AND tipo_feature = 'Linkset'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n nome_linkset = row[0]\n \n if(nome_linkset.find(aux) == -1):\n # keep only linksets that contain neither \"id\" nor \"void\"\n if(nome_linkset.find(id) == -1) and (nome_linkset.find(void) == -1): \n list_ls.append(nome_linkset)\n \n except:\n print (\"Error fetching the LinkSet of dataset: \" + str(name_dataset))\n \n cursor.close()\n return list_ls\n\ndef GetClass(name_dataset, db):\n list_class = []\n cursor = db.cursor()\n sql = \"SELECT features FROM Features WHERE nome_dataset = '\"+str(name_dataset)+\"' AND tipo_feature = 'Class'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n nome_class = row[0]\n list_class.append(nome_class)\n \n except:\n print (\"Error fetching the classes of dataset: \" + str(name_dataset))\n \n cursor.close()\n return list_class\n\ndef GetProprety(name_dataset, db):\n list_proprety = []\n cursor = db.cursor()\n sql = \"SELECT features FROM Features WHERE nome_dataset = '\"+str(name_dataset)+\"' AND tipo_feature = 'Proprety'\"\n try:\n cursor.execute(sql)\n result = 
cursor.fetchall()\n for row in result:\n nome_proprety = row[0]\n list_proprety.append(nome_proprety)\n \n except:\n print (\"Error fetching the properties of dataset: \" + str(name_dataset))\n \n cursor.close()\n return list_proprety\n\ndef GetTypes(name_dataset, db):\n list_types = []\n cursor = db.cursor()\n sql = \"SELECT features FROM Features WHERE nome_dataset = '\"+str(name_dataset)+\"' AND tipo_feature = 'Types'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n nome_types = row[0]\n list_types.append(nome_types)\n \n except:\n print (\"Error fetching the types of dataset: \" + str(name_dataset))\n \n cursor.close()\n return list_types\n\n\ndef LSTest(linkset, list_teste, db):\n for i in list_teste:\n #name_dataset = GetNameDataset(i, db)\n if(linkset == i):\n return 1\n \n return 0\n\ndef create_features_ls(list_treinamento, db):\n all_ls = []\n cursor = db.cursor()\n sql = \"SELECT DISTINCT features FROM `Features` WHERE tipo_feature = 'Linkset'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n \n for row in result:\n name_linkset = row[0]\n if(name_linkset in list_treinamento):\n all_ls.append(name_linkset)\n \n \n except:\n print (\"Error fetching all LS\")\n \n print(len(all_ls))\n return all_ls\n #for i in list_treinamento:\n # if(i in all_ls):\n # features.append(i)\n \n #for i in all_ls:\n # if(i in list_treinamento):\n # features.append(i)\n \n \n #return features\n\ndef create_features_types(list_parte1, list_treinamento, db):\n list_types = []\n cursor = db.cursor()\n sql = \"SELECT features, name_dataset FROM `Features'` WHERE tipo_feature = 'Types'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n name_type = row[0]\n if(row[1] in list_treinamento):\n list_types.append(name_type)\n \n except:\n print (\"Error fetching all Types\")\n \n cursor.close()\n return list_types\n \n \n \n \ndef Vector_Tranning_Features(list_parte1, list_treinamento, num_linkset, num_class, num_proprety, num_types, db):\n list_ls_features = []\n list_types_features = []\n \n if(num_linkset > 0):\n list_ls_features = create_features_ls(list_treinamento, db)\n \n if(num_types > 0):\n list_types_features = create_features_types(list_parte1, list_treinamento, db)\n \n # TODO: do the same for class, property, and categories\n final_features = list_ls_features + list_types_features\n return final_features\n\ndef tf(name_dataset, feature, db):\n tf = 0\n cursor = db.cursor()\n sql = \"SELECT `frequencia`, `dataset_size`, `features` FROM `Features'` WHERE nome_dataset = '\"+str(name_dataset)+\"' AND features = '\"+str(feature)+\"'\"\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n linhas = cursor.rowcount\n #if(linhas == 1):\n for row in results:\n frequencia = row[0]\n dataset_size = row[1]\n tf = frequencia / float(dataset_size)\n \n #else:\n # tf = 0\n except:\n print (\"Error fetching tf from dataset \" + str(name_dataset))\n \n cursor.close()\n return tf\n \ndef calucloidf_tranning(feature, datasets_tranning, datasets_test, db):\n cursor = db.cursor()\n cont = 0\n sql = \"SELECT nome_dataset FROM `Features` WHERE features = '\"+str(feature)+\"' \"\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n if(row[0] not in datasets_test):\n cont = cont + 1\n \n #idf = math.log(datasets_tranning/float(cont))\n idf = (datasets_tranning/float(cont))\n \n except:\n print (\"Error compiling training idf\")\n \n cursor.close()\n return idf\n \n \n \n\ndef create_tfidf_tranning(datasets_tranning, size_vector, 
vector_features, datasets_test, num_linkset, num_class, num_proprety, num_types, db):\n size_tranning = len(datasets_tranning)\n list_return = []\n for i in datasets_tranning:\n dict_return = {}\n if(num_linkset > 0):\n ls = GetLinkSet(i, db)\n \n # TODO: do the same for class, property, and categories\n list_final = ls\n vetor_dataset = []\n for j in range(len(vector_features)): \n vetor_dataset.append(0)\n \n for k in list_final:\n if(k in vector_features):\n posicao = vector_features.index(k)\n del vetor_dataset[posicao]\n valor_tf = tf(i, k, db)\n valor_idf = calucloidf_tranning(k, size_tranning, datasets_test, db)\n tf_idf = (valor_tf * valor_idf)\n #print (tf_idf)\n vetor_dataset.insert(posicao, tf_idf)\n \n dict_return[i] = vetor_dataset\n list_return.append(dict_return)\n \n \n return list_return\n \ndef calucloidf_teste(i, total_datasets, db):\n cursor = db.cursor()\n sql = \"SELECT count(nome_dataset) as numero FROM `Features` WHERE features = '\"+str(i)+\"' \"\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n quantidade_dataset = row[0]\n \n #idf = math.log(total/float(quantidade_dataset))\n idf = (total_datasets/float(quantidade_dataset))\n \n except:\n print (\"Error computing idf for the test set\")\n \n cursor.close()\n return idf\n \ndef vetores_teste(nome_dataset, k, size_vector, vector_features, total_datasets, db):\n dict_return = {}\n vetor_dataset = []\n for j in range(size_vector):\n vetor_dataset.append(0)\n \n for i in k:\n if(i in vector_features):\n posicao = vector_features.index(i)\n del vetor_dataset[posicao]\n valor_tf = tf(nome_dataset, i, db)\n valor_idf = calucloidf_teste(i, total_datasets, db)\n #if(valor_tf == 0):\n # tf_idf = 0\n # vetor_dataset.insert(posicao, tf_idf)\n #else:\n \n tf_idf = float(valor_tf * valor_idf)\n vetor_dataset.insert(posicao, tf_idf)\n \n return vetor_dataset \n \n \ndef similary(vetores_teste, vetores_treinamento):\n dict = {}\n for i in vetores_treinamento:\n for nome_treinamento, vetor in i.items():\n result = similarity.cosseno(vetor, vetores_teste)\n dict[nome_treinamento] = result\n \n return dict\n\ndef LSMAP(name_dataset, verificaLS, db):\n result = 0\n cursor = db.cursor()\n sql = \"SELECT features FROM `Features'` WHERE nome_dataset = '\"+str(name_dataset)+\"' AND features = '\"+str(verificaLS)+\"' AND tipo_feature = 'Linkset'\"\n try:\n cursor.execute(sql)\n result = cursor.rowcount\n except:\n print (\"Error in LSMAP\")\n \n cursor.close()\n return result\n\ndef Prob_ls(feature, list_tranning, db):\n sample_space = len(list_tranning)\n event = 0\n cursor = db.cursor()\n sql = \"SELECT nome_dataset FROM Features WHERE features = '\"+str(feature)+\"' AND tipo_feature = 'Linkset'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n if(row[0] in list_tranning):\n event = event + 1\n \n except:\n print (\"Error calculating probability\") \n \n cursor.close()\n prob = float(event)/float(sample_space)\n return prob\n\ndef getFeaturesST(names_treinamento, set_list ,db):\n list_retorno = []\n cursor = db.cursor()\n for k in set_list:\n sql = \"SELECT nome_dataset FROM Features WHERE features = '\"+str(k)+\"'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n if(row[0] in names_treinamento):\n if(row[0] not in list_retorno):\n list_retorno.append(row[0])\n except:\n print (\"Error\")\n \n cursor.close()\n return list_retorno \n \n \n \n \ndef ra(list_features, dict_popularity):\n sum_ = 0\n somatorio = 0\n for k in list_features:\n if(k in dict_popularity):\n get_popularity = 
dict_popularity[k]\n else:\n get_popularity = 0\n \n if(get_popularity == 0):\n result = 0\n else:\n result = 1/float(get_popularity)\n \n somatorio = result + sum_\n sum_ = somatorio\n \n return somatorio\n \ndef DatasetsObjectTarget(list_datasets_features, k, db):\n list_retorno = []\n for i in list_datasets_features:\n list_ls = GetLinkSet(i, db)\n if(k in list_ls):\n list_retorno.append(i)\n \n return list_retorno\n\n\ndef similaritySN(set_list, names_treinamento, dict_popularity, num_linkset, num_class, num_proprety, num_types, db):\n list_datasets_features = getFeaturesST(names_treinamento, set_list, db)\n d = len(names_treinamento)\n dict_similarity = {}\n for k in names_treinamento:\n if(k in dict_popularity):\n get_pa = dict_popularity[k]\n else:\n get_pa = 0\n \n if(num_linkset > 0):\n list_ls = DatasetsObjectTarget(list_datasets_features,k, db)\n # TODO: do the same for class, entities, and properties\n \n list_features = list_ls\n get_ra = ra(list_features, dict_popularity)\n \n result_similarity = similarity.SN(get_pa, get_ra,d)\n dict_similarity[k] = result_similarity\n \n return dict_similarity\n \n \n \n \n \n \n\ndef similarityTI(set_list, names_treinamento, dict_prob, num_linkset, num_class, num_proprety, num_types, db):\n dict_similarity = {}\n list_features = []\n list_ls = []\n list_class = []\n list_proprety = []\n list_types = []\n sum = 0\n for k in set_list:\n get_prob = dict_prob[k]\n if(get_prob == 0):\n log_prob = 0\n else:\n log_prob = math.log(get_prob, 2)\n \n union_test = log_prob + sum\n sum = union_test\n \n sum_ = 0\n for k in names_treinamento:\n multi= 0\n inter = 0\n if(num_linkset > 0):\n list_ls = GetLinkSet(k, db)\n \n if(num_class > 0):\n list_class = []\n \n if(num_proprety > 0):\n list_proprety = []\n \n if(num_types > 0):\n list_types = []\n \n list_features = list_ls + list_class + list_proprety + list_types\n for i in set_list:\n if(i in list_features):\n get_prob = dict_prob[i]\n if(get_prob == 0):\n log_prob = 0\n else:\n log_prob = math.log(get_prob, 2)\n inter = log_prob + multi\n multi = inter\n \n for i in list_features:\n get_prob = dict_prob[i]\n if(get_prob == 0):\n log_prob = 0\n else:\n log_prob = math.log(get_prob, 2)\n \n union_tranning = log_prob + sum_\n sum_ = union_tranning\n \n union = union_tranning + union_test\n result = similarity.TI(inter , union)\n dict_similarity[k] = result\n \n return dict_similarity\n \n \ndef create_tfidf_tranning_binario(datasets_tranning, size_vector, vector_features, datasets_test, num_linkset, num_types, db):\n size_tranning = len(datasets_tranning)\n dict_return = {}\n\n\n \n for i in datasets_tranning:\n vetor_dataset = []\n list_ls = []\n list_types = []\n list_features = []\n \n \n if(num_linkset > 0):\n list_ls = GetLinkSet(i, db)\n \n if(num_types > 0):\n list_types = GetTypes(i, db)\n \n list_features = list_ls + list_types\n \n \n for j in range(size_tranning): \n vetor_dataset.append(0)\n \n if(len(list_features) != 0):\n for k in list_features:\n if(k in vector_features):\n posicao = vector_features.index(k)\n del vetor_dataset[posicao]\n valor_tf = tf(i, k, db)\n valor_idf = calucloidf_tranning(k, size_tranning, datasets_test, db)\n tf_idf = valor_tf * valor_idf\n vetor_dataset.insert(posicao, tf_idf) \n \n dict_return[i] = vetor_dataset\n \n return dict_return\n \ndef LastRelevant(dataset_test, ordern, db):\n total_rank = 0\n posicao = 1\n for dataset in ordern:\n total_rank = total_rank + 1\n relevante = LSMAP(dataset_test, dataset[0], db)\n if(relevante == 1):\n posicao = total_rank\n \n \n 
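# fraction of the ranked list scanned up to the last relevant dataset (1.0 = found only at the bottom)\n 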
return posicao/float(total_rank)\n \ndef somar(valores):\n soma = 0\n for v in valores:\n soma += v\n \n return soma\n\ndef media(valores):\n soma = somar(valores)\n qtd_elementos = len(valores)\n media = soma / float(qtd_elementos)\n return media\n\ndef variancia(valores):\n _media = media(valores)\n print(_media)\n soma = 0\n _variancia = 0\n for valor in valores:\n soma += math.pow( (valor - _media), 2)\n \n _variancia = soma / float( len(valores) )\n return _variancia\n\ndef desvio_padrao(valores):\n return math.sqrt( variancia(valores) )\n \n\ndef Documents(rotation, datasets_tranning, db):\n cursor = db.cursor()\n for i in datasets_tranning:\n \n try:\n cursor.execute(\"\"\"INSERT INTO Results VALUES (%s,%s,%s)\"\"\",(str(rotation),str(i), 0))\n db.commit()\n except:\n db.rollback()\n \n \n cursor.close()\n\ndef Popularity_ls(ls, list_tranning, db):\n cont = 0\n cursor = db.cursor()\n sql = \"SELECT nome_dataset FROM Features WHERE features = '\"+str(ls)+\"' AND tipo_feature = 'Linkset'\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n for row in result:\n if(row[0] in list_tranning):\n cont = cont + 1\n except:\n print (\"Error calculating Popularity_ls\") \n \n cursor.close()\n return cont\n \n \n \n ","sub_path":"Features_Methods/Bank_Methods/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":20573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628599143","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom . import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.home, name='home'),\n url(r'^login/$', views.login, name='login'),\n url(r'^register/$', views.register, name='register'),\n url(r'^oauth/$', views.oauth, name='oauth'),\n url(r'^identity/$', views.identify, name='identity'),\n url(r'^appointments/$', views.appointment_requests, name='appointments'),\n url(r'^appointments/(?P[0-9]+)/actions/$', views.appointment_actions, name='actions'),\n url(r'^refresh_token/$', views.refresh_token, name='refresh_token'),\n url(r'^checkin/$', views.check_in, name='checkin'),\n url(r'^avatar/$', views.avatar, name='avatar'),\n url(r'^update/$', views.update, name='update'),\n url(r'^main/$', views.main, name='main'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'', include('social.apps.django_app.urls', namespace='social')),\n]\n","sub_path":"drchrono/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349640440","text":"my_list = [1,2,3,4,5,6,7,8,9,10]\n\ndef sum_list(listing):\n adding = 0\n for x in listing:\n adding += x\n return adding\n\nprint(sum_list(my_list))\n\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184833817","text":"# MIT License\n#\n# Copyright (c) 2018-2019 Red Hat, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# 
The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport logging\nfrom typing import Union\n\nfrom ogr.abstract import GitProject, CommitStatus\n\nlogger = logging.getLogger(__name__)\n\n\nclass StatusReporter:\n def __init__(\n self, project: GitProject, commit_sha: str,\n ):\n self.project = project\n self.commit_sha = commit_sha\n\n def report(\n self,\n state: CommitStatus,\n description: str,\n url: str = \"\",\n check_names: Union[str, list, None] = None,\n ) -> None:\n \"\"\"\n set commit check status\n\n :param state: state accepted by github\n :param description: the long text\n :param url: url to point to (logs usually)\n :param check_names: those in bold\n \"\"\"\n\n if not check_names:\n logger.warning(\"No checks to set status for.\")\n return\n\n elif isinstance(check_names, str):\n check_names = [check_names]\n\n for check in check_names:\n self.set_status(\n state=state, description=description, check_name=check, url=url\n )\n\n def set_status(\n self, state: CommitStatus, description: str, check_name: str, url: str = \"\",\n ):\n logger.debug(f\"Setting status for check '{check_name}': {description}\")\n self.project.set_commit_status(\n self.commit_sha, state, url, description, check_name, trim=True\n )\n\n def get_statuses(self):\n self.project.get_commit_statuses(commit=self.commit_sha)\n","sub_path":"packit_service/worker/reporting.py","file_name":"reporting.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249575336","text":"import sys\n\nsys.path.insert(0, '../')\nimport os\nimport constants\n\nimport pandas as pd\nimport numpy as np\nimport argparse\n\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import precision_recall_curve\n\n\ndef main(prefix,datasets,algos,n_start,n_end,ss_ratios):\n for ss_ratio in ss_ratios:\n df_pr_auc = pd.DataFrame()\n for dataset in datasets:\n for algo in algos:\n df_detailed_pr_agg = pd.DataFrame()\n df_detailed_pr_is_sig_agg = pd.DataFrame()\n df_detailed_pr_pval_agg = pd.DataFrame()\n\n for cur in np.arange(int(n_start), int(n_end)):\n recovered_dataset_name = \"sol_{}_{}_robustness_{}_{}\".format(algo, dataset, cur, ss_ratio)\n try:\n df_detailed_pr = pd.read_csv(\n os.path.join(constants.ROBUSTNESS_SOLUTIONS_DIR, recovered_dataset_name,\n \"df_detailed_pr.tsv\"),\n sep='\\t', index_col=0)\n except IOError as e:\n print(\"error: {}\".format(e))\n continue\n\n df_detailed_pr_pval_agg = pd.concat([df_detailed_pr_pval_agg, df_detailed_pr['pval']], axis=1)\n df_detailed_pr_is_sig_agg = pd.concat([df_detailed_pr_is_sig_agg, df_detailed_pr['is_significant']],\n axis=1)\n\n try:\n ehr_terms = pd.read_csv(os.path.join(constants.OUTPUT_GLOBAL_DIR, \"oob\",\n \"emp_diff_modules_{}_{}_passed_oob.tsv\".format(dataset, algo)),\n sep='\\t')\n ehr_terms = ehr_terms.loc[ehr_terms[\"passed_oob_permutation_test\"].dropna(axis=0).apply(\n lambda a: 
np.any(np.array(a[1:-1].split(\", \")) == \"True\")).values, :].sort_values(\n by=[\"hg_pval_max\"], ascending=False)['GO id']\n except Exception as e:\n ehr_terms = pd.Series()\n pass\n\n print(\"intersected terms: {}/{}\".format(len(set(ehr_terms).intersection(df_detailed_pr_pval_agg.index)),\n len(ehr_terms.index)))\n df_detailed_pr_agg['pval_frequency'] = df_detailed_pr_pval_agg.apply(lambda a: np.sum(~pd.isnull(a)),\n axis=1)\n df_detailed_pr_agg['is_sig'] = df_detailed_pr_is_sig_agg.apply(lambda a: np.any(a), axis=1)\n\n missing_sig_terms = ehr_terms.loc[~ehr_terms.isin(df_detailed_pr_pval_agg.index)]\n print(\"n of missing terms: {}/{}\".format(missing_sig_terms.shape[0], df_detailed_pr_pval_agg.shape[0]))\n for cur_missing_term in missing_sig_terms.values:\n df_detailed_pr_agg.loc[cur_missing_term, 'is_sig'] = False\n df_detailed_pr_agg.loc[cur_missing_term, 'pval_frequency'] = 0\n df_detailed_pr_agg = df_detailed_pr_agg.sort_values(by=['pval_frequency'], ascending=False)\n\n y_test = df_detailed_pr_agg['is_sig'].values.astype(np.int)\n print(\"df shape: {}\".format(df_detailed_pr_agg.shape))\n y_score = df_detailed_pr_agg['pval_frequency'].values / float(n_end - n_start)\n\n print(\"y_test (total={}):\\n{}\".format(np.sum(y_test), y_test))\n print(\"y_score:\\n{}\".format(y_score))\n\n df_pr_auc.loc[algo, dataset] = np.nan\n if len(y_score) != 0:\n average_precision = average_precision_score(y_test, y_score)\n if np.isnan(average_precision):\n average_precision = 0\n df_detailed_pr_agg.to_csv(os.path.join(constants.OUTPUT_GLOBAL_DIR, \"robustness_cache\",\n \"recovery_terms_frequency_{}_{}.tsv\".format(n_end,\n ss_ratio)),\n sep='\\t')\n\n precision, recall, _ = precision_recall_curve(y_test, y_score)\n\n print(\"average precision: {}\".format(average_precision))\n df_pr_auc.loc[algo, dataset] = average_precision\n df_pr_auc.to_csv(os.path.join(constants.OUTPUT_GLOBAL_DIR, \"evaluation\",\n \"robustness_auc_{}_{}_{}.tsv\".format(prefix, n_end, ss_ratio)), sep='\\t')\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='args')\n parser.add_argument('--datasets', dest='datasets')\n parser.add_argument('--algos', dest='algos')\n parser.add_argument('--prefix', dest='prefix', default=\"GE\")\n parser.add_argument('--n_start', help=\"number of iterations (total n permutation is pf*(n_end-n_start))\",\n dest='n_start', default=0)\n parser.add_argument('--n_end', help=\"number of iterations (total n permutation is pf*(n_end-n_start))\",\n dest='n_end', default=100)\n parser.add_argument('--ss_ratios', help=\"ss_ratios\", dest='ss_ratios', default=\"0.4,0.3,0.2,0.1\")\n\n args = parser.parse_args()\n\n datasets = args.datasets.split(\",\")\n algos = args.algos.split(\",\")\n n_start = int(args.n_start)\n n_end = int(args.n_end)\n prefix = args.prefix\n ss_ratios = [float(a) for a in args.ss_ratios.split(\",\")]\n main(prefix,datasets,algos,n_start,n_end,ss_ratios)\n","sub_path":"src/evaluation/robustness_aupr.py","file_name":"robustness_aupr.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"172009768","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2009 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\"\"\"\n\"\"\"\nfrom south.db import db\nfrom noc.peer.models import *\n\nclass Migration:\n\n def forwards(self):\n 
if db.execute(\"SELECT COUNT(*) FROM peer_maintainer\")[0][0]==0:\n rir_id=db.execute(\"SELECT id FROM peer_rir LIMIT 1\")[0][0]\n db.execute(\"INSERT INTO peer_maintainer(maintainer,description,auth,rir_id) VALUES(%s,%s,%s,%s)\",\n [\"Default maintainer\",\"Please change to your maintainer\",\"NO AUTH\",rir_id]\n )\n\n def backwards(self):\n \"Write your backwards migration here\"\n","sub_path":"peer/migrations/0017_default_maintainer.py","file_name":"0017_default_maintainer.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277459979","text":"import matplotlib\nmatplotlib.use('Agg')\n\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nsns.set_style('white')\nsns.set_context('talk')\n\n\ndef plot_heatmap(mat, fname):\n \"\"\"Plot covariance of N samples of dimension D, shape (D, N).\"\"\"\n system_size = mat.shape[0]\n fig, ax = plt.subplots(figsize=(5 * 1.618, 5))\n sns.heatmap(mat[::-1], ax=ax, xticklabels=[], yticklabels=[])\n plt.savefig(fname, bbox_inches='tight')\n plt.close()\n\n\ndef plot_sample(sample, fname):\n \"\"\"Black and white plot of Ising model samples.\"\"\"\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.imshow(sample, cmap=plt.cm.gray)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.savefig(fname, bbox_inches='tight')\n plt.close()\n","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"9208454","text":"## Class holding info data for a race.\n## Examples of info_str output:\n\n## Cadence Seeded\n## Seed: 1234567\n\n## Coda Unseeded -- Flagplant\n\n## Bolt Seeded -- Sudden Death Flagplant\n## Seed: 1234567\n\n## Cadence 4-Shrine Unseeded -- Flagplant\n\n## Examples of raceroom_str output:\n\n## cadence-s\n## coda-uf\n## bolt-sdf\n## 4-shrine-uf\n\nimport clparse\nimport seedgen\n\nNDChars = ['Cadence', 'Melody', 'Aria', 'Dorian', 'Eli', 'Monk', 'Dove', 'Coda', 'Bolt', 'Bard'] \nSEEDED_FLAG = int(pow(2,0))\nSUDDEN_DEATH_FLAG = int(pow(2,1))\nFLAGPLANT_FLAG = int(pow(2,2))\n\ndef _parse_seed(args, race_info):\n #note: this allows `-s (int)` to set a specific seed, while `-s` just sets seeded.\n #important that _parse_seed be called before _parse_seeded for this to work.\n command_list = ['seed', 's'] \n if args and len(args) >= 2 and args[0] in command_list:\n try:\n race_info.seed = int(args[1])\n args = args[2:]\n return True\n except ValueError:\n return False\n return False\n \ndef _parse_seeded(args, race_info):\n seeded_commands = ['s', 'seeded']\n unseeded_commands = ['u', 'unseeded']\n\n if args:\n if args[0] in seeded_commands:\n race_info.seeded = True\n args.pop(0)\n return True\n elif args[0] in unseeded_commands:\n race_info.seeded = False\n args.pop(0)\n return True\n return False \n\ndef _parse_char(args, race_info):\n command_list = ['c', 'char', 'character']\n\n if args:\n if len(args) >= 2 and args[0] in command_list:\n if args[1].capitalize() in NDChars:\n race_info.character = args[1].capitalize()\n args = args[2:]\n return True\n elif args[0].capitalize() in NDChars:\n race_info.character = args[0].capitalize()\n args = args[1:]\n return True \n \n return False\n\n##def _parse_sudden_death(args, race_info):\n##\n##def _parse_flagplant(args, race_info):\n\ndef _parse_desc(args, race_info):\n command_list = ['custom']\n\n if args and len(args) >= 2 and args[0] in command_list:\n args.pop(0)\n 
desc = ''\n for arg in args:\n desc += arg + ' '\n race_info.descriptor = desc[:-1]\n return True\n return False\n \n\n# Attempts to parse the given command-line args into a race-info\n# Returns True on success, False on failure\n# Warning: destroys information in the list args\ndef parse_args(args):\n race_info = RaceInfo()\n return parse_args_modify(args, race_info)\n\ndef parse_args_modify(args, race_info):\n set_seed = False #keep track of whether we've found args for each field\n set_seeded = False \n set_char = False\n set_desc = False\n set_sd = False\n set_fp = False\n\n while args:\n next_cmd_args = clparse.pop_command(args)\n if not next_cmd_args:\n next_cmd_args.append(args[0])\n args.pop(0)\n \n if _parse_seed(next_cmd_args, race_info):\n if set_seed:\n return None\n else:\n set_seed = True\n elif _parse_seeded(next_cmd_args, race_info):\n if set_seeded:\n return None\n else:\n set_seeded = True\n elif _parse_char(next_cmd_args, race_info):\n if set_char:\n return None\n else:\n set_char = True\n## elif parse_sudden_death(args, race_info):\n## if set_sd:\n## return False\n## else:\n## set_seeded = True\n## elif parse_flagplant(args, race_info):\n## if set_fp:\n## return False\n## else:\n## set_seeded = True\n elif _parse_desc(next_cmd_args, race_info):\n if set_desc:\n return None\n else:\n set_desc = True\n else:\n return None\n\n if race_info.seeded:\n race_info.seed_fixed = set_seed\n if not set_seed:\n race_info.seed = seedgen.get_new_seed()\n elif set_seed and set_seeded: #user set a seed and asked for unseeded, so throw up our hands\n return None\n elif set_seed:\n race_info.seeded = True\n\n return race_info \n\nclass RaceInfo(object):\n\n def __init__(self):\n self.seed = int(0) #the seed for the race\n self.seed_fixed = False #True means the specific seed is part of the race rules (seed doesn't change on rematch)\n self.seeded = True #whether the race is run in seeded mode\n self.character = 'Cadence' #the character for the race\n self.descriptor = 'All-zones' #a short description (e.g. '4-shrines', 'leprechaun hunting', etc)\n self.sudden_death = False #whether the race is sudden-death (cannot restart race after death)\n self.flagplant = False #whether flagplanting is considered as a victory condition\n\n def copy(self):\n the_copy = RaceInfo()\n the_copy.seed = self.seed if self.seed_fixed else seedgen.get_new_seed()\n the_copy.seed_fixed = self.seed_fixed\n the_copy.seeded = self.seeded\n the_copy.character = self.character\n the_copy.descriptor = self.descriptor\n the_copy.sudden_death = self.sudden_death\n the_copy.flagplant = self.flagplant\n return the_copy\n\n @property\n def flags(self):\n return int(self.seeded)*SEEDED_FLAG + int(self.sudden_death)*SUDDEN_DEATH_FLAG + int(self.flagplant)*FLAGPLANT_FLAG\n \n #returns a (possibly multi-line) string that can be used to header results for the race\n #depricated. do not use. 
use format_str and seed_str instead.\n def info_str(self): \n seeded_rider = '\\n'\n if self.seeded:\n seeded_rider += 'Seed: {0}\\n'.format(self.seed)\n \n return self.format_str() + seeded_rider\n\n #returns a string \"Seed: (int)\" if the race is seeded, or the empty string otherwise\n def seed_str(self):\n if self.seeded:\n return 'Seed: {0}'.format(self.seed)\n else:\n return ''\n\n #returns a one-line string for identifying race format\n def format_str(self):\n char_str = (self.character.title() + ' ') if (self.character.title() in NDChars) else ''\n desc_str = (self.descriptor + ' ') if not self.descriptor == 'All-zones' else ''\n seeded_str = 'Seeded' if self.seeded else 'Unseeded'\n addon_str = ''\n if self.sudden_death:\n addon_str += \"Sudden Death \"\n if self.flagplant:\n addon_str += \"Flagplant \"\n if addon_str:\n addon_str = ' -- {0}'.format(addon_str.rstrip())\n\n return char_str + desc_str + seeded_str + addon_str\n \n #returns an abbreviated string suitable for identifying this race\n def raceroom_name(self):\n main_identifier = ''\n if self.character.title() in NDChars:\n main_identifier = self.character.lower()\n else:\n main_identifier = self.descriptor.lower()\n\n tags = 's' if self.seeded else 'u'\n if self.sudden_death:\n tags += 'd'\n if self.flagplant:\n tags += 'f'\n\n return '{0}-{1}'.format(main_identifier, tags)\n","sub_path":"raceinfo.py","file_name":"raceinfo.py","file_ext":"py","file_size_in_byte":7384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631292578","text":"\n# coding: utf-8\n\n# In[103]:\n\nimport os\nimport numpy as np\nimport sys\nimport collections as coll\ndef train_val_test_split(trainNum=400,valNum=100,testNum=100):\n datasetName = 'scene12_' + str(trainNum) \n savePath = os.path.join(os.getcwd(),'datasets',datasetName)\n loadAPath = os.path.join(os.getcwd(),'data','A') # containing all 3600 images\n loadBPath = os.path.join(os.getcwd(),'data','B') # containing all 3600 images\n \n if not os.path.exists(savePath):\n os.makedirs(savePath)\n print('trainNum={} valNum={} testNum={}'.format(trainNum,valNum,testNum))\n # load all numpy array names \n fileNames = [name for name in os.listdir(loadAPath)]\n \n # save random indices for training, validation and testing set\n trainIdx = []\n valIdx = []\n testIdx = []\n \n beg = 0\n end = len(fileNames)//6\n delta = end\n size_per_scene = (trainNum + valNum + testNum)//6\n print(size_per_scene)\n for i in range(6):\n print('beg:{} end: {}'.format(beg,end))\n allidx = np.random.choice(range(beg,end),size_per_scene,replace=False)\n if i < 5:\n trainIdx.extend(allidx[0:trainNum//6])\n valIdx.extend(allidx[trainNum//6:(trainNum+valNum)//6])\n testIdx.extend(allidx[(trainNum+valNum)//6:])\n else:\n trainCurrentSize = len(trainIdx)\n valCurrentSize = len(valIdx)\n testCurrentSize = len(testIdx)\n\n trainIdx.extend(allidx[0:trainNum - trainCurrentSize])\n valIdx.extend(allidx[trainNum -trainCurrentSize:(trainNum+valNum) - trainCurrentSize - valCurrentSize])\n testIdx.extend(allidx[(trainNum+valNum) - trainCurrentSize - valCurrentSize:])\n beg += delta\n end += delta \n #end = min(len(fileNames),end)\n \n print('size of train: {}'.format(len(trainIdx)))\n print('size of val: {}'.format(len(valIdx)))\n print('size of test: {}'.format(len(testIdx)))\n \n # shuffle training idx once more\n np.random.shuffle(trainIdx)\n # print(trainIdx)\n # save training set\n saveData([fileNames[i] for i in trainIdx],'train',savePath,loadAPath)\n 
saveData([fileNames[i] for i in trainIdx],'train',savePath,loadBPath)\n\n # save val set\n saveData([fileNames[i] for i in valIdx],'val',savePath,loadAPath)\n saveData([fileNames[i] for i in valIdx],'val',savePath,loadBPath)\n \n # save test set\n saveData([fileNames[i] for i in testIdx],'test',savePath,loadAPath)\n saveData([fileNames[i] for i in testIdx],'test',savePath,loadBPath)\n \n\n\ndef saveData(data,name,savePath,loadPath):\n dataType = loadPath[-1] \n np.random.shuffle(data)\n for d in data:\n img = np.load(os.path.join(loadPath,d))\n path = os.path.join(savePath,name,dataType)\n if not os.path.exists(path):\n os.makedirs(path) \n np.save(os.path.join(path,d),img)\n\n\n \n \n\nif __name__ == '__main__':\n if len(sys.argv) == 4:\n trainNum = int(sys.argv[1])\n valNum = int(sys.argv[2])\n testNum = int(sys.argv[3])\n else:\n trainNum = 2400\n valNum = 600\n testNum = 600\n \n train_val_test_split(trainNum,valNum,testNum)\n \n \n \n \n \n \n \n\n","sub_path":"train_val_test_split.py","file_name":"train_val_test_split.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459601172","text":"import pygame.font\nimport pygame\n\nfrom pygame.sprite import Group\nfrom star import Star\n\nclass ScoreBoard():\n\n def __init__(self, so_settings, screen, stats, bullets, ship):\n self.so_settings = so_settings\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.stats = stats\n self.ship = ship\n \n self.screen_rect = self.screen.get_rect()\n\n self.board_width = self.so_settings.screen_width\n self.board_height = (self.so_settings.screen_height/20)\n\n self.rect = pygame.Surface((self.board_width, self.board_height), pygame.SRCALPHA)\n \n \n self.rect.fill((170,170,170,240))\n \n self.text_color = (9,0,255)\n self.font = pygame.font.SysFont(None, 48)\n\n self.prep_score()\n self.prep_stars()\n\n def health_bar(self):\n if self.ship.health > 75:\n self.health_color = (0, 204, 0)\n elif self.ship.health > 50:\n self.health_color = (204, 204, 0)\n else:\n self.health_color = (204, 0, 0)\n \n\n def draw_board(self):\n pygame.draw.rect(self.screen, self.board_color, self.rect)\n\n def prep_score(self):\n round_score = int(round(self.stats.total_score, -1))\n score_str = \"{:,}\".format(round_score)\n self.score_image = self.font.render(score_str, True, self.text_color)\n\n self.score_rect = self.score_image.get_rect()\n self.score_rect.right = self.screen_rect.right - 20\n self.score_rect.top = 20\n\n def prep_stars(self):\n self.stars = Group()\n for star_number in range(self.stats.ships_left):\n star = Star()\n star.rect.x = (self.screen_rect.right - 35) - star_number * star.rect.width\n star.rect.y = 595\n self.stars.add(star)\n \n def show_board_elements(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.health_bar()\n pygame.draw.rect(self.screen, self.health_color, (10, 600, self.ship.health*1.5, 15))\n self.stars.draw(self.screen)\n","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171740435","text":"#!/usr/bin/env python\n\nimport utils\nfrom utils.video import FileVideoStream\nfrom utils import face_utils\nimport numpy as np\nimport datetime\nimport argparse\nimport time\nimport dlib\nimport cv2\nimport skvideo.io\n\ndef detection(image, args):\n detector = dlib.get_frontal_face_detector()\n predictor = 
dlib.shape_predictor(args['shape_predictor'])\n\n image = utils.resize(image, width=500)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n rects = detector(gray, 1) \n for i, rect in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n for (x, y) in shape:\n cv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n \n return image\n\ndef main(args):\n #vs = VideoStream(src=args['video_filepath'], usePiCamera=args['picamera'] > 0).start()\n vs = FileVideoStream(args['video_filepath']).start()\n time.sleep(2.0)\n\n frames = []\n cnt = 0\n while True:\n cnt += 1\n frame = vs.read()\n if frame is None:\n break\n frame = detection(frame, args)\n print(cnt, frame.shape)\n frames.append(frame)\n\n frames = np.stack(frames)\n print(frames.shape)\n #skvideo.io.vwrite('output', frames)\n for i, frame in enumerate(frames):\n cv2.imwrite('%d.png' % i, frame) \n vs.stop()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-p', '--shape_predictor', required=None,\n default='shape_predictor_68_face_landmarks.dat',\n help='path to facial landmark predictor')\n ap.add_argument('-v', '--video_filepath', required=None,\n default='video.mpg',\n help='path to input video')\n ap.add_argument('-r', '--picamera', type=int, default=-1,\n help='whether or not the Raspberry Pi camera should be used')\n args = vars(ap.parse_args())\n print(args)\n main(args)\n","sub_path":"python/image/dlib/examples/video_detection.py","file_name":"video_detection.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206148181","text":"from pokemon import Pokemon, typeNum, relationToLvl\n\n# 025 : Pikachu\nclass P025 (Pokemon):\n\n\tpokeID = 25\n\tevolveID = 26\n\tlvlRate = relationToLvl['MEDIUM-FAST']\n\tTYP1 = typeNum['ELECTRIC']\n\tTYP2 = typeNum['BLANK']\n\tbaseHP = 35\n\tbaseATK = 55\n\tbaseDEF = 30\n\tbaseSPC = 50\n\tbaseSPD = 90\n\n\tlearnset = {\n\t\t1 : 'ThunderShock', \n\t\t1 : 'Growl',\n\t\t9 : 'Thunder Wave',\n\t\t16 : 'Quick Attack',\n\t\t26 : 'Swift',\n\t\t33 : 'Agility',\n\t\t43 : 'Thunder',\n\t}\n\n\tlearnable_moves = {\n\t\t'Agility',\n\t\t'Bide',\n\t\t'Body Slam',\n\t\t'Double Team',\n\t\t'Double-Edge',\n\t\t'Flash',\n\t\t'Growl',\n\t\t'Light Screen',\n\t\t'Mega Kick',\n\t\t'Mega Punch',\n\t\t'Mimic',\n\t\t'Pay Day',\n\t\t'Quick Attack',\n\t\t'Rage',\n\t\t'Reflect',\n\t\t'Rest',\n\t\t'Seismic Toss',\n\t\t'Skull Bash',\n\t\t'Slam',\n\t\t'Submission',\n\t\t'Substitute',\n\t\t'Surf',\n\t\t'Swift',\n\t\t'Tail Whip',\n\t\t'Take Down',\n\t\t'Thunder',\n\t\t'Thunder Shock',\n\t\t'Thunder Wave',\n\t\t'Toxic'\t\n\t}\n\n# 123 : Scyther\nclass P123 (Pokemon):\n\n\tpokeID = 123\n\tevolveID = 0\n\tlvlRate = relationToLvl['MEDIUM-FAST']\n\tTYP1 = typeNum['BUG']\n\tTYP2 = typeNum['FLYING']\n\tbaseHP = 70\t\n\tbaseATK = 110\t\n\tbaseDEF = 80\t\n\tbaseSPC = 55\t\n\tbaseSPD = 105\n\n\tlearnset = {\n\t\t17 : 'Leer', \n\t\t20 : 'Focus Energy', \n\t\t24 : 'Double Team', \n\t\t29 : 'Slash', \n\t\t35 : 'Swords Dance', \n\t\t42 : 'Agility',\n\t}\n\n\tlearnable_moves = {\n\t\t'Agility', \n\t\t'Bide', \n\t\t'Cut', \n\t\t'Double Team', \n\t\t'Double-Edge', \n\t\t'Focus Energy', \n\t\t'Hyper Beam', \n\t\t'Leer', \n\t\t'Mimic', \n\t\t'Quick Attack', \n\t\t'Rage', \n\t\t'Rest', \n\t\t'Skull Bash', \n\t\t'Slash', \n\t\t'Substitute', \n\t\t'Swift', \n\t\t'Swords Dance', \n\t\t'Take Down', \n\t\t'Toxic', \n\t\t'Wing 
Attack'\n\t}\n","sub_path":"modificado/pokedex.py","file_name":"pokedex.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331499993","text":"# 2개의 파일을 만들어서\n# earlystopping을 적용하지 않은 최고의 모델\n\nimport numpy as np\nfrom tensorflow.keras.datasets import boston_housing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import r2_score, mean_squared_error\n\n#1 데이터\n(x_train, y_train), (x_test, y_test) = boston_housing.load_data()\n\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.8, random_state= 104)\n\n\nscaler = MinMaxScaler()\nscaler.fit(x_train)\nx_test = scaler.transform(x_test)\nx_val = scaler.transform(x_val)\nx_train = scaler.transform(x_train)\n\n# print(x_train.shape)\n# print(y_train.shape)\n\n#2 모델 구성\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\nmodel = Sequential()\nmodel.add(Dense(356,activation='relu',input_dim=13))\nmodel.add(Dense(128,activation='relu'))\nmodel.add(Dense(128,activation='relu'))\nmodel.add(Dense(64,activation='relu'))\nmodel.add(Dense(64,activation='relu'))\nmodel.add(Dense(1))\n\n#3 컴파일 훈련\nmodel.compile(loss='mse',optimizer='adam',metrics='mae')\nmodel.fit(x_train, y_train, epochs=250, batch_size=4, validation_data=(x_val, y_val), verbose=1)\n\n#4 평가 예측\nloss, mse = model.evaluate(x_test, y_test, batch_size=4)\nprint('loss, mse : ',loss, mse)\n\ny_predict = model.predict(x_test)\n\ndef RMSE(y_test, y_predict): return np.sqrt(mean_squared_error(y_test, y_predict))\nprint('RMSE : ', RMSE(y_test, y_predict ))\n\nr2 = r2_score(y_test, y_predict)\nprint('R2: ', r2)\n\n# loss, mse : 9.877310752868652 2.2219297885894775\n# RMSE : 3.1428189473448787\n# R2: 0.8813448547679702","sub_path":"keras/keras20_boston_keras1.py","file_name":"keras20_boston_keras1.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528769377","text":"# -*- coding:utf-8 -*-\n\nimport arrow\nimport json\nimport gevent\nfrom abc import ABCMeta, abstractmethod, abstractproperty\n\nfrom influxdb import InfluxDBClient\nfrom corgi.channel import Channel, RecvPoller\nfrom corgi.log import STD_LOG as LOG\nfrom common.validator import ts_item_validator\n\n\nclass Symbol:\n COMMA = ','\n WHITESPACE = ' '\n EQUAL = '='\n\n\nclass TSItemFormat:\n DICT = 'DICT'\n JSON = 'JSON'\n LINE_PROTOCOL = 'LINE_PROTOCOL'\n\n\n# TODO 可以加密所有序列化到磁盘上的数据,避免手动修改某些数据造成错误格式\nclass TSItem(object):\n def __init__(self, data, data_format):\n if data_format == TSItemFormat.DICT:\n self._item = data\n if data_format == TSItemFormat.JSON:\n self._item = json.loads(data)\n if data_format == TSItemFormat.LINE_PROTOCOL:\n self._item = self.convert_lp_to_d(data)\n\n def convert_lp_to_d(self, lp):\n d = {}\n elements = lp.split(Symbol.WHITESPACE)\n ns_and_tags = elements[0].split(Symbol.COMMA)\n d['namespace'] = ns_and_tags[0]\n d['tags'] = []\n if Symbol.COMMA in elements[0]:\n for tag in ns_and_tags[1:]:\n d['tags'].append({tag.split(Symbol.EQUAL)[0]: tag.split(Symbol.EQUAL)[1]})\n d['fields'] = []\n fields = elements[1]\n for field in fields.split(Symbol.COMMA):\n d['fields'].append({field.split(Symbol.EQUAL)[0]: field.split(Symbol.EQUAL)[1]})\n d['timestamp'] = elements[2]\n return d\n\n @property\n def namespace(self):\n return self._item['namespace']\n\n @property\n def tags(self):\n 
return self._item['tags']\n\n @property\n def fields(self):\n return self._item['fields']\n\n @property\n def timestamp(self):\n return self._item['timestamp']\n\n def to_dict(self):\n return self._item\n\n def to_json(self):\n return json.dumps(self._item)\n\n def to_lp(self):\n tags = [Symbol.EQUAL.join([list(tag.keys())[0], str(list(tag.values())[0])]) for tag in self.tags]\n tags = Symbol.COMMA.join(tags)\n ns_and_tags = Symbol.COMMA.join([self.namespace, tags])\n fields = [Symbol.EQUAL.join([list(field.keys())[0], str(list(field.values())[0])]) for field in self.fields]\n fields = Symbol.COMMA.join(fields)\n timestamp = self.timestamp\n lp = Symbol.WHITESPACE.join([ns_and_tags, fields, str(timestamp)])\n return lp\n\n def __repr__(self):\n return self.to_lp()\n\n\nclass TSStorageType(object):\n STDOUT = 'STDOUT'\n INFLUXDB = 'INFLUXDB'\n\n\nclass BaseTSStorage(object):\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def push_metric(self, ts_item):\n pass\n\n\nclass InfluxCon(object):\n def __init__(self, host, port, user, passwd, db):\n self.host = host\n self.port = port\n self.user = user\n self.passwd = passwd\n self.db = db\n\n\nclass InfluxStorage(BaseTSStorage):\n def __init__(self, con):\n assert isinstance(con, InfluxCon)\n self.client = InfluxDBClient(con.host, con.port, con.user, con.passwd, con.db)\n self.client.create_database(con.db)\n\n def push_metric(self, ts_item):\n assert isinstance(ts_item, TSItem)\n point = {\n 'measurement': ts_item.namespace,\n 'tags': dict(list(e.items())[0] for e in ts_item.tags),\n 'time': arrow.get(ts_item.timestamp / 1000 / 1000).isoformat(),\n 'fields': dict(list(e.items())[0] for e in ts_item.fields)\n }\n LOG.debug('push point to influxdb. point:{}'.format(point))\n self.client.write_points([point])\n # if ts_item.namespace == 'container_events':\n # print('push success', point)\n\n\nclass StdoutStorage(BaseTSStorage):\n def __init__(self, con):\n pass\n\n def push_metric(self, ts_item):\n assert isinstance(ts_item, TSItem)\n LOG.info('push metric to stdout storage: {}'.format(ts_item))\n\n\nclass TSStorage(object):\n def __init__(self, storage_type=TSStorageType.INFLUXDB):\n self.storage_type = storage_type\n self.push_c = Channel()\n self.quit_c = Channel()\n\n def append(self, data):\n ts_item_validator.validate(data)\n ti = TSItem(data, TSItemFormat.DICT)\n self.push_c.send(ti.to_lp())\n\n def push(self, lp):\n ti = TSItem(lp, TSItemFormat.LINE_PROTOCOL)\n if self.storage_type == TSStorageType.INFLUXDB:\n pass\n if self.storage_type == TSStorageType.STDOUT:\n LOG.info('push item: {}, {}'.format(ti.to_dict(), ti.to_lp()))\n\n def start(self):\n def _run():\n polling = True\n poller = RecvPoller([self.push_c, self.quit_c])\n while polling:\n for c, s, m in poller.poll():\n if c is self.quit_c:\n polling = False\n break\n if c is self.push_c:\n self.push(m)\n\n gevent.spawn(_run)\n gevent.sleep(0)\n","sub_path":"storage/ts_storage.py","file_name":"ts_storage.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21462591","text":"# !/usr/bin/env python \n# -*- coding: UTF-8 -*- \n# @Time: 2020/5/12 22:19 \n# @Author: Zhang Cong\n\nimport random\nimport logging\nfrom tqdm import tqdm\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n\ndef generate_train_data(input_file_path, output_file_path):\n '''\n 对原始csv数据进行格式转换,构建训练数据集\n :param input_file_path: 原始数据路径\n :param output_file_path: 构建完成的训练数据路径\n 
:return: 将数据存储至本地\n '''\n logging.info('Start get all sentence ...')\n # 获取全部句子集\n all_sentence = []\n for line in tqdm(open(input_file_path, encoding='utf-8')):\n line = line.replace('\\n', '').split('\\t')\n if line[2] == 'label': # 跳过首行\n continue\n sentence_1 = str(line[0]).replace('\\t', '') # 句子1\n sentence_2 = str(line[1]).replace('\\t', '') # 句子2\n all_sentence.append(sentence_1)\n all_sentence.append(sentence_2)\n # 去重\n all_sentence = list(set(all_sentence))\n\n logging.info('Start generate dataset ...')\n # 构建训练数据集 [query, pos, neg_1, neg_2, neg_3, neg_4]\n output_file = open(output_file_path, mode='w', encoding='utf-8')\n for line in tqdm(open(input_file_path, encoding='utf-8')):\n line = line.replace('\\n', '').split('\\t')\n if line[2] == 'label': # 跳过首行\n continue\n sentence_list = []\n sentence_1 = str(line[0]).replace('\\t', '')\n sentence_2 = str(line[1]).replace('\\t', '')\n sentence_list.append(sentence_1) # 句子1\n sentence_list.append(sentence_2) # 句子2\n label = line[2] # 标签\n\n if int(label)==1: # 如果标签为1,则保留此句子对,并随机负采样得到4个负例\n while len(sentence_list)<6: # [query, pos, neg_1, neg_2, neg_3, neg_4]\n index = random.randint(0, len(all_sentence)-1) # 随机索引\n if all_sentence[index] not in sentence_list: # 如果不重复,则加入\n sentence_list.append(all_sentence[index])\n output_file.write('\\t'.join(sentence_list) + '\\n')\n output_file.close()\n logging.info('Finishied generate dataset ...')\n\n\ndef generate_test_data(input_file_path, output_file_path):\n '''\n 对原始csv数据进行格式转换,构建测试数据集\n :param input_file_path: 原始数据路径\n :param output_file_path: 构建完成的训练数据路径\n :return: 将数据存储至本地\n '''\n logging.info('Start get all sentence ...')\n output_file = open(output_file_path, mode='w', encoding='utf-8')\n for line in tqdm(open(input_file_path, encoding='utf-8')):\n line = line.replace('\\n', '').split('\\t')\n if line[2] == 'label': # 跳过首行\n continue\n sentence_1 = str(line[0]).replace('\\t', '') # 句子1\n sentence_2 = str(line[1]).replace('\\t', '') # 句子2\n label = line[2] # 标签\n output_file.write(sentence_1 + '\\t' + sentence_2 + '\\t' + label + '\\n')\n\n\ndef check_data(input_file_path):\n '''\n 统计数据分布情况,检查数据集0/1分布是否均衡\n :param input_file_path: 数据路径\n :return:\n '''\n count = 0\n for line in tqdm(open(input_file_path, encoding='utf-8')):\n line = line.replace('\\n', '').split('\\t')\n if line[2] == 'label':\n continue\n if int(line[2]) == 1:\n count += 1\n print(count)\n\n\nif __name__ == '__main__':\n\n # 统计数据分布情况\n file_path = './data/lcqmc/lcqmc_train.tsv'\n check_data(file_path)\n\n # 构建训练数据集\n input_file_path = './data/lcqmc/lcqmc_train.tsv'\n output_file_path = './data/train.txt'\n generate_train_data(input_file_path, output_file_path)\n logging.info('Success generate train.txt')\n\n # 构建验证数据集\n input_file_path = './data/lcqmc/lcqmc_dev.tsv'\n output_file_path = './data/dev.txt'\n generate_test_data(input_file_path, output_file_path)\n logging.info('Success generate dev.txt')\n\n # 构建测试数据集\n # input_file_path = './data/lcqmc/lcqmc_test.tsv'\n # output_file_path = './data/test.txt'\n # generate_test_data(input_file_path, output_file_path)\n # logging.info('Success generate test.txt')\n\n","sub_path":"DSSM-CNN/Data_Generate.py","file_name":"Data_Generate.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"634574629","text":"\"\"\" Data model for the TerminalTeller application \"\"\"\nimport random \nimport json\nimport os\nfrom random import randint\nfrom random import Random\n\n# Locate the 
json file in the ttrader directory wherever the python\n# executable is run. os.path is preferred to literal paths in strings\n# because it can run on Linux, Mac, or Windows without changing\n# code.\nthisdir = os.path.dirname(os.path.realpath(__file__))\njsonfilename = \"data.json\"\nJSONFILE = os.path.join(thisdir, jsonfilename)\n\n\n\n# Note: keeping the data context of a module in a global variable will only\n# work with very simple application:\n\n# Global data store:\n\n\n\ndef initialize():\n global masterUserAccts\n \"\"\" Koad DATA from permanent jsonfile or initialize empty DATA dict \"\"\"\n # the global keyword allows a function to alter a global variabl\n if not os.path.isfile(JSONFILE):\n masterUserAccts = {}\n with open(JSONFILE, \"w\") as file_object:\n json.dump(masterUserAccts, file_object, indent=2)\n\n with open(JSONFILE, \"r\") as file_object:\n masterUserAccts = json.load(file_object)\n \n\n\ndef save():\n \"\"\" Write the updated DATA to the permanent json store \"\"\"\n\n with open(JSONFILE, \"w\") as file_object:\n json.dump(masterUserAccts, file_object)\n\n\ndef add_account(userID, firstname, lastname, pin):\n \"\"\" add a new account to the DATA store \"\"\"\n # q: Why is 'global' not needed here?\n\n masterUserAccts[userID] = {\n \"firstname\": firstname,\n \"lastname\": lastname,\n \"PIN\": pin,\n \"balance\": 0.00,\n \"Credit Card\": str(generate_credit_card())\n }\n\ndef generate_credit_card():\n \"\"\" Generates a new account number. Number is a Luhn-legal credit card #.\n\nStub method. Import your Luhn algorithm module from problem #1 and use\nyour generator method.\n\"\"\" \n newnumber = completed_number(['3', '3'], 16)\n\n while newnumber in masterUserAccts or newnumber is None: # use 'is None & not == None\n newnumber = completed_number(['3', '3'], 16)\n return newnumber\n\n\ndef validate(userID, pin): # use Python legal variable name, not PIN\n \"\"\" Determine if accountnumber exist as an account and pin is its PIN \"\"\"\n if userID in masterUserAccts:\n if pin == masterUserAccts[userID]['PIN']:\n return True\n return False\n\ndef check_balance(logIn_userID):\n return masterUserAccts[logIn_userID]['balance']\n\ndef makeDeposit(logIn_userID, depositAmount):\n masterUserAccts[logIn_userID]['balance'] = str(int(masterUserAccts[logIn_userID]['balance']) + int(depositAmount))\n save()\n return masterUserAccts[logIn_userID]['balance']\n \n\ndef makeWithdrawal(logIn_userID, withdrawalAmount): \n while int(withdrawalAmount) > int(masterUserAccts[logIn_userID]['balance']):\n withdrawalAmount = input(\"Insufficient Funds -- Please enter valid amount: \") \n masterUserAccts[logIn_userID]['balance']= str(int(masterUserAccts[logIn_userID]['balance']) - int(withdrawalAmount))\n save()\n return masterUserAccts[logIn_userID]['balance']\n\ndef transfer(logIn_userID, target_userID, transferAmt):\n masterUserAccts[logIn_userID]['balance']= str(int(masterUserAccts[logIn_userID]['balance']) - int(transferAmt))\n print(logIn_userID, target_userID, transferAmt)\n masterUserAccts[target_userID]['balance']= str(int(masterUserAccts[target_userID]['balance']) + int(transferAmt))\n save()\n\n\n\n\n\n\"\"\"\n pass\n # Recall that a function that updates an external value or produces\n # output as a 'side effect' (such as a print statement) will generally\n # not return a value. 
Some operations, such as the list's 'pop' method\n # will not follow this principle.\n #\n # The term 'method' is somewhat 'interchangeable with 'function'\n\n\"\"\"\ngenerator = Random()\n\ndef completed_number(prefix, length):\n \n ccnumber = prefix\n\n while len(ccnumber) < (length - 1):\n digit = str(generator.choice(range(0, 10)))\n ccnumber.append(digit)\n\n # Calculate sum\n\n sum = 0\n pos = 0\n\n reversedCCnumber = []\n reversedCCnumber.extend(ccnumber)\n reversedCCnumber.reverse()\n\n while pos < length - 1:\n\n odd = int(reversedCCnumber[pos]) * 2\n if odd > 9:\n odd -= 9\n sum += odd\n if pos != (length - 2):\n sum += int(reversedCCnumber[pos + 1])\n pos += 2\n\n # Calculate check digit\n print(ccnumber)\n checkdigit = ((sum / 10 + 1) * 10 - sum) % 10\n ccnumber.append(str(checkdigit))\n return int(float(''.join((ccnumber))))\n\n\n","sub_path":"week1/day3/BankAccountFIles/Model/Smodel.py","file_name":"Smodel.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525967948","text":"#Создай собственный Шутер!\n\nfrom pygame import *\nfrom random import randint\nfrom time import time as timer\n\nmixer.init()\n#mixer.music.load(\"space.ogg\")\n#mixer.music.play()\n\nfire_sound = mixer.Sound(\"fire.ogg\")\n\nback = \"ocean.jpg\"\nhero = \"rocket.png\"\nenemy = \"ufo.png\"\nbullets = \"bullet.png\"\n\nwin_width = 800\nwin_height = 600\nwindow = display.set_mode((win_width, win_height))\ndisplay.set_caption(\"Shooter\")\nbackground = transform.scale(image.load(back), (win_width, win_height))\n\nFPS = 60\nclock = time.Clock()\n\nclass GameSprite(sprite.Sprite):\n def __init__(self, sprite_image, sprite_x, sprite_y, sprite_width, sprite_height, sprite_speed):\n sprite.Sprite.__init__(self)\n self.image = transform.scale(image.load(sprite_image), (sprite_width, sprite_height))\n self.speed = sprite_speed\n\n self.rect = self.image.get_rect()\n self.rect.x = sprite_x\n self.rect.y = sprite_y\n\n def reset(self):\n window.blit(self.image, (self.rect.x, self.rect.y))\n\nclass Player(GameSprite):\n def update(self):\n keys = key.get_pressed()\n if keys[K_LEFT] and self.rect.x > 5:\n self.rect.x -= self.speed\n if keys[K_RIGHT] and self.rect.y < 740:\n self.rect.x += self.speed\n\n def fire(self):\n bullet = Bullet(bullets, self.rect.centerx, self.rect.top, 15, 20, -10)\n bullets_group.add(bullet)\n\nclass Enemy(GameSprite):\n def update(self):\n global lost\n self.rect.y += self.speed\n\n if self.rect.y > win_height:\n self.rect.y = 0\n self.rect.x = randint(50, 720)\n lost += 1\n\nclass Bullet(GameSprite):\n def update(self):\n self.rect.y += self.speed\n if self.rect.y < 0:\n self.kill()\n\nlost = 0\nscore = 0\nenemy_group = sprite.Group()\nfor i in range(1, 6):\n enemy1 = Enemy(enemy, randint(50, 650), 0, 80, 50, randint(1, 3))\n enemy_group.add(enemy1)\n\n\n\nbullets_group = sprite.Group()\n\nship = Player(hero, 400, 500, 50, 80, 5)\n\nfont.init()\nmy_font = font.SysFont('Arial',36)\nfont1 = font.SysFont('Arial' ,90)\nwin = font1.render('YOU WIN!!!!', True, (200, 150, 100))\nlose = font1.render(\"YOU LOSE!!!!!\", True, (200, 150, 100))\nreload_time = False\nnum_fire = 0\n\nfinish = False\ngame = True \nwhile game:\n for e in event.get():\n if e.type == QUIT:\n game = False\n\n if e.type == KEYDOWN:\n if e.key == K_SPACE:\n if num_fire < 5 and reload_time == False:\n fire_sound.play()\n ship.fire()\n num_fire += 1\n if num_fire >= 5 and reload_time == False:\n last_time = timer()\n reload_time = 
True\n if not finish:\n collides = sprite.groupcollide(bullets_group, enemy_group, True, True)\n for c in collides:\n score += 1\n enemy1 = Enemy(enemy, randint(50, 650), 0, 80, 50, randint(1, 3))\n enemy_group.add(enemy1)\n \n\n window.blit(background, (0, 0))\n if reload_time == True:\n now_time = timer()\n if now_time - last_time < 5:\n reload = font1.render(\"Wait please\", True, (200, 150, 100))\n window.blit(reload, (260,460))\n else:\n num_fire = 0\n reload_time = False\n if sprite.spritecollide(ship, enemy_group, False) or lost >= 10:\n window.blit(lose, (200,200))\n finish = True\n if score >= 10:\n window.blit(win, (200,200))\n finish = True\n score_text = my_font.render('Счет: '+str(score), 1, (200, 200, 0))\n window.blit(score_text, (0,0))\n lost_text = my_font.render('Пропущено: '+str(lost), 1, (200, 200, 0))\n window.blit(lost_text,(0, 20))\n ship.update()\n ship.reset()\n enemy_group.update()\n enemy_group.draw(window)\n bullets_group.update()\n bullets_group.draw(window)\n\n else:\n lost = 0\n score = 0\n finish = False\n\n for b in bullets_group:\n b.kill()\n for e in enemy_group:\n e.kill()\n for i in range(1, 6):\n enemy1 = Enemy(enemy, randint(50, 650), 0, 80, 50, randint(1, 3))\n enemy_group.add(enemy1)\n\n time.delay(2000)\n\n\n \n display.update()\n clock.tick(FPS)\n\n","sub_path":"shooter_game.py","file_name":"shooter_game.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528030948","text":"import asyncio\nimport json\n\nfrom notebook.base.handlers import APIHandler\nfrom notebook.utils import url_path_join\nimport tornado\n\nfrom urllib.parse import urldefrag\n\nimport fairworkflows\n\nclass NanopubSearchHandler(APIHandler):\n\n @tornado.web.authenticated\n def get(self):\n\n type_of_search = self.get_argument('type_of_search')\n\n if type_of_search == 'text':\n search_str = self.get_argument('search_str')\n print('Searching for', search_str)\n results = fairworkflows.Nanopub.search_text(search_str)\n elif type_of_search == 'pattern':\n subj = self.get_argument('subj')\n pred = self.get_argument('pred')\n obj = self.get_argument('obj')\n print('Searching for pattern', subj, pred, obj)\n results = fairworkflows.Nanopub.search_pattern(subj=subj, pred=pred, obj=obj)\n elif type_of_search == 'things':\n thing_type = self.get_argument('thing_type')\n searchterm = self.get_argument('searchterm')\n print('Searching for \"thing\"', thing_type, searchterm)\n if not searchterm:\n searchterm = ' '\n results = fairworkflows.Nanopub.search_things(thing_type=thing_type, searchterm=searchterm)\n else:\n raise ValueError(f'Unrecognized type_of_search, {type_of_search}')\n\n ret = json.dumps(results)\n self.finish(ret)\n\ndef nanopub_search_handler(base_url='/'):\n endpoint = url_path_join(base_url, '/nanosearch')\n return endpoint, NanopubSearchHandler\n\n\n\nclass NanopubStepHandler(APIHandler):\n\n @tornado.web.authenticated\n def get(self):\n\n np_uri = self.get_argument('np_uri')\n\n print(np_uri)\n\n # Fetch the nanopub at the given URI\n np = fairworkflows.Nanopub.fetch(np_uri)\n print(np)\n\n # Look for first step (if exists)\n first_step_URI = self.get_first_step(np.rdf)\n\n if first_step_URI is not None:\n step_URIs = [first_step_URI]\n step_URIs += self.get_subsequent_steps(np.rdf)\n\n steps = []\n for step_uri in step_URIs:\n print(step_uri, type(step_uri))\n step_np = fairworkflows.Nanopub.fetch(step_uri)\n steps.append(self.get_step_from_nanopub(step_np.rdf))\n\n 
else:\n # If not a workflow, return the step description in this NP\n print('No first step found - assuming this np describes a step')\n steps = [self.get_step_from_nanopub(np.rdf)]\n \n ret = json.dumps(steps)\n self.finish(ret)\n\n def get_step_from_nanopub(self, np_rdf):\n # Get the description triple\n qres = np_rdf.query(\n \"\"\"SELECT DISTINCT ?code\n WHERE {\n ?a ?code .\n }\"\"\")\n\n qres_list = list([i for i in qres])\n if len(qres_list) > 0:\n result = qres_list[0]\n else:\n result = '# No step description found. Nanopub rdf was:\\n' + np_rdf.serialize(format='trig').decode('utf-8')\n\n\n print('Returning step:', result)\n return result\n\n def get_first_step(self, np_rdf):\n qres = np_rdf.query(\n \"\"\"SELECT DISTINCT ?firstStepURI\n WHERE {\n ?a ?firstStepURI .\n }\"\"\")\n\n uri_list = []\n for row in qres:\n uri = str(row['firstStepURI'].toPython())\n uri_without_fragment = urldefrag(uri)[0]\n uri_list.append(uri_without_fragment)\n\n print('uri_list', uri_list)\n if len(uri_list) == 0:\n return None\n elif len(uri_list) > 1:\n print(\"Warning: More than one first step declared.\")\n\n return uri_list[0]\n\n def get_subsequent_steps(self, np_rdf):\n qres = np_rdf.query(\n \"\"\"SELECT DISTINCT ?stepURI\n WHERE {\n ?a ?stepURI .\n }\"\"\")\n\n uri_list = []\n for row in qres:\n uri = str(row['stepURI'].toPython())\n uri_without_fragment = urldefrag(uri)[0]\n uri_list.append(uri_without_fragment)\n\n print('uri_list', uri_list)\n return uri_list\n\ndef nanopub_step_handler(base_url='/'):\n endpoint = url_path_join(base_url, '/nanostep')\n return endpoint, NanopubStepHandler\n","sub_path":"FAIRWorkflowsExtension/nanopub_handlers.py","file_name":"nanopub_handlers.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217650127","text":"import logging, os, re, sys\nimport code.basic_utils as basic_utils\nimport code.split_fa as split_fa\nimport pprint as pp\n\n\ndef run_fanngo(config):\n fanngo_conf = config[\"mixed-meth\"][\"fanngo\"]\n fanngo_template = fanngo_conf[\"path\"]+\"/\"+fanngo_conf[\"template\"]\n run_file_str=os.path.basename(config[\"input\"][\"filt_fasta\"]).replace(\".fa\",\"\")+\".fanngo.m\"\n run_file_path = fanngo_conf[\"path\"]+\"/\"+run_file_str\n #print fanngo_template\n conf_lines = open(fanngo_template,\"r\").readlines()\n run_file = open(run_file_path,\"w\")\n cwd=os.getcwd()\n output = cwd + \"/\" + fanngo_conf[\"output\"]\n out_score = output + \"/\" + os.path.basename(config[\"input\"][\"filt_fasta\"]).replace(\".fa\",\".score.txt\")\n\n\n\n for line in conf_lines:\n line = line.strip()\n if line.find(\"$PATH\") > -1:\n code_path = cwd+\"/\"+fanngo_conf[\"path\"]+\"/code\"\n outline = line.replace(\"$PATH\",code_path)\n print >>run_file, outline\n elif line.find(\"$INPUT_FASTA\") > -1:\n input_fasta = cwd+\"/\"+config[\"input\"][\"filt_fasta\"]\n outline = line.replace(\"$INPUT_FASTA\",input_fasta)\n print >>run_file, outline\n elif line.find(\"$OUTPUT_SCORE\") > -1:\n outline = line.replace(\"$OUTPUT_SCORE\",out_score)\n print >>run_file, outline\n else:\n print >>run_file, line\n run_file.close()\n cmd = [\"matlab\", \"-nojvm\", \"-nodisplay\", \"-nosplash\"]\n basic_utils.check_output_and_run(out_score,cmd,run_file_path,\"temp/fanngo.log\")\n","sub_path":"code/pipeline/_9_run_fanngo.py","file_name":"_9_run_fanngo.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"231053138","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n\nimport oss2\nimport logging\nimport os\nimport json\nimport sys\nimport base64\n\nFORMAT = '%(asctime)-15s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=FORMAT)\n\n\ndef get_oss_file(param, local_file):\n endpoint = 'oss-cn-beijing.aliyuncs.com'\n bucket = 'video-match'\n auth = oss2.StsAuth(\n param.get('id'),\n param.get('secret'),\n param.get('stoken'),\n )\n bucket_obj = oss2.Bucket(auth, endpoint, bucket)\n '''\n here we just known the file path, can be change to your self path , or just list the bucket\n '''\n key = param.get('osspath')[18:] + os.path.basename(local_file)\n with open(local_file) as f:\n bucket_obj.put_object(key, f)\n logging.info(\"put %s success\", local_file)\n\n\ndef main():\n if len(sys.argv) != 3:\n logging.error(\n \"please run with : python oss.py \")\n sys.exit(1)\n assert(os.path.isfile(sys.argv[1]))\n assert(len(sys.argv[2]) > 10)\n obj = json.loads(base64.b64decode(sys.argv[2]))\n get_oss_file(obj, sys.argv[1])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"upload/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371968102","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 16 11:11:32 2018\n\n@author: Sander Oosterveld\n\"\"\"\nimport socket\nimport threading\nimport datetime\nimport time\n#from queue import Queue\n#from DataHandler import Datahandler\n\nclass SocketClient(threading.Thread):\n \n def __init__(self, hostname, port, q, dataHandler):\n '''\n Initiates the thread object needs to have:\n Input: hostname(string which is sanderroom.student.utwente.nl), port(50000>x>65535), queue opbject and a DataHandler object\n '''\n self.queue = q\n self.dataHandler = dataHandler\n threading.Thread.__init__(self) \n self.hostname = hostname\n self.port = port\n self.connected = False\n def run(self):\n print(\"started thread\")\n while True:\n try:\n print(\"Trying to make socket\")\n mySocket = socket.socket()\n print(\"socket made\")\n mySocket.bind((self.hostname,self.port))\n print(\"socket bound\")\n mySocket.listen(1)\n print(\"socket listening\")\n conn, addr = mySocket.accept()\n self.connectedAddress = addr\n print('starting connection')\n passwd = str(conn.recv(1024).decode())\n print(passwd)\n if str(passwd) in self.makeCodes():\n print(\"connected\")\n while True:\n self.connected = True\n data = conn.recv(1024).decode()\n if not data:\n break\n print(\"received message: \" + str(data))\n self.dataHandler.adddata(data)\n self.queue.put(self.dataHandler)\n print(\"Socket being closed\")\n conn.close()\n self.connected = False\n #break\n else:\n print(\"password not correct\")\n conn.close()\n self.connected = False\n time.sleep(5)\n except KeyboardInterrupt:\n self.connected = False\n break\n except OSError:\n print(\"Adress still in use\")\n self.connected = False\n time.sleep(5)\n except:\n print(i)\n time.sleep(4)\n print(\"outside while loop\")\n self.connected = False\n \n \n \n def makeCodes(self):\n currentTime = datetime.datetime.now()\n day = currentTime.day\n #print(day)\n hour = currentTime.hour\n #print(hour)\n minute = currentTime.minute\n #print(minute)\n second = currentTime.second\n if hour >= 12:\n hour = hour-12\n \n #print(hour)\n secondTens = int(second/10)\n possibleSeconds = [secondTens-1,secondTens,secondTens+1]\n codes = []\n for seconds in possibleSeconds:\n 
#print(seconds)\n intCode = (1213*(day+1)*100000643*(hour+1)*4124473*(minute+1)*8456876119*(seconds+1))**13\n #print(intCode)\n longCode = hex(intCode)\n #print(longCode)\n codes.append(longCode[17:295])\n #print(codes)\n return codes\n \n def getConnectionData(self):\n if self.connected:\n try:\n return \"Connected with: \" + str(self.connectedAddress)\n except:\n return \"No adress --> no connection\"\n else:\n return \"Phone not Connected\"\n \n def testConnected(self):\n return self.connected\n ","sub_path":"SocketClient.py","file_name":"SocketClient.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"350257291","text":"import matplotlib.pyplot as plt\nimport mpld3 as mp\nimport businessLogic as bl\nimport pandas as pd\n\n\ndef calculate(R_df):\n\n phy=R_df[R_df['subsector']=='Physical']\n org=R_df[R_df['subsector']=='Organisational']\n tech=R_df[R_df['subsector']=='Technical']\n phycid=phy.cid.unique()\n orgcid=org.cid.unique()\n techcid=tech.cid.unique()\n\n subs=['Physical','Organisational','Technical']\n host,base,colection,user,pwd=bl.mongoInit()\n ## userid,survey,company should be passed from UI with API\n ##userid='588'\n ##company='ITH'\n ##survey='Quater 1'\n\n x=df=pd.DataFrame()\n for sub in subs:\n subdf=R_df[R_df['subsector']==sub]\n subcid=subdf.cid.unique()\n\n for i in subcid:\n print(\"i value is \", i, sub)\n subscore=subdf[subdf.cid==i].qscore.sum()/len(subdf[subdf.cid==i].index)\n df=subdf[subdf.cid==i]\n df['cscore']=subscore\n x=pd.concat([df, x], ignore_index=True)\n print(subcid)\n\n df=x.sort_values(['cid']) # df has mean values of cscore i.e category score in R1\n print(df)\n return df\n\n \"\"\"document=bl.getSurveyDetailsByCid(userid,survey,company,host,base,colection,user,pwd,subsector='Physical',sector='R1')\n for i in document:\n print(i)\"\"\"\n\n \"\"\"\n phycid=len(list(R_df[R_df['subsector']=='Physical'].cid.unique()))\n orgcid=len(list(R_df[R_df['subsector']=='Physical'].cid.unique()))\n techcid=len(list(R_df[R_df['subsector']=='Physical'].cid.unique()))\n \"\"\"\n","sub_path":"surveyApp/Backup_Feb7/chartsLogic.py","file_name":"chartsLogic.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"322771652","text":"\"\"\"\nWrapper module around a Linux PTY which can be used to start an underlying shell\n\"\"\"\n\nimport os\nimport select\nimport subprocess\nimport struct\nimport signal\n\ntry:\n import fcntl\n import termios\nexcept ImportError:\n pass\n\n\nclass LinuxPty():\n \"\"\"\n Linux PTY class that starts an underlying and provides methods for\n communicating with it\n \"\"\"\n def __init__(self, cmd, cwd):\n self._cmd = cmd\n self._env = os.environ.copy()\n self._env[\"TERM\"] = \"linux\"\n (self._pty, self._pts) = os.openpty()\n self._process = subprocess.Popen(self._cmd, stdin=self._pts,\n stdout=self._pts, stderr=self._pts, shell=False,\n env=self._env, close_fds=True, start_new_session=True,\n cwd=cwd)\n\n def stop(self):\n \"\"\"\n Stop the shell\n \"\"\"\n if self.is_running():\n self._process.kill()\n self._process = None\n return\n\n def receive_output(self, max_read_size, timeout=0):\n \"\"\"\n Poll the shell output\n \"\"\"\n if not self.is_running():\n return None\n\n (ready, _, _) = select.select([self._pty], [], [], timeout)\n if not ready:\n return None\n\n return os.read(self._pty, max_read_size)\n\n def update_screen_size(self, lines, 
columns):\n        \"\"\"\n        Notify the shell of a terminal screen resize\n        \"\"\"\n        if self.is_running():\n            # Note, assume ws_xpixel and ws_ypixel are zero.\n            tiocswinsz = getattr(termios, 'TIOCSWINSZ', -2146929561)\n            size_update = struct.pack('HHHH', lines, columns, 0, 0)\n            fcntl.ioctl(self._pts, tiocswinsz, size_update)\n            os.kill(self._process.pid, signal.SIGWINCH)\n\n    def is_running(self):\n        \"\"\"\n        Check if the shell is running\n        \"\"\"\n        return self._process is not None and self._process.poll() is None\n\n    def send_keypress(self, key, ctrl=False, alt=False, shift=False, meta=False):\n        \"\"\"\n        Send keypress to the shell\n        \"\"\"\n        if ctrl:\n            keycode = self._get_ctrl_combination_key_code(key)\n        elif alt:\n            keycode = self._get_alt_combination_key_code(key)\n        else:\n            keycode = self._get_key_code(key)\n\n        self._send_string(keycode)\n\n    def _get_ctrl_combination_key_code(self, key):\n        key = key.lower()\n        if key in _LINUX_CTRL_KEY_MAP:\n            return _LINUX_CTRL_KEY_MAP[key]\n        elif len(key) == 1:\n            unicode = ord(key)\n            if (unicode >= 97) and (unicode <= 122):\n                unicode = unicode - ord('a') + 1\n                return chr(unicode)\n            return self._get_key_code(key)\n\n        return self._get_key_code(key)\n\n    def _get_alt_combination_key_code(self, key):\n        key = key.lower()\n        if key in _LINUX_ALT_KEY_MAP:\n            return _LINUX_ALT_KEY_MAP[key]\n\n        code = self._get_key_code(key)\n        return \"\\x1b\" + code\n\n    def _get_key_code(self, key):\n        if key in _LINUX_KEY_MAP:\n            return _LINUX_KEY_MAP[key]\n\n        return key\n\n    def _send_string(self, string):\n        if self.is_running():\n            os.write(self._pty, string.encode('UTF-8'))\n\n\n_LINUX_KEY_MAP = {\n    \"enter\": \"\\r\",\n    \"backspace\": \"\\x7f\",\n    \"tab\": \"\\t\",\n    \"space\": \" \",\n    \"escape\": \"\\x1b\",\n    \"down\": \"\\x1b[B\",\n    \"up\": \"\\x1b[A\",\n    \"right\": \"\\x1b[C\",\n    \"left\": \"\\x1b[D\",\n    \"home\": \"\\x1b[1~\",\n    \"end\": \"\\x1b[4~\",\n    \"pageup\": \"\\x1b[5~\",\n    \"pagedown\": \"\\x1b[6~\",\n    \"delete\": \"\\x1b[3~\",\n    \"insert\": \"\\x1b[2~\",\n    \"f1\": \"\\x1bOP\",\n    \"f2\": \"\\x1bOQ\",\n    \"f3\": \"\\x1bOR\",\n    \"f4\": \"\\x1bOS\",\n    \"f5\": \"\\x1b[15~\",\n    \"f6\": \"\\x1b[17~\",\n    \"f7\": \"\\x1b[18~\",\n    \"f8\": \"\\x1b[19~\",\n    \"f9\": \"\\x1b[20~\",\n    \"f10\": \"\\x1b[21~\",\n    \"f11\": \"\\x1b[23~\",\n    \"f12\": \"\\x1b[24~\",\n}\n\n_LINUX_CTRL_KEY_MAP = {\n    \"up\": \"\\x1b[1;5A\",\n    \"down\": \"\\x1b[1;5B\",\n    \"right\": \"\\x1b[1;5C\",\n    \"left\": \"\\x1b[1;5D\",\n    \"@\": \"\\x00\",\n    \"`\": \"\\x00\",\n    \"[\": \"\\x1b\",\n    \"{\": \"\\x1b\",\n    \"\\\\\": \"\\x1c\",\n    \"|\": \"\\x1c\",\n    \"]\": \"\\x1d\",\n    \"}\": \"\\x1d\",\n    \"^\": \"\\x1e\",\n    \"~\": \"\\x1e\",\n    \"_\": \"\\x1f\",\n    \"?\": \"\\x7f\",\n}\n\n_LINUX_ALT_KEY_MAP = {\n    \"up\": \"\\x1b[1;3A\",\n    \"down\": \"\\x1b[1;3B\",\n    \"right\": \"\\x1b[1;3C\",\n    \"left\": \"\\x1b[1;3D\",\n}\n","sub_path":"linux_pty.py","file_name":"linux_pty.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529063047","text":"#! 
/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport time\nimport sys\nimport os\nsys.path.append(r'/Users/shenzhouyang/mycode/数据测试/code/LR/libraries')\nfrom LogProc import *\nfrom Binning import *\nimport math\nimport hashlib\nfrom operator import itemgetter\nfrom DescStats import *\n\n\n\nfile_num_out = {}\nfile_num_woe = ''\nfile_num_sig = {}\nfile_num_csv = {}\nfile_char_out = {}\nfile_char_woe = ''\nfile_char_sig = {}\nfile_char_csv = {}\nfile_char_drop = ''\ncnstncy_check = ''\n\nmInf = -1.0e38 #The minus-infinity sentinel value\neps = 1.0e-38 #global constant used to avoid divide-by-zero errors\nwoe_cap = 4 #Cap & Floor for woe values\nMAX_ITRN = 100 #maximum number of iterations for merging categories\nmax_cat = 200 #maximum # of distinct categories for categorical variables\n\n\n\n\n\ndef OutputFileProc(FilePathName):\n\tif os.path.isfile(FilePathName):\n\t\tos.remove(FilePathName)\n\treturn open(FilePathName, 'w')\n\ndef ywgt_check(mst, var):\n\tif var not in mst:\n\t\tprint('{0} is not in the input DataFrame!'.format(var))\n\t\texit(2)\n\ndef VarlistProcess(df, master, include = None, exclude = None):\n\n\tif include == None:\n\t\tinclude_var_set = set(master)\n\telse:\n\t\toverlap_set = set(master) & set(include)\n\t\tif len(overlap_set) == 0:\n\t\t\tprint('None of the variables specified in include_var_list is in the given data frame!', file = log_file)\n\t\t\texit(1)\n\t\telse:\n\t\t\tinclude_var_set = overlap_set\n\t\t\n\tif exclude == None:\n\t\texclude = []\n\texclude_var_set = set(exclude)\n\t\n\tinclude_var_set -= exclude_var_set\n\t\n\tif len(include_var_set) == 0:\n\t\tprint('No valid variables. Program terminated...', file = sys.stdout)\n\t\texit(3)\n\t\n\tmaster = list(include_var_set)\n\t\n\tvar_format_lst = ['string' if str(df[x].dtype).find('object') >= 0 else str(df[x].dtype) for x in master]\n\tvar_type_lst = ['CHAR' if x == 'string' else 'NUM' for x in var_format_lst]\n\t\n\tvar_info_base_df = DataFrame(dict(zip(['name', 'type', 'format'], [master, var_type_lst, var_format_lst])), index = master, columns = ['name', 'type', 'format'])\n\t\n\treturn var_info_base_df\n\ndef woe(dev_df, y, fvalue, groups, outfile, summary_dr, wgt, postfix, oot_df, num_special_value_list = None, label_df = None, include_var_list = None, exclude_var_list = None, num_corr_predefine = None, num_var_sample_rate = 1):\n\t\n\tlog_file, lst_file = AppendLogs()\n\tfor x in log_file, lst_file:\n\t\tprint('\\n', file = x)\n\n\tglobal file_num_out\n\tglobal file_num_woe\n\tglobal file_num_sig\n\tglobal file_num_csv\n\tglobal file_char_out \n\tglobal file_char_woe \n\tglobal file_char_sig \n\tglobal file_char_csv \n\tglobal file_char_drop\n\tglobal cnstncy_check\n\n\t#Initialize the output files\n\tfor tag in ['dev','oot']:\n\t\tfile_num_out[tag] \t= OutputFileProc(outfile + '_num_'+tag+'.out')\n\t\tfile_num_sig[tag] \t= OutputFileProc(outfile + '_num_'+tag+'.sig')\n\t\tfile_num_csv[tag] \t= OutputFileProc(outfile + '_num_'+tag+'.csv')\n\t\tfile_char_out[tag] = OutputFileProc(outfile + '_char_'+tag+'.out')\n\t\tfile_char_sig[tag] = OutputFileProc(outfile + '_char_'+tag+'.sig')\n\t\tfile_char_csv[tag] = OutputFileProc(outfile + '_char_'+tag+'.csv')\n\t\t\n\t\n\tfile_num_woe\t= OutputFileProc(outfile + '_num_dev.woe')\n\tfile_char_woe = OutputFileProc(outfile + '_char_dev.woe')\n\tfile_char_drop = OutputFileProc(outfile + '_char_dev_drop')\n\tcnstncy_check = OutputFileProc(outfile + '_cnstncy_check.txt')\n\t\n\t#Check y and unit weight\n\tmaster_list = 
list(dev_df.columns)\n\t\n\tfor item in [y, wgt]:\n\t\tywgt_check(master_list, item)\n\t\tmaster_list.remove(item)\n\t\n\t\n\t#Sort out all the variables to process\n\tVarlist_to_cal = VarlistProcess(df = dev_df, master = master_list, include = include_var_list, exclude = exclude_var_list)\n\n\t#Merge the labels\n\ttry:\n\t\tif label_df == None:\n\t\t\tVarlist_to_cal['label'] = np.nan\n\texcept:\n\t\tVarlist_to_cal = Varlist_to_cal.merge(label_df[['variable', 'label']], left_on = 'name', right_on = 'variable', how = 'left')\n\t\n\t#Merge the 'force_corr' tag\n\tif num_corr_predefine == None:\n\t\tVarlist_to_cal['force_corr'] = np.nan\n\telse:\n\t\tnum_var_predefined_corr_df = pd.read_csv(num_corr_predefine, header = None, skiprows = 1, names=['name','force_corr'])\n\t\tVarlist_to_cal = Varlist_to_cal.merge(num_var_predefined_corr_df, on = 'name', how = 'left')\n\n\tVarlist_to_cal.drop('variable', axis = 1, inplace = True)\n\tVarlist_to_cal.sort_values(by = ['type','name'], ascending = [False, True], inplace = True)\n\tVarlist_to_cal.set_index('name', drop = False, inplace = True)\n\t\n\t#Descriptive Stats of y\n\tdev_y_stats = dev_df[dev_df[y].isin([0,1])].groupby(y)[wgt].agg(['count','sum'])\n\tdev_y_stats.columns = ['raw', 'weighted']\n\toot_y_stats = oot_df[oot_df[y].isin([0,1])].groupby(y)[wgt].agg(['count','sum'])\n\toot_y_stats.columns = ['raw', 'weighted']\n\t\n\t#Calculate WoE of each var\n\twoe_bin_dict = {}\n\tvar_cnt = 0\n\tpsi_dict = {}\n\tfor var in Varlist_to_cal.index:\n\t\tvar_cnt += 1\n\t\t#For debugging\n\t\t#if var_cnt > 10:\n\t\t#\tbreak\n\t\t\t\n\t\tif Varlist_to_cal.loc[var, 'type'] == 'NUM':\n\t\t\t#For debugging\n\t\t\tif np.random.random(1)[0] >= num_var_sample_rate:\n\t\t\t\tcontinue\n\t\t\tprint('Processing numerical {0}({1}/{2})...'.format(var, var_cnt, len(Varlist_to_cal)))\n\t\t\tstart_time = time.time()\n\t\t\twoe_bin = cal_woe_num(df_dev = dev_df[dev_df[y] < 2][[var, y, wgt]], x = var, y = y, wgt = wgt, groups = groups, postfix = postfix, fvalue = fvalue, label = Varlist_to_cal.loc[var, 'label'], num = var_cnt, df_oot = oot_df[oot_df[y] < 2][[var, y, wgt]], force_corr = Varlist_to_cal.loc[var, 'force_corr'], dev_y_stats = dev_y_stats, oot_y_stats = oot_y_stats, num_special_value_list = num_special_value_list)\n\t\t\twoe_bin_dict[var] = ('NUM', woe_bin)\n\t\t\tprint('Numerical %s finished.\\nTime Cost: %.2fs'%(var, time.time() - start_time))\n\t\telse:\n\t\t\tprint('Processing character {0}({1}/{2})...'.format(var, var_cnt, len(Varlist_to_cal)))\n\t\t\tstart_time = time.time()\n\t\t\twoe_bin = cal_woe_char(df_dev = dev_df[dev_df[y] < 2][[var, y, wgt]], x = var, y = y, wgt = wgt, postfix = postfix, fvalue = fvalue, label = Varlist_to_cal.loc[var, 'label'], num = var_cnt, df_oot = oot_df[oot_df[y] < 2][[var, y, wgt]], dev_y_stats = dev_y_stats, oot_y_stats = oot_y_stats, drop_log = file_char_drop)\n\t\t\twoe_bin_dict[var] = ('CHAR', woe_bin)\n\t\t\tprint('Character %s finished.\\nTime Cost: %.2fs'%(var, time.time() - start_time))\n\t\ttry:\n\t\t\tpsi_dict[var] = [woe_bin['psi_ptot'].sum(), woe_bin['psi_br'].sum()]\n\t\texcept:\n\t\t\tpass\n\tpd.to_pickle(woe_bin_dict, outfile+'_woe_bin_py.pickle')\n\t\n\tpsi_df = pd.DataFrame(psi_dict).T\n\tpsi_df.columns = ['PSI_pTot', 'PSI_BR']\n\t\n\tfor tag in ['dev','oot']:\n\t\n\t\tfile_num_csv[tag].close() \t\n\t\tfile_char_csv[tag].close()\n\t\n\tnum_summary_df = {}\n\tfor tag in ['dev','oot']:\n\t\tnum_summary_df[tag] = sort_ksinfo_num(csv_file = outfile + '_num_'+tag+'.csv', sig_file = 
file_num_sig[tag])\n\t\n\tchar_summary_df = {}\n\tfor tag in ['dev','oot']:\n\t\tchar_summary_df[tag] = sort_ksinfo_char(csv_file = outfile + '_char_'+tag+'.csv', sig_file = file_char_sig[tag])\n\t\n\n\t\n\tfor i, sig_dict in enumerate([num_summary_df, char_summary_df]):\n\t\tvar_summary_dev = sig_dict['dev'][['varname', 'wvarname', 'ks', 'info', 'wlabel']]\n\t\tvar_summary_dev.columns = ['varname', 'woe', 'KS_dev', 'IV_dev', 'label']\n\t\tvar_summary_oot = sig_dict['oot'][['varname', 'ks', 'info']]\n\t\tvar_summary_oot.columns = ['varname', 'KS_oot', 'IV_oot']\n\t\tvar_summary = pd.merge(var_summary_dev, var_summary_oot, on = 'varname', how = 'left')\n\t\tvar_summary = var_summary[['varname', 'woe', 'label', 'KS_dev', 'IV_dev', 'KS_oot', 'IV_oot']]\n\t\t\n\t\tvar_summary = var_summary.merge(psi_df, left_on = 'varname', right_index = True, how = 'left')\n\t\t\n\t\tif i == 0:\n\t\t\tfilename = summary_dr + '/num_iv' + postfix + '.csv'\n\t\telse:\n\t\t\tfilename = summary_dr + '/char_iv' + postfix + '.csv'\n\t\t\n\t\tvar_summary.to_csv(filename, index = False)\n\t\n\t\t\n\t\n\tfor tag in ['dev','oot']:\n\n\t\tfile_num_out[tag].close() \t\t\n\t\tfile_num_sig[tag].close() \t\n\t\tfile_char_out[tag].close() \n\t\tfile_char_sig[tag].close() \t \n\t\n\tfile_num_woe.close() \n\tfile_char_woe.close() \n\tfile_char_drop.close()\n\tcnstncy_check.close()\n\t\n\n\tlog_file.close()\n\tlst_file.close()\n\ndef sort_ksinfo_num(csv_file, sig_file):\n\t\n\tvar_name_lst = ['num','varname','wvarname','ks','info','linearity','wlabel']\n\tvar_type_lst = [np.int32, np.string_, np.string_, np.float64, np.float64, np.float64, np.string_]\n\t\n\tcsv_df = pd.read_csv(csv_file, header = None, names = var_name_lst, dtype = dict(zip(var_name_lst, var_type_lst)))\t\n\t\n\tcsv_df.sort_values(by = ['info', 'ks'], ascending = [False, False], inplace = True)\n\t\n\tprint('%-7s%-34s%-34s%-17s%-22s%-20s%-5s'%('No','Variable','WOE variable','Maximum KS','Information Value','Linearity','Label'), file = sig_file)\n\t\n\tfor idx in csv_df.index:\n\t\tprint('%-7.0f%-34s%-34s%-17.2f%-22.2f%-20.2f%-5s'%(csv_df.loc[idx,'num'],csv_df.loc[idx,'varname'],csv_df.loc[idx,'wvarname'],csv_df.loc[idx,'ks'],csv_df.loc[idx,'info'],csv_df.loc[idx,'linearity'],csv_df.loc[idx,'wlabel']), file = sig_file)\n\treturn csv_df\t\n\n\ndef sort_ksinfo_char(csv_file, sig_file):\n\t\n\tvar_name_lst = ['num','varname','wvarname','ks','info','wlabel']\n\tvar_type_lst = [np.int32, np.string_, np.string_, np.float64, np.float64, np.string_]\n\t\n\tcsv_df = pd.read_csv(csv_file, header = None, names = var_name_lst, dtype = dict(zip(var_name_lst, var_type_lst)))\t\n\tcsv_df.sort_values(by = ['info', 'ks'], ascending = [False, False], inplace = True)\n\t\n\tprint('%-7s%-34s%-34s%-17s%-22s%-5s'%('No','Variable','WOE variable','Maximum KS','Information Value','Label'), file = sig_file)\n\t\n\tfor idx in csv_df.index:\n\t\tprint('%-7.0f%-34s%-34s%-17.2f%-22.2f%-5s'%(csv_df.loc[idx,'num'],csv_df.loc[idx,'varname'],csv_df.loc[idx,'wvarname'],csv_df.loc[idx,'ks'],csv_df.loc[idx,'info'],csv_df.loc[idx,'wlabel']), file = sig_file)\n\treturn csv_df\n\n\ndef cat_bin(dsin, x, y, wgt):\n\t\n\tdsin['bin'] = dsin[x].fillna(' ')\n\tdsin['y_weighted'] = dsin[y] * dsin[wgt]\n\t_temp__ = dsin.groupby('bin', as_index = False).agg({wgt:'sum','y_weighted':'sum'})\n\t_temp__.rename(columns = {wgt:'n','y_weighted':'y1sum'}, inplace = True)\n\t_temp__['ymean'] = _temp__['y1sum']/_temp__['n']\n\t\n\t_temp__.sort_values(by = 'ymean', inplace = True)\n\t\n\t_temp__['y0sum'] = _temp__['n'] - 
_temp__['y1sum']\n\t\n\t_temp__['xcat'] = _temp__['bin'].apply(add_quote)\n\t\n\treturn _temp__\n\ndef add_quote(var):\n\tif var == ' ':\n\t\treturn \"''\"\n\tif var.find(\"'\") >= 0:\n\t\treturn '\"' + var.strip() + '\"'\n\telse:\n\t\treturn \"'\" + var.strip() + \"'\"\n\ndef cal_woe_char(df_dev, x, y, wgt, postfix, fvalue, label, num, df_oot, dev_y_stats, oot_y_stats,drop_log):\n\n\t_tmp_dev = cat_bin(dsin = df_dev, x = x, y = y, wgt = wgt)\n\t\n\tglobal max_cat\n\tcat_count = len(_tmp_dev)\n\t\n\tif cat_count <= max_cat and cat_count > 1:\n\t\t\n\t\t__tmp_dev = get_ksinfo(dsin = _tmp_dev, y_stat = dev_y_stats, fvalue = fvalue)\n\t\t\n\t\tglobal file_char_out\n\t\tprint_gainschart(dsin = __tmp_dev, x = x, label = label, num = num, outfile = file_char_out['dev'])\n\t\t\n\t\t_tmp_oot = cat_bin(dsin = df_oot, x = x, y = y, wgt = wgt)\n\t\t__tmp_oot = get_ksinfo(dsin = _tmp_oot, y_stat = oot_y_stats, fvalue = fvalue)\n\t\tprint_gainschart(dsin = __tmp_oot, x = x, label = label, num = num, outfile = file_char_out['oot'])\n\t\t\n\t\t_tmp_oot = cat_bin(dsin = df_oot, x = x, y = y, wgt = wgt)\n\t\t\n\t\t_dsoot_merge = rank_oot_char(sum_dev = _tmp_dev, sum_oot = _tmp_oot)\n\t\t\n\t\t_dsoot_merge = auto_linear_char(freq_table = _dsoot_merge, fvalue = fvalue, x = x, pre_dir = _tmp_dev)\n\t\t\n\t\t_tmp_dev = apply_dev(dev_freq = _tmp_dev, oot_freq = _dsoot_merge, x = x)\n\t\t\n\t\twoe_var_name = woe_naming(x, postfix)\n\t\t\n\t\t__tmp_dev = get_ksinfo(dsin = _tmp_dev, y_stat = dev_y_stats, fvalue = fvalue)\n\t\tprint_gainschart(dsin = __tmp_dev, x = x, label = label, num = num, outfile = file_char_out['dev'])\n\t\tglobal file_char_woe\n\t\tprint_woe(data = __tmp_dev, x = x, num = num, woe_var_name = woe_var_name, label = label, outfile = file_char_woe)\n\t\t\n\t\t\n\t\tglobal file_char_csv\n\t\tprint_ksinfo(data = __tmp_dev, x = x, label = label, num = num, outfile = file_char_csv['dev'],woe_var_name = woe_var_name)\n\t\t\n\t\t__dsoot_merge = get_ksinfo(dsin = _dsoot_merge, y_stat = oot_y_stats, fvalue = fvalue)\n\t\tprint_gainschart(dsin = __dsoot_merge, x = x, label = label, num = num, outfile = file_char_out['oot'])\n\t\tprint_ksinfo(data = __dsoot_merge, x = x, label = label, num = num, outfile = file_char_csv['oot'],woe_var_name = woe_var_name)\n\t\t\n\t\t__tmp_dev = psi_char(__tmp_dev, __dsoot_merge)\n\t\n\t\treturn __tmp_dev[['xcat', 'woe', 'n', 'rate_y1', 'totrate_y1', 'psi_ptot', 'psi_br']]\n\t\t\n\t\t\n\telse:\n\t\tprint('%s has %.0f distinct categories. 
Hence dropped...'%(x, cat_count), file = drop_log)\n\n\ndef psi_char(ks_dev, ks_oot):\n\t\n\n\tks_dev['ptot'] = ks_dev['n']/ks_dev['n'].sum()\n\tks_oot['ptot_oot'] = ks_oot['n']/ks_oot['n'].sum()\n\tks_oot.rename(columns = {'ymean':'ymean_oot'}, inplace = True)\n\t\n\tks_dev = ks_dev.merge(ks_oot[['xcat', 'ptot_oot', 'ymean_oot']], on = 'xcat', how = 'left')\n\t\n\tfor col in ['ptot', 'ptot_oot', 'ymean', 'ymean_oot']:\n\t\tks_dev[col] = ks_dev[col].fillna(0)\n\t\n\tks_dev['psi_ptot'] = (ks_dev['ptot'] - ks_dev['ptot_oot']) * ((ks_dev['ptot'] + eps)/(ks_dev['ptot_oot'] + eps)).apply(math.log)\n\tks_dev['psi_br'] = (ks_dev['ymean'] - ks_dev['ymean_oot']) * ((ks_dev['ymean'] + eps)/(ks_dev['ymean_oot'] + eps)).apply(math.log)\n\t\n\treturn ks_dev\n\t\n\ndef print_ksinfo(data, x, label, num, outfile, woe_var_name):\n\tprint('%s,%s,%s,%.2f,%.2f,%s'%(num,x,woe_var_name,data.loc[data.index[-1],'maxks'],data.loc[data.index[-1],'tot_info'],label), file = outfile)\n\n\ndef print_woe(data, x, num, woe_var_name, label, outfile):\n\tdata.index = range(len(data))\n\tdata['xtmp'] = data['bin'].apply(add_quote)\n\tfor idx in data.index:\n\t\tif idx == 0:\n\t\t\tprint('\\n\\n/* WOE recoding for %s */'%x, file = outfile)\n\t\t\tif data.loc[idx, 'xtmp'] == data.loc[idx, 'xcat']:\n\t\t\t\tprint('if %s = %s then %s = %.6f;'%(x, data.loc[idx, 'xcat'], woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\t\telif len(data.loc[idx, 'xcat']) < 100:\n\t\t\t\tprint('if %s in ( %s ) then %s = %.6f;'%(x, data.loc[idx, 'xcat'], woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\t\telse:\t\n\t\t\t\txcat_lst = data.loc[idx, 'xcat'].strip().split(',')\n\t\t\t\tprint('if %s in ('%x, file = outfile, end = '')\n\t\t\t\tfor i,v in enumerate(xcat_lst):\n\t\t\t\t\tif i < len(xcat_lst) - 1:\n\t\t\t\t\t\tprint(' %s,'%v, file = outfile)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(' %s'%v, file = outfile)\n\t\t\t\tprint(') then %s = %.6f;'%(woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\telse:\n\t\t\tif data.loc[idx, 'xtmp'] == data.loc[idx, 'xcat']:\n\t\t\t\tprint('else if %s = %s then %s = %.6f;'%(x, data.loc[idx, 'xcat'], woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\t\telif len(data.loc[idx, 'xcat']) < 100:\n\t\t\t\tprint('else if %s in ( %s ) then %s = %.6f;'%(x, data.loc[idx, 'xcat'], woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\t\telse:\t\n\t\t\t\txcat_lst = data.loc[idx, 'xcat'].strip().split(',')\n\t\t\t\tprint('else if %s in ('%x, file = outfile, end = '')\n\t\t\t\tfor i,v in enumerate(xcat_lst):\n\t\t\t\t\tif i < len(xcat_lst) - 1:\n\t\t\t\t\t\tprint(' %s,'%v, file = outfile)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(' %s'%v, file = outfile)\n\t\t\t\tprint(') then %s = %.6f;'%(woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\t\t\t\n\tprint('else %s = 0.0;'%woe_var_name, file = outfile)\n\n\n\ndef apply_dev(dev_freq, oot_freq, x):\n\t\n\tdev_freq['bin'] = (dev_freq['bin'].apply(str.strip)).apply(str.lower)\n\tdev_freq = dev_freq[['bin', 'n', 'y1sum', 'y0sum', 'xcat', 'bin_num']]\n\t\n\toot_freq['bin'] = (oot_freq['bin'].apply(str.strip)).apply(str.lower)\n\toot_freq = oot_freq[['bin','woe']]\n\t\n\tdev_freq_tmp = pd.merge(dev_freq, oot_freq, on = 'bin', how = 'left')\n\t\n\tpre_n = 0\n\tpre_y1sum = 0\n\tpre_y0sum = 0\n\tpre_xcat = ''\n\t\n\tfor idx in dev_freq_tmp.index:\n\t\tif str(dev_freq_tmp.loc[idx, 'woe']) == 'nan':\n\t\t\tpre_n += dev_freq_tmp.loc[idx, 'n']\n\t\t\tpre_y1sum += dev_freq_tmp.loc[idx, 'y1sum']\n\t\t\tpre_y0sum += dev_freq_tmp.loc[idx, 'y0sum']\n\t\t\tif pre_xcat == '':\n\t\t\t\tpre_xcat 
= dev_freq_tmp.loc[idx, 'xcat'].strip()\n\t\t\telse:\n\t\t\t\tpre_xcat = pre_xcat.strip() + ', '+ dev_freq_tmp.loc[idx, 'xcat'].strip()\n\t\telse:\n\t\t\tdev_freq_tmp.loc[idx, 'n'] += pre_n\n\t\t\tdev_freq_tmp.loc[idx, 'y1sum'] += pre_y1sum\n\t\t\tdev_freq_tmp.loc[idx, 'y0sum'] += pre_y0sum\n\t\t\tif pre_xcat == '':\n\t\t\t\tdev_freq_tmp.loc[idx, 'xcat'] = dev_freq_tmp.loc[idx, 'xcat'].strip()\n\t\t\telse:\n\t\t\t\tdev_freq_tmp.loc[idx, 'xcat'] = pre_xcat.strip() + ', '+ dev_freq_tmp.loc[idx, 'xcat'].strip()\n\t\t\t\n\t\t\tdev_freq_tmp.loc[idx, 'ymean'] = dev_freq_tmp.loc[idx, 'y1sum']/dev_freq_tmp.loc[idx, 'n']\n\t\t\tpre_n = 0\n\t\t\tpre_y1sum = 0\n\t\t\tpre_y0sum = 0\n\t\t\tpre_xcat = ''\n\t\n\tdev_freq_tmp = dev_freq_tmp[dev_freq_tmp['woe'].notnull()]\n\tdev_freq_tmp.drop('woe', axis = 1, inplace = True)\n\t\n\treturn dev_freq_tmp\n\ndef auto_linear_char(freq_table, fvalue, x, pre_dir):\n\t\n\tvalid_nobs = len(pre_dir)\n\tpre_dir['bin_num'] = range(1,len(pre_dir)+1)\n\t\n\twoe_assign_char(freq_table = pre_dir)\n\t\n\tif valid_nobs < 2:\n\t\twoe_assign_char(freq_table = freq_table)\n\t\treturn freq_table\n\t\n\telse:\n\t\t\n\t\tif pre_dir['bin_num'].corr(pre_dir['woe']) >= 0:\n\t\t\tdir = 1\n\t\telse:\n\t\t\tdir = -1\n\t\t\n\t\tfreq_table_woe = freq_table[['bin','n','y1sum','xcat','y0sum']].copy()\n\t\tfreq_table_woe['bin_num'] = range(1,len(freq_table_woe)+1)\n\t\tfreq_table_woe.index = range(1,len(freq_table_woe)+1)\n\t\t\n\t\t\n\t\tlinearity_ind = 0\n\t\titer_step = 0\n\t\tglobal MAX_ITRN\n\t\twhile(linearity_ind == 0 and iter_step <= MAX_ITRN):\n\t\t\t\n\t\t\tfreq_table_woe = freq_table_woe.groupby('bin_num', as_index = False).apply(cat_sum)\n\t\t\tfreq_table_woe.drop('bin_num', axis = 1, inplace = True)\n\t\t\twoe_assign_char(freq_table_woe)\n\t\t\t\n\t\t\tfreq_table_woe['bin_num'] = np.nan\n\t\t\tfreq_table_woe['linearity'] = 0\t\t\n\t\t\tfreq_table_woe.index = range(1,len(freq_table_woe)+1)\n\n\t\t\tfor idx in freq_table_woe.index:\n\t\t\t\tif idx == 1:\n\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = 1\n\t\t\t\t\tfreq_table_woe.loc[idx, 'linearity'] = 1\n\t\t\t\telse:\n\t\t\t\t\tif dir == 1:\n\t\t\t\t\t\tif freq_table_woe.loc[idx, 'woe'] >= freq_table_woe.loc[idx - 1, 'woe']:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num'] + 1\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'linearity'] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num']\n\t\t\t\t\telse:\n\t\t\t\t\t\tif freq_table_woe.loc[idx, 'woe'] <= freq_table_woe.loc[idx - 1, 'woe']:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num'] + 1\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'linearity'] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num']\n\t\t\t\n\t\t\tif freq_table_woe['linearity'].sum() == len(freq_table_woe):\n\t\t\t\tlinearity_ind = 1\n\t\t\titer_step += 1\n\t\t\n\t\tfreq_table_woe['ymean'] = freq_table_woe['y1sum']/freq_table_woe['n']\n\t\tfreq_table_woe.drop(['bin_num','linearity'], axis = 1, inplace = True)\n\n\t\treturn freq_table_woe\n\ndef woe_assign_char(freq_table):\n\n\tglobal woe_cap\n\tglobal eps\n\t\n\ttot = freq_table[['y1sum', 'y0sum']].sum()\n\t\n\tfor idx in freq_table.index:\n\t\t\n\t\tif freq_table.loc[idx, 'y1sum'] == 0:\n\t\t\tfreq_table.loc[idx, 'woe'] = -woe_cap\n\t\telif freq_table.loc[idx, 'y0sum'] == 0:\n\t\t\tfreq_table.loc[idx, 'woe'] = woe_cap\n\t\telse:\n\t\t\tfreq_table.loc[idx, 'woe'] = 
math.log((freq_table.loc[idx, 'y1sum']/(tot['y1sum'] + eps))/(freq_table.loc[idx, 'y0sum']/(tot['y0sum'] + eps)))\n\t\t\n\t\tif freq_table.loc[idx, 'woe'] > woe_cap:\n\t\t\tfreq_table.loc[idx, 'woe'] = woe_cap\n\t\tif freq_table.loc[idx, 'woe'] < -woe_cap:\n\t\t\tfreq_table.loc[idx, 'woe'] = -woe_cap\n\t\ndef cat_sum(df):\n\tcnt = 0\n\tfor idx in df.index:\n\t\tcnt += 1\n\t\tif cnt > 1:\n\t\t\tdf.loc[idx, 'n'] += df.loc[idx - 1, 'n']\n\t\t\tdf.loc[idx, 'y1sum'] += df.loc[idx - 1, 'y1sum']\n\t\t\tdf.loc[idx, 'y0sum'] += df.loc[idx - 1, 'y0sum']\n\t\t\tdf.loc[idx, 'xcat'] = df.loc[idx - 1, 'xcat'].strip() + ', ' + df.loc[idx, 'xcat'].strip()\n\t\t\n\t\tif cnt == len(df.index):\n\t\t\treturn df.loc[idx]\t\t\n\ndef rank_oot_char(sum_dev,sum_oot):\n\t\n\tsum_dev['seq'] = range(len(sum_dev))\n\t\n\tsum_oot = sum_oot.merge(sum_dev[['xcat','seq']], on = 'xcat', how = 'left')\n\t\n\tsum_oot.sort_values(by = 'seq', na_position = 'first', inplace = True)\n\t\n\tsum_oot.drop('seq', axis = 1, inplace = True)\n\t\n\treturn sum_oot\n\n\ndef print_gainschart(dsin, x, label, num, outfile):\n\tundln1 = '-' * 120\n\tundln2 = '=' * 120\n\t\n\tprint('\\n', file = outfile)\n\tprint('Variable # = %.0f Variable = %s'%(num, x), file = outfile)\n\tprint(undln1, file = outfile)\n\t\n\t\n\tcnt = 0\n\tfor idx in dsin.index:\n\t\tcnt += 1\n\t\tif len(dsin.loc[idx, 'xcat'].strip()) < 100:\n\t\t\tprint('%-6.0f%-100s'%(cnt, dsin.loc[idx, 'xcat']), file = outfile)\n\t\telse:\n\t\t\txcat_lst = dsin.loc[idx, 'xcat'].strip().split(',')\n\t\t\tprint('%-6.0f'%(cnt), file = outfile, end = '')\n\t\t\tfor i,v in enumerate(xcat_lst):\n\t\t\t\tif i < len(xcat_lst) - 1:\n\t\t\t\t\tprint(' %s,'%v, file = outfile)\n\t\t\t\telse:\n\t\t\t\t\tprint(' %s'%v, file = outfile)\n\tprint(undln1, file = outfile)\n\t\t\t\n\t\n\t\n\tdsin['lenrat'] = (((5*dsin['woe']).apply(round, 1)).apply(abs) - 1).apply(lambda t: min(t, 10))\n\t\n\tdsin['lenstar'] = ''\n\t\n\tfor idx in dsin.index:\n\t\tif dsin.loc[idx, 'lenrat'] < 0:\n\t\t\tdsin.loc[idx, 'lenstar'] = ' '*10 + '+' + ' '*10\n\t\t\t\n\t\tif dsin.loc[idx, 'woe'] > 0:\n\t\t\tdsin.loc[idx, 'lenstar'] = ' '*10 +'+' + ('*' * dsin.loc[idx, 'lenrat']).ljust(10)\n\t\t\t\n\t\telse:\n\t\t\tdsin.loc[idx, 'lenstar'] = ('*' * dsin.loc[idx, 'lenrat']).rjust(10) + '+' + ' '*10\n\t\n\tprint('\\nVariable # = %.0f Variable = %s'%(num, x), file = outfile)\n\tprint(undln1, file = outfile)\n\tprint(' # # %cum # %cum odds % histogram of', file = outfile)\n\tprint(' # x total (y=1) (y=1) (y=0) (y=0) ratio (y=1) woe ks woe (normalized)', file = outfile)\n\tprint(undln1, file = outfile)\n\n\n\tcnt = 0\n\tfor idx in dsin.index:\n\t\tcnt += 1\n\t\t\n\t\ty1sum = int(dsin.loc[idx, 'y1sum'])\n\t\ty0sum = int(dsin.loc[idx, 'y0sum'])\n\t\todds_ratio = str(round(dsin.loc[idx, 'ratio'],0))\n\t\tif dsin.loc[idx, 'ratio'] >= 1e6:\n\t\t\todds_ratio = '+Inf'\n\t\tif odds_ratio[-2:] == '.0':\n\t\t\todds_ratio = odds_ratio[:-2]\n\t\tprint('%-6.0f%3s%15s%10s%9.2f%10s%9.2f%8s%9.2f%10.3f%9.2f%22s'%(cnt, 'cat', '{0:,}'.format(dsin.loc[idx, 'n']),'{0:,}'.format(y1sum),dsin.loc[idx, 'p_cum_y1'],'{0:,}'.format(y0sum),\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t dsin.loc[idx, 'p_cum_y0'],odds_ratio,dsin.loc[idx, 'rate_y1'],dsin.loc[idx, 'woe'],dsin.loc[idx, 'ks'],dsin.loc[idx, 'lenstar']), file = outfile)\n\n\tprint(undln2, file = outfile)\n\tprint('%-5s%19s%10s%19s%26.2f Max KS =%6.2f Info Val =%8.4f'%('Total', '{0:,}'.format(dsin.loc[dsin.index[-1], 'tot']),'{0:,}'.format(dsin.loc[dsin.index[-1], 
'tot_y1']),'{0:,}'.format(dsin.loc[dsin.index[-1], 'tot_y0']),\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdsin.loc[dsin.index[-1], 'totrate_y1'],dsin.loc[dsin.index[-1], 'maxks'],dsin.loc[dsin.index[-1], 'tot_info']), file = outfile)\n\tprint(undln2, file = outfile)\n\t\n\t\ndef get_ksinfo(dsin, y_stat, fvalue):\n\t\n\tglobal eps\n\tglobal woe_cap\n\tdsout = dsin.copy()\n\tdsout['tot_y1'] = y_stat.loc[1,'weighted']\n\tdsout['tot_y0'] = y_stat.loc[0,'weighted']\n\tdsout['tot'] = dsout['tot_y1'] + dsout['tot_y0']\n\t\n\tdsout['cum_y1'] = dsout['y1sum'].cumsum()\n\tdsout['cum_y0'] = dsout['y0sum'].cumsum()\n\t\n\tdsout['p_cum_y1'] = 100 * dsout['cum_y1']/(dsout['tot_y1'] + eps)\n\tdsout['p_cum_y0'] = 100 * dsout['cum_y0']/(dsout['tot_y0'] + eps)\n\tdsout['ks'] = (dsout['p_cum_y1'] - dsout['p_cum_y0']).apply(abs)\n\tdsout['maxks'] = dsout['ks'].max()\n\t\n\tdsout['p_y1'] = dsout['y1sum']/dsout['tot_y1']\n\tdsout['p_y0'] = dsout['y0sum']/dsout['tot_y0']\n\t\n\tdsout['woe'] = ((dsout['p_y1']+ eps)/(dsout['p_y0']+ eps)).apply(math.log)\n\tdsout['woe'] = dsout['woe'].apply(lambda t: min(t,woe_cap))\n\tdsout['woe'] = dsout['woe'].apply(lambda t: max(t,-woe_cap))\n\t\t\n\tfor idx in dsout.index:\n\t\tif str(dsout.loc[idx, 'woe']) == 'nan' or dsout.loc[idx, 'y1sum'] + dsout.loc[idx, 'y0sum'] < fvalue:\n\t\t\tdsout.loc[idx, 'woe'] = 0.0\n\n\t\t\n\tdsout['info'] = (dsout['p_y1'] - dsout['p_y0']) * dsout['woe']\n\tdsout['tot_info'] = dsout['info'].cumsum()\n\t\n\tdsout['ratio'] = 100 * dsout['p_y1']/(dsout['p_y0'] + eps)\n\tdsout['rate_y1'] = 100 * dsout['y1sum']/(dsout['y1sum'] + dsout['y0sum'] + eps)\n\tdsout['totrate_y1'] = 100 * dsout['tot_y1']/(dsout['tot_y1'] + dsout['tot_y0'] + eps) \n\n\treturn dsout\n\t\ndef get_overall_stats(dsin, x, wgt):\n\tglobal eps\n\tn_valid = dsin[dsin[x].notnull()][wgt].count()\n\tn_miss = dsin[dsin[x].isnull()][wgt].count()\n\treturn_dict = {}\n\treturn_dict['TOTAL_RECORD'] = n_valid + n_miss\n\treturn_dict['TOTAL_RECORD_NONMISSING'] = n_valid\n\treturn_dict['VALIDITY'] = 100 * n_valid/(n_valid + n_miss + eps)\n\treturn return_dict\n\ndef cal_woe_num(df_dev, x, y, wgt, groups, postfix, fvalue, label, num, df_oot, force_corr, dev_y_stats, oot_y_stats, num_special_value_list):\n\t\n\tdev_overall_stats = get_overall_stats(dsin = df_dev, x = x, wgt = wgt)\n\toot_overall_stats = get_overall_stats(dsin = df_oot, x = x, wgt = wgt)\n\n\t\n\tif dev_overall_stats['TOTAL_RECORD_NONMISSING'] == 0 or oot_overall_stats['TOTAL_RECORD_NONMISSING'] == 0:\n\t\tprint('WARNING: variable {0} has missing value for all rows!'.format(x), sys.stdout)\n\t\treturn 0\n\t\n\t#Append the bin column to DEV\n\t_rtmp_dev = weightedBin(df_dev, wgt).numBin(var = x, groups = groups, bin_tag = None)\n\t\n\t#Calculate stats by x bin on DEV\n\t_devfreq = get_frequency(_rtmp_dev, x, y, wgt)\n\n\t_devfreq['bin'] = _devfreq.index\n\t\n\t#Calculate KS, WoE, IV, etc on grouped DEV\n\t_devks = get_ksinfo_num(dsin = _devfreq, y_stat = dev_y_stats)\t\n\t\n\t#Output WoE Patterns to .out file\n\tglobal file_num_out\n\tprint_gainschart_num(dsin = _devks, x = x, label = label, num = num, outfile = file_num_out['dev'], overall_stats = dev_overall_stats)\n\t\n\t#Apply the group on OOT\t\n\t_rtmp_oot = rank_oot(dsin=df_oot, xrank=_devfreq, x = x)\n\t \n\t#Calculate stats by x bin on OOT\n\t_ootfreq = get_frequency(_rtmp_oot, x, y, wgt)\n\t\n\t#Calculate KS, WoE, IV, etc on grouped OOT\n\t_ootks = get_ksinfo_num(dsin = _ootfreq, y_stat = oot_y_stats)\n\t\n\t#Output WoE Patterns to .out file\n\t#global 
file_num_out\n\tprint_gainschart_num(dsin = _ootks, x = x, label = label, num = num, outfile = file_num_out['oot'], overall_stats = oot_overall_stats)\n\t\n\t#print(_devfreq)\n\t#Linearize WoE patterns on DEV using Greedy Algorithm\n\t_devfreq = auto_linear(freq_table = _devfreq, x = x, pre_dir = force_corr, num_special_value_lst = num_special_value_list)\n\t#print(_devfreq)\n\t#Apply linearized WoE patterns on OOT\n\t_rtmp_oot = rank_oot(dsin=df_oot, xrank=_devfreq, x = x)\n\t\n\t#Calculate stats by x bin on OOT\n\t_ootfreq = get_frequency(_rtmp_oot, x, y, wgt)\n\t_ootfreq['bin'] = _ootfreq.index\n\t\n\t#Use the woe trend direction of DEV to further linearize OOT to ensure consistency\n\tdev_corr_dir = _devfreq.loc[_devfreq.index[0],'direction']\n\t_ootfreq = auto_linear(freq_table = _ootfreq, x = x, pre_dir = dev_corr_dir, num_special_value_lst = num_special_value_list)\n\t#print(_ootfreq)\n\t#Get the final bins and determine if special bins need to be neutralized\n\tglobal cnstncy_check\n\t_devfreq = consistency_chk(dev_freq = _devfreq, oot_freq = _ootfreq, x = x, fvalue = fvalue, num_special_value_lst = num_special_value_list, output = cnstncy_check)\n\n\t#Calculate stats based on ultimate woe patterns\n\t_devks = get_ksinfo_num(dsin = _devfreq, y_stat = dev_y_stats)\n\tprint_gainschart_num(dsin = _devks, x = x, label = label, num = num, outfile = file_num_out['dev'], overall_stats = dev_overall_stats)\n\t\n\t#Print out WoE production code\n\twoe_var_name = woe_naming(x, postfix) \n\t\n\tglobal file_num_woe\n\tprint_woe_num(data = _devks, x = x, woe_var_name = woe_var_name, label = label, outfile = file_num_woe)\n\t\n\tglobal file_num_csv\n\tprint_ksinfo_num(data = _devks, x = x, label = label, num = num, outfile = file_num_csv['dev'], woe_var_name = woe_var_name) \n\n\t_rtmp_oot = rank_oot(dsin=df_oot, xrank=_devfreq, x = x)\n\t_ootfreq = get_frequency(_rtmp_oot, x, y, wgt)\n\t_ootks = get_ksinfo_num(dsin = _ootfreq, y_stat = oot_y_stats)\n\t\n\tprint_ksinfo_num(data = _ootks, x = x, label = label, num = num, outfile = file_num_csv['oot'], woe_var_name = woe_var_name)\n\tprint_gainschart_num(dsin = _ootks, x = x, label = label, num = num, outfile = file_num_out['oot'], overall_stats = oot_overall_stats)\n\t\n\t_devks = psi(_devks, _ootks)\n\treturn _devks[['bin', 'xmin','xmax','neutral_ind','woe', 'n', 'rate_y1', 'totrate_y1', 'psi_ptot', 'psi_br']]\n\t\ndef psi(ks_dev, ks_oot):\n\t\n\n\tks_dev['ptot'] = ks_dev['n']/ks_dev['n'].sum()\n\tks_oot['ptot_oot'] = ks_oot['n']/ks_oot['n'].sum()\n\tks_oot.rename(columns = {'ymean':'ymean_oot'}, inplace = True)\n\t\n\tks_dev = ks_dev.merge(ks_oot[['ptot_oot', 'ymean_oot']], left_on = 'bin', right_index = True, how = 'left')\n\t\n\tfor col in ['ptot', 'ptot_oot', 'ymean', 'ymean_oot']:\n\t\tks_dev[col] = ks_dev[col].fillna(0)\n\t\n\tks_dev['psi_ptot'] = (ks_dev['ptot'] - ks_dev['ptot_oot']) * ((ks_dev['ptot'] + eps)/(ks_dev['ptot_oot'] + eps)).apply(math.log)\n\tks_dev['psi_br'] = (ks_dev['ymean'] - ks_dev['ymean_oot']) * ((ks_dev['ymean'] + eps)/(ks_dev['ymean_oot'] + eps)).apply(math.log)\n\t\n\treturn ks_dev\n\ndef print_ksinfo_num(data, x, label, num, outfile, woe_var_name):\n\t\n\tprint('%s,%s,%s,%.2f,%.2f,%.2f,%s'%(num,x,woe_var_name,data.loc[data.index[-1],'maxks'],data.loc[data.index[-1],'tot_info'],data.loc[data.index[-1],'linearity'],label), file = outfile)\n\t\n\ndef print_woe_num(data, x, woe_var_name, label, outfile):\n\tprint('\\n\\n/* WOE recoding for %s */'%x, file = outfile)\n\tfor idx in data.index:\n\t\tif data.loc[idx, 
'neutral_ind'] == 1:\n\t\t\tdata.loc[idx, 'woe'] = 0.0\n\t\tif idx == 0:\n\t\t\tif str(data.loc[idx, 'xmax']) == 'nan' and data.loc[idx, 'neutral_ind'] == 0:\n\t\t\t\tmis_woe = data.loc[idx, 'woe']\n\t\t\telse:\n\t\t\t\tmis_woe = 0.0\n\t\t\tprint('if %s = . then %s = %.6f;'%(x, woe_var_name, mis_woe), file = outfile)\n\t\t\t\n\t\t\t\n\t\t\tif str(data.loc[idx, 'xmax']) != 'nan':\n\t\t\t\tprint('else if ( -1e38 < %s <= %.6f) then %s = %.6f;'%(x,data.loc[idx, 'xmax'],woe_var_name,data.loc[idx, 'woe']), file = outfile)\n\t\t\n\t\telif idx == data.index[-1]:\n\t\t\t\n\t\t\tif str(data.loc[idx - 1, 'xmax']) == 'nan':\n\t\t\t\tprint('else if ( %s > -1e38 ) then %s = %.6f;'%(x, woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\t\t\n\t\t\telse:\n\t\t\t\tprint('else if ( %s > %.6f ) then %s = %.6f;'%(x, data.loc[idx - 1, 'xmax'],woe_var_name,data.loc[idx, 'woe']), file = outfile)\n\t\t\t\tprint('else %s = 0.0;'%woe_var_name, file = outfile)\t\t\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif str(data.loc[idx - 1, 'xmax']) == 'nan':\n\t\t\t\tprint('else if ( -1e38 < %s <= %.6f ) then %s = %.6f;'%(x, data.loc[idx, 'xmax'],woe_var_name,data.loc[idx, 'woe']), file = outfile)\n\t\t\telse:\n\t\t\t\tprint('else if ( %.6f < %s <= %.6f ) then %s = %.6f;'%(data.loc[idx - 1, 'xmax'], x, data.loc[idx, 'xmax'], woe_var_name, data.loc[idx, 'woe']), file = outfile)\n\t\t\t\t\n\tif len(data) == 1:\n\t\tprint('else %s = 0.0;'%woe_var_name, file = outfile)\n\t\n\ndef woe_naming(varname, postfix):\n\t\n\trn = (hashlib.md5(varname.encode('utf8')).hexdigest().upper())[:4]\n\tif len(varname) > 25:\n\t\treturn 'w' + varname[:16] + '_' + varname[-3:] + '_' + rn + postfix\n\telse:\n\t\treturn 'w' + varname + postfix\n\n\ndef consistency_chk(dev_freq, oot_freq, x, fvalue, num_special_value_lst, output):\n\t\n\tvalid_obs = len(oot_freq[oot_freq['special_bin'] == 0])\n\t\n\t_t_ = dev_freq.copy()\n\t_t_.index.name = ''\n\t\n\t_tt_ = oot_freq[['bin', 'woe']]\n\t_tt_.index.name = ''\n\t\n\t#print(_t_)\n\t#print(_tt_)\n\t\n\tmerged = pd.merge(_t_,_tt_, on = 'bin', how = 'left')\n\t\n\t#print(merged)\n\t\n\tfor idx in merged.index:\n\t\tif math.isnan(merged.loc[idx, 'woe_y']) == False:\n\t\t\tmerged.loc[idx, 'rx_retain'] = merged.loc[idx, 'bin']\n\t\telif len(merged) == 1:\n\t\t\tmerged.loc[idx, 'rx_retain'] = merged.loc[idx, 'bin']\n\t\telse:\n\t\t\ttry:\n\t\t\t\tmerged.loc[idx, 'rx_retain'] = merged.loc[idx - 1, 'rx_retain']\n\t\t\texcept:\n\t\t\t\tmerged.loc[idx, 'rx_retain'] = merged.loc[idx + 1, 'bin']\n\n\t\n\t\n\tdev_freq = merged.groupby('rx_retain', as_index = False).agg({'bin':'min','xmin':'min','tot_y1':'min','tot_y0':'min','xmax':'max','xsum':'sum','n':'sum','y1sum':'sum','nvar':'sum','ry1sum':'sum','y0sum':'sum','ry0sum':'sum'})\n\t\n\tdev_freq['xmean'] = dev_freq['xsum']/dev_freq['n']\n\tdev_freq['ymean'] = dev_freq['y1sum']/dev_freq['n']\n\t\n\twoe_assign(dev_freq, num_special_value_lst = num_special_value_lst)\n\tdev_freq.drop('rx_retain', axis = 1, inplace = True)\n\t\n\t\n\t_final_chk = pd.merge(dev_freq,oot_freq[['bin','woe','nvar', 'n']], on = 'bin', how = 'left')\n\t\n\t_final_chk['neutral_ind'] = 0\n\t\n\t\n\tfor idx in _final_chk.index:\n\t\t\n\t\tif valid_obs == 1:\n\t\t\tif _final_chk.loc[idx,'xmin'] != _final_chk.loc[idx,'xmax'] and (_final_chk.loc[idx,'woe_x'] * _final_chk.loc[idx,'woe_y'] < 0):\n\t\t\t\t_final_chk.loc[idx,'neutral_ind'] = 1\n\t\t\n\t\t\n\t\tif _final_chk.loc[idx,'special_bin'] == 1 and (_final_chk.loc[idx,'woe_x'] * _final_chk.loc[idx,'woe_y'] < 0 or _final_chk.loc[idx,'nvar_x'] < fvalue or 
_final_chk.loc[idx,'nvar_y'] < fvalue):\n\t\t\t_final_chk.loc[idx,'neutral_ind'] = 1\n\t\n\tdev_freq = dev_freq.merge(_final_chk[['bin','neutral_ind']], on = 'bin', how = 'left')\n\tglobal woe_cap\n\tdev_freq['bin_mrg'] = 0\n\tfor idx in dev_freq.index:\n\t\tif idx == 0:\n\t\t\tdev_freq.loc[idx, 'bin_mrg'] = dev_freq.loc[idx, 'bin']\n\t\telse:\n\t\t\tif dev_freq.loc[idx - 1, 'special_bin'] == 0 and dev_freq.loc[idx, 'special_bin'] == 0 and abs(dev_freq.loc[idx, 'woe']) == woe_cap and dev_freq.loc[idx, 'woe'] == dev_freq.loc[idx - 1, 'woe']:\n\t\t\t\tdev_freq.loc[idx, 'bin_mrg'] = dev_freq.loc[idx - 1, 'bin_mrg']\n\t\t\telse:\n\t\t\t\tdev_freq.loc[idx, 'bin_mrg'] = dev_freq.loc[idx, 'bin']\n\t\n\tdev_freq = dev_freq.groupby('bin_mrg', as_index = False).agg({'bin':'min','xmin':'min','tot_y1':'min','tot_y0':'min','xmax':'max','xsum':'sum','n':'sum','y1sum':'sum','nvar':'sum','ry1sum':'sum','y0sum':'sum','ry0sum':'sum','neutral_ind':'max'})\n\tdev_freq['xmean'] = dev_freq['xsum']/dev_freq['n']\n\tdev_freq['ymean'] = dev_freq['y1sum']/dev_freq['n']\n\twoe_assign(dev_freq, num_special_value_lst = num_special_value_lst)\n\tdev_freq.drop(['bin_mrg','xsum'], axis = 1, inplace = True)\n\tprint('\\nConsistency Check -- %s'%x, file = output)\n\tprint('%-15s%-15s%-11s%-13s%-13s%-22s%-22s%-14s%-14s%-25s'%('bin', 'min', 'max', 'DEV WoE', 'OOT WoE', 'weighted # - DEV', 'weighted # - OOT', 'raw # - DEV', 'raw # - OOT', 'neutralize or not'), file = output)\n\t\n\tfor idx in _final_chk.index:\n\t\t\n\t\tif str(_final_chk.loc[idx,'xmax']) == 'nan':\n\t\t\txmax = '.'\n\t\telse:\n\t\t\trd = 0\n\t\t\tfor i in range(4,0,-1):\n\t\t\t\tif _final_chk.loc[idx,'xmax'] < 10 ** (7 - i):\n\t\t\t\t\trd = i\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\txmax = str(round(_final_chk.loc[idx,'xmax'], rd))\n\t\t\tif xmax[-2:] == '.0':\n\t\t\t\txmax = xmax[:-2]\n\t\t\n\t\tif str(_final_chk.loc[idx,'xmin']) == 'nan':\n\t\t\txmin = '.'\n\t\telse:\n\t\t\trd = 0\n\t\t\tfor i in range(4,0,-1):\n\t\t\t\tif _final_chk.loc[idx,'xmin'] < 10 ** (7 - i):\n\t\t\t\t\trd = i\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\txmin = str(round(_final_chk.loc[idx,'xmin'], rd))\n\t\t\tif xmin[-2:] == '.0':\n\t\t\t\txmin = xmin[:-2]\n\t\t\n\t\tprint('%-3s%15s%15s%15.4f%13.4f%22.0f%22.0f%17.0f%14.0f%20.0f'%(_final_chk.loc[idx,'bin'],xmin,xmax,_final_chk.loc[idx,'woe_x'],_final_chk.loc[idx,'woe_y'],_final_chk.loc[idx,'n_x'],_final_chk.loc[idx,'n_y'],_final_chk.loc[idx,'nvar_x'],_final_chk.loc[idx,'nvar_y'],_final_chk.loc[idx,'neutral_ind']), file = output)\n\t\t\n\treturn dev_freq\t\n\t\ndef auto_linear(freq_table, x, pre_dir, num_special_value_lst):\n\ttot = freq_table[['y1sum', 'y0sum']].sum()\n\t\n\t#Unique value bins are exempted from linearity check\n\tflag_check = freq_table[(freq_table['xmin'].notnull())&(freq_table['xmax'].notnull())]\n\t\n\t#Check if var x is a 0-1 flag\n\tflag_ind = 0\n\tif len(flag_check) == 2 and flag_check.loc[flag_check.index[0], 'xmin'] == 0 and flag_check.loc[flag_check.index[0], 'xmax'] == 0\\\n\t\t\t\t\t\t\t\t\t\t\t\t\tand flag_check.loc[flag_check.index[1], 'xmin'] == 1 and flag_check.loc[flag_check.index[1], 'xmax'] == 1:\n\t\tflag_ind = 1\n\t\n\twoe_assign(freq_table, num_special_value_lst)\n\n\t\n\tif flag_ind == 1:\n\t\tfor idx in freq_table.index:\n\t\t\tif freq_table.loc[idx, 'xmin'] == freq_table.loc[idx, 'xmax'] and freq_table.loc[idx, 'xmax'] == 0:\n\t\t\t\tfreq_table.loc[idx, 'special_bin'] = 0\n\t\t\tif freq_table.loc[idx, 'xmin'] == freq_table.loc[idx, 'xmax'] and freq_table.loc[idx, 'xmax'] == 1:\n\t\t\t\tfreq_table.loc[idx, 
'special_bin'] = 0\n\t\n\t_tmp4corr = freq_table[freq_table['special_bin'] == 0]\n\t#If there is at most one non-unique-value bin, then return directly\n\tif len(_tmp4corr) <= 1:\n\t\t\n\t\tfreq_table['xsum'] = freq_table['xmean'] * freq_table['n']\n\t\tfreq_table['tot_y1'] = tot['y1sum']\n\t\tfreq_table['tot_y0'] = tot['y0sum']\n\t\tfreq_table['valid_nobs'] = len(_tmp4corr)\n\t\tfreq_table['direction'] = 0\n\t\treturn freq_table\n\t\n\telse:\n\t\tif str(pre_dir) == 'nan':\n\t\t\tif freq_table['xmin'].corr(freq_table['woe']) >= 0:\n\t\t\t\tdir = 1\n\t\t\telse:\n\t\t\t\tdir = -1 \n\t\telse:\n\t\t\tdir = pre_dir\n\t\t\t\n\t\tfreq_table_woe = freq_table.copy()\n\t\tfreq_table_woe['bin_num'] = range(len(freq_table_woe))\n\t\tfreq_table_woe['xsum'] = freq_table_woe['xmean'] * freq_table_woe['n']\n\t\t\n\t\tlinearity_ind = 0\n\t\titer_step = 0\n\t\tglobal MAX_ITRN\n\t\t\n\t\twhile(linearity_ind == 0 and iter_step <= MAX_ITRN):\n\t\t\t\n\t\t\tfreq_table_woe = freq_table_woe.groupby('bin_num', as_index = False).agg({'bin':'min','xmin':'min','xmax':'max','xsum':'sum','n':'sum','y1sum':'sum','nvar':'sum','ry1sum':'sum','y0sum':'sum','ry0sum':'sum'})\n\t\t\tfreq_table_woe.drop('bin_num', axis = 1, inplace = True)\n\t\t\tfreq_table_woe.index = range(1, len(freq_table_woe) + 1)\n\t\t\twoe_assign(freq_table_woe, num_special_value_lst)\n\t\t\t\n\t\t\t\n\t\t\tfreq_table_woe['bin_num'] = np.nan\n\t\t\tfreq_table_woe['linearity'] = 0\t\t\t\n\t\t\tfor idx in freq_table_woe.index:\n\t\t\t\tif idx == 1:\n\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = 1\n\t\t\t\t\tfreq_table_woe.loc[idx, 'linearity'] = 1\n\t\t\t\telse:\n\t\t\t\t\tif dir == 1:\n\t\t\t\t\t\tif freq_table_woe.loc[idx, 'woe'] >= freq_table_woe.loc[idx - 1, 'woe'] or freq_table_woe.loc[idx, 'special_bin'] == 1 or freq_table_woe.loc[idx - 1, 'special_bin'] == 1:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num'] + 1\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'linearity'] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num']\n\t\t\t\t\telse:\n\t\t\t\t\t\tif freq_table_woe.loc[idx, 'woe'] <= freq_table_woe.loc[idx - 1, 'woe'] or freq_table_woe.loc[idx, 'special_bin'] == 1 or freq_table_woe.loc[idx - 1, 'special_bin'] == 1:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num'] + 1\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'linearity'] = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfreq_table_woe.loc[idx, 'bin_num'] = freq_table_woe.loc[idx - 1, 'bin_num']\n\t\t\t\n\t\t\tif freq_table_woe['linearity'].sum() == len(freq_table_woe):\n\t\t\t\tlinearity_ind = 1\n\t\t\titer_step += 1\n\t\t\n\t\t\n\t\t\n\t\t#Return final linearized woe table\n\t\tfreq_table_woe['xmean'] = freq_table_woe['xsum']/freq_table_woe['n']\n\t\tfreq_table_woe['ymean'] = freq_table_woe['y1sum']/freq_table_woe['n']\n\t\tfreq_table_woe['direction'] = dir\n\t\tfreq_table_woe['tot_y1'] = tot['y1sum']\n\t\tfreq_table_woe['tot_y0'] = tot['y0sum']\n\t\tfreq_table_woe['valid_nobs'] = len(_tmp4corr)\n\t\t\n\t\tfreq_table_woe.drop(['bin_num','linearity'], axis = 1, inplace = True)\n\t\treturn freq_table_woe\t\n\n\ndef woe_assign(freq_table, num_special_value_lst):\n\t\n\tglobal woe_cap\n\tglobal eps\n\t\n\ttot = freq_table[['y1sum', 'y0sum']].sum()\n\t\n\tfreq_table['special_bin'] = 0\n\tfor idx in freq_table.index:\n\t\tif math.isnan(freq_table.loc[idx, 'xmin']) and math.isnan(freq_table.loc[idx, 'xmax']):\n\t\t\tfreq_table.loc[idx, 'special_bin'] = 1\n\t\t\n\t\tif num_special_value_lst != 
None:\n\t\t\tif (freq_table.loc[idx, 'xmin'] == freq_table.loc[idx, 'xmax'] or str(freq_table.loc[idx, 'xmin']) == str(freq_table.loc[idx, 'xmax'])) and freq_table.loc[idx, 'xmin'] in num_special_value_lst:\n\t\t\t\tfreq_table.loc[idx, 'special_bin'] = 1\n\t\t\n\t\tif freq_table.loc[idx, 'y1sum'] == 0:\n\t\t\tfreq_table.loc[idx, 'woe'] = -woe_cap\n\t\telif freq_table.loc[idx, 'y0sum'] == 0:\n\t\t\tfreq_table.loc[idx, 'woe'] = woe_cap\n\t\telse:\n\t\t\tfreq_table.loc[idx, 'woe'] = math.log((freq_table.loc[idx, 'y1sum']/(tot['y1sum'] + eps))/(freq_table.loc[idx, 'y0sum']/(tot['y0sum'] + eps)))\n\t\t\n\t\tif freq_table.loc[idx, 'woe'] > woe_cap:\n\t\t\tfreq_table.loc[idx, 'woe'] = woe_cap\n\t\tif freq_table.loc[idx, 'woe'] < -woe_cap:\n\t\t\tfreq_table.loc[idx, 'woe'] = -woe_cap\n\t\n\n\ndef rank_oot(dsin, xrank, x):\n\trank_df = xrank[xrank['xmin'].notnull()][['bin','xmin']]\n\trank_val = rank_df['xmin'].copy()\n\tglobal mInf\n\trank_val.loc[rank_val.index[0]] = mInf\n\trank_dict = dict(zip(rank_df['bin'], rank_val))\n\t#Using apply() to carry out vectorized calculatioin\n\tdsin[x + '_bin'] = dsin[x].apply(rank, rank_dict = rank_dict)\n\treturn dsin\n\ndef rank(var, rank_dict):\n\t\n\tif str(var) == 'nan':\n\t\treturn 0\n\telse:\n\t\tbin_tmp = None\n\t\tfor idx in sorted(rank_dict.keys()):\n\t\t\tif var >= rank_dict[idx]:\n\t\t\t\tbin_tmp = int(idx)\n\t\t\telse:\n\t\t\t\tbreak\n\t\treturn bin_tmp\t\n\t\ndef get_ksinfo_num(dsin, y_stat):\n\tglobal eps\n\tglobal woe_cap\n\tdsout = dsin.copy()\n\tdsout['cum_y1'] = dsout['y1sum'].cumsum()\n\tdsout['cum_y0'] = dsout['y0sum'].cumsum()\n\tdsout['cum_y'] = dsout['nvar'].cumsum()\n\tdsout['cum_ry1'] = dsout['ry1sum'].cumsum()\n\tdsout['cum_ry0'] = dsout['ry0sum'].cumsum()\n\t\n\tdsout['tot_y1'] = y_stat.loc[1,'weighted']\n\tdsout['tot_y0'] = y_stat.loc[0,'weighted']\n\tdsout['tot'] = dsout['tot_y1'] + dsout['tot_y0'] \n\tdsout['tot_ry1']= y_stat.loc[1,'raw']\n\tdsout['tot_ry0']= y_stat.loc[0,'raw']\n\tdsout['tot_y'] = dsout['tot_ry1'] + dsout['tot_ry0']\n\t\n\tdsout['p_cum_y1'] = 100 * dsout['cum_y1']/(dsout['tot_y1'] + eps)\n\tdsout['p_cum_y0'] = 100 * dsout['cum_y0']/(dsout['tot_y0'] + eps)\n\tdsout['ks'] = (dsout['p_cum_y1'] - dsout['p_cum_y0']).apply(abs)\n\tdsout['maxks'] = dsout['ks'].max()\n\tdsout['p_y1'] = dsout['y1sum']/dsout['tot_y1']\n\tdsout['p_y0'] = dsout['y0sum']/dsout['tot_y0']\n\tdsout['woe'] = ((dsout['p_y1']+ eps)/(dsout['p_y0']+ eps)).apply(math.log)\n\tdsout['woe'] = dsout['woe'].apply(lambda t: min(t,woe_cap))\n\tdsout['woe'] = dsout['woe'].apply(lambda t: max(t,-woe_cap))\n\t\t\n\tdsout['info'] = (dsout['p_y1'] - dsout['p_y0']) * dsout['woe']\n\tdsout['tot_info'] = dsout['info'].cumsum()\n\t\n\tdsout['ratio'] = 100 * dsout['p_y1']/(dsout['p_y0'] + eps)\n\tdsout['rate_y1'] = 100 * dsout['y1sum']/(dsout['y1sum'] + dsout['y0sum'] + eps)\n\tdsout['totrate_y1'] = 100 * dsout['tot_y1']/(dsout['tot_y1'] + dsout['tot_y0'] + eps) \n\t\n\tpre_woe = 0\n\tpos_trend = 0\n\tneg_trend = 0\n\tfor idx in dsout.index:\n\t\tif dsout.loc[idx,'woe'] >= pre_woe:\n\t\t\tpos_trend += 1\n\t\telse:\n\t\t\tneg_trend += 1\n\t\tpre_woe = dsout.loc[idx,'woe']\n\t\n\tdsout['linearity'] = 0\n\tif len(dsout) > 1:\n\t\tdsout.loc[dsout.index[-1], 'linearity'] = abs(pos_trend - neg_trend) * 100.0/(len(dsout) - 1 + eps)\n\treturn dsout \n\ndef print_gainschart_num(dsin, x, label, num, outfile, overall_stats):\n\t\n\tundln1 = '-' * 155\n\tundln2 = '=' * 155\n\t\n\tprint('\\n', file = outfile, end = '')\n\tprint('Variable # = {0} Variable = {1} Label = {2} 
'.format(num, x, label), file = outfile, end = '')\n\tprint(\"# obs = {0} # valid = {1} % valid = {2:.2f}%\".format(overall_stats['TOTAL_RECORD'], overall_stats['TOTAL_RECORD_NONMISSING'], overall_stats['VALIDITY']), file = outfile)\n\tprint(undln1, file = outfile)\n\tprint(r' # # # # %cum # %cum odds % histogram of', file = outfile)\n\tprint(r' # xmax raw total raw (y=0) wgt total (y=1) (y=1) (y=0) (y=0) ratio (y=1) woe ks iv woe (normalized)', file = outfile)\n\tprint(undln1, file = outfile)\n\t\n\t\n\tdsin['lenrat'] = (((5*dsin['woe']).apply(round, 1)).apply(abs) - 1).apply(lambda t: min(t, 10))\n\t\n\tdsin['lenstar'] = ''\n\t\n\tfor idx in dsin.index:\n\t\tif dsin.loc[idx, 'lenrat'] < 0:\n\t\t\tdsin.loc[idx, 'lenstar'] = ' '*10 + '+' + ' '*10\n\t\t\t\n\t\tif dsin.loc[idx, 'woe'] > 0:\n\t\t\tdsin.loc[idx, 'lenstar'] = ' '*10 +'+' + ('*' * dsin.loc[idx, 'lenrat']).ljust(10)\n\t\t\t\n\t\telse:\n\t\t\tdsin.loc[idx, 'lenstar'] = ('*' * dsin.loc[idx, 'lenrat']).rjust(10) + '+' + ' '*10\n\t\t\n\t\try0sum = int(dsin.loc[idx, 'ry0sum'])\n\t\ty1sum = int(dsin.loc[idx, 'y1sum'])\n\t\ty0sum = int(dsin.loc[idx, 'y0sum'])\n\t\t\n\t\t\n\t\tif str(dsin.loc[idx,'xmax']) == 'nan':\n\t\t\txmax = '.'\n\t\telse:\n\t\t\trd = 0\n\t\t\tfor i in range(4,0,-1):\n\t\t\t\tif dsin.loc[idx,'xmax'] < 10 ** (7 - i):\n\t\t\t\t\trd = i\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\txmax = str(round(dsin.loc[idx,'xmax'], rd))\n\t\t\tif xmax[-2:] == '.0':\n\t\t\t\txmax = xmax[:-2]\n\t\t\n\t\todds_ratio = str(round(dsin.loc[idx, 'ratio'],0))\n\t\tif dsin.loc[idx, 'ratio'] >= 1e6:\n\t\t\todds_ratio = '+Inf'\n\t\tif odds_ratio[-2:] == '.0':\n\t\t\todds_ratio = odds_ratio[:-2]\n\t\t\n\t\tprint(\"%-6.0f%8s%11s%10s%12s%10s%10.2f%11s%9.2f%8s%8.2f%11.6f%7.3f%9.3f%24s\"%(idx, xmax, '{0:,}'.format(dsin.loc[idx, 'nvar']),'{0:,}'.format(ry0sum),\\\n\t\t '{0:,}'.format(dsin.loc[idx, 'n']),'{0:,}'.format(y1sum),dsin.loc[idx, 'p_cum_y1'], '{0:,}'.format(y0sum),\\\n\t\t dsin.loc[idx, 'p_cum_y0'], odds_ratio, dsin.loc[idx, 'rate_y1'], dsin.loc[idx, 'woe'],dsin.loc[idx, 'ks'],\\\n\t\t dsin.loc[idx, 'info'], dsin.loc[idx, 'lenstar']), file = outfile)\n\t\t\n\t\n\tprint(undln2, file = outfile)\n\tprint('%-16s%9s%10s%12s%10s%21s%25.2f Max KS = %-7.3fIV = %-8.3fLinearity = %-5.2f%%'%('Total','{0:,}'.format(dsin.loc[dsin.index[-1],'tot_y']),'{0:,}'.format(dsin.loc[dsin.index[-1],'tot_ry0']),\\\n\t '{0:,}'.format(dsin.loc[dsin.index[-1],'tot']), '{0:,}'.format(dsin.loc[dsin.index[-1],'tot_y1']), '{0:,}'.format(dsin.loc[dsin.index[-1],'tot_y0']),\\\n\t dsin.loc[dsin.index[-1],'totrate_y1'], dsin.loc[dsin.index[-1],'maxks'], dsin.loc[dsin.index[-1],'tot_info'], dsin.loc[dsin.index[-1],'linearity']), file = outfile)\n\tprint(undln2, file = outfile)\n\tprint('\\n', file = outfile)\n\ndef get_frequency(dsin, x, y, wgt):\n\n\t_tmpvar = dsin.groupby(x + '_bin')[y].agg(['count','sum','mean'])\n\t_tmpvar.rename(columns = {'count':'nvar', 'sum':'ry1sum', 'mean':'rymean'}, inplace = True)\n\tdsin['x_wgt'] = dsin[x] * dsin[wgt]\n\tdsin['y_wgt'] = dsin[y] * dsin[wgt]\n\t\n\t\n\tx_stats_wgt = dsin.groupby(x + '_bin')[x].agg(['min','max'])\n\tx_stats_wgt.rename(columns = {'min':'xmin','max':'xmax'}, inplace = True)\n\tx_stats_wgt2 = DataFrame(dsin.groupby(x + '_bin')['x_wgt'].sum())\n\tx_stats_wgt2.rename(columns = {'x_wgt':'xsum'}, inplace = True)\n\t\n\t\n\ty_stats_wgt = dsin.groupby(x + '_bin').agg({wgt:'sum','y_wgt':'sum'})\n\ty_stats_wgt.rename(columns = {wgt : 'n', 'y_wgt':'y1sum'}, inplace = True)\n\t\n\t_tmpvar = _tmpvar.merge(x_stats_wgt, left_index = True, 
right_index = True, how = 'left')\n\t_tmpvar = _tmpvar.merge(x_stats_wgt2, left_index = True, right_index = True, how = 'left')\n\t_tmpvar = _tmpvar.merge(y_stats_wgt, left_index = True, right_index = True, how = 'left')\n\t\n\t\n\t_tmpvar['y0sum'] = _tmpvar['n'] - _tmpvar['y1sum']\n\t_tmpvar['ry0sum'] = _tmpvar['nvar'] - _tmpvar['ry1sum']\n\t_tmpvar['ymean'] = _tmpvar['y1sum']/_tmpvar['n']\n\t_tmpvar['xmean'] = _tmpvar['xsum']/_tmpvar['n']\n\t\n\t_tmpvar.drop('xsum', axis = 1, inplace = True)\n\t_tmpvar.index.name = 'bin'\n\t\n\treturn _tmpvar","sub_path":"LR/libraries/WoE.py","file_name":"WoE.py","file_ext":"py","file_size_in_byte":46362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409137453","text":"import enemy\nfrom pygame import image\n\n\ndef check(x, y, target):\n\n if target.x <= x <= target.x+target.width:\n if target.y <= y <= target.y+target.height:\n return 1\n else:\n return 0\n else:\n return 0\n\n\nclass bullet(object):\n def __init__(self, direction, x, y, speed):\n self.direction = direction\n self.x = x\n self.y = y\n self.speed = speed\n self.url = image.load('img/bullet/bullet1.png')\n\n def boom(self, target):\n if check(self.x, self.y, target):\n print('Booooooooom!!!!!!!!!!!!!!!!')\n self.direction = 'o'\n return 1\n\n def move(self):\n if self.direction != 'o':\n if self.direction == 'r':\n self.x += self.speed\n if self.direction == 'l':\n self.x -= self.speed\n if self.direction == 'u':\n self.y -= self.speed\n if self.direction == 'd':\n self.y += self.speed\n if self.direction == 'ru':\n self.x += self.speed*0.707\n self.y -= self.speed*0.707\n if self.direction == 'ld':\n self.x -= self.speed*0.707\n self.y += self.speed*0.707\n if self.direction == 'lu':\n self.y -= self.speed*0.707\n self.x -= self.speed*0.707\n if self.direction == 'rd':\n self.y += self.speed*0.707\n self.x += self.speed*0.707\n\n\nif __name__ == '__main__':\n b1 = bullet('r', 300, 300, 0.5)\n e1 = enemy.enemy(0, 0, 100, 100)\n b1.boom(e1)\n","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638523997","text":"import json\nfrom functools import reduce\n\n\ndef list_of_objects(data, old_key_path_list, new_key):\n \"\"\" \n Handle case of list of objects :: [{}] \n \"\"\"\n existing_list = data.get(new_key)\n \n if existing_list is None:\n existing_list = data[new_key] = []\n \n for obj in existing_list:\n obj_iter = obj\n \n for old_key in old_key_path_list:\n if old_key not in obj_iter:\n return obj\n obj_iter = obj_iter[old_key]\n \n existing_list.append({})\n return existing_list[-1]\n\n\ndef get_nested(dictionary, keys):\n \"\"\" \n Get value of nested dict levels. \n \"\"\"\n return reduce(\n lambda d, key: d.get(key, \"\") if isinstance(d, dict) else \"\", \n keys, dictionary)\n\n\ndef set_nested(data, path):\n \"\"\" \n Creates nested paths if necessary; returns last value in the sequence. 
\n \"\"\"\n iter = data\n for v in path:\n if iter.get(v) is None:\n iter[v] = {}\n iter = iter[v]\n \n return iter\n\n\ndef as_list(payload):\n \"\"\" \n Returns payload as list.\n \"\"\"\n return payload if isinstance(payload, list) else [payload]\n\n\nclass DataMapper:\n \"\"\"\n Class for mapping JSON strings from one format to another.\n \n Mappings are provided in the form of a JSON string, where keys represent\n identifiers in the \"source\" document and values represent mappings to\n \"destination\" document.\n \n Values should be defined according to the following rules:\n - \"&\" tells the importer to move the value to another key:value.\n - \":\" tells the importer to just nest keys.\n - \"[]\" tells the importer to create an list.\n - \"{}\" tells the importer to create an object.\n - \"{}.somekey\" tells the importer to create an object with somekey key.\n - \"[{}]\" tells the importer to create a list of objects.\n \"\"\"\n \n def __init__(self, mapping_json_str):\n \"\"\"\n :in mapping_json_str string in JSON format defining object mappings\n \"\"\"\n self.mapping_dict, self.ignored_parts = \\\n self._validate_mapping_json_str(mapping_json_str)\n\n def _validate_mapping_json_str(self, mapping_json_str):\n \"\"\"\n :in mapping_json_str string in JSON format defining object mappings\n :out tuple consisting of a dictionary and the value for 'ignored_parts'.\n Will raise an exception in case mapping dictionary contains values which\n are not strings.\n \"\"\"\n if isinstance(mapping_json_str, str):\n mapping_dict = json.loads(mapping_json_str)\n else:\n mapping_dict = mapping_json_str\n\n ignored_parts = mapping_dict.pop('ignored_parts', {})\n if not all(isinstance(v, str) for v in mapping_dict.values()):\n raise Exception(\"Input dictionary values should be strings.\")\n return mapping_dict, ignored_parts\n \n def form_doc(self, from_json_str):\n \"\"\"\n :in from_json_str document (as JSON string) which should be mapped according\n to self.mapping_dict rules.\n :out dictionary constructed by applying self.mapping_dict mappings to the\n from_json_str parameter, along with a key-value mapping (\"ignored_parts\": \n self.ignored_parts)\n \"\"\"\n return self.form_doc_dict(json.loads(from_json_str))\n \n def form_doc_dict(self, from_dict):\n \"\"\"\n :in from_dict document (as dictionary) which should be mapped according\n to self.mapping_dict rules.\n :out dictionary constructed by applying self.mapping_dict mappings to the\n from_dict parameter, along with a key-value mapping (\"ignored_parts\": \n self.ignored_parts)\n \"\"\"\n \n data = {\n \"ignored_parts\": self.ignored_parts\n }\n\n for k, v in self.mapping_dict.items():\n parts = v.split('&')\n payload_path = k.split(':')\n payload = get_nested(from_dict, payload_path)\n\n if isinstance(payload, str):\n payload = payload.strip()\n\n for part in parts:\n new_path = part.split(':')\n new_doc = set_nested(data, new_path[:-1]) \n last = new_path.pop()\n\n if last.endswith('[{}]'):\n for item in as_list(payload):\n new_doc = list_of_objects(data, payload_path, last[:-4])\n new_doc[payload_path[0]] = item\n elif last.startswith(\"{}\"):\n new_doc = set_nested(data, new_path[:-1])\n key = payload_path[-1] if len(last) == 2 else last[3:]\n new_path = [key] if len(payload_path) == 0 else payload_path\n new_doc[new_path[-1]] = { key: payload }\n elif last.endswith('[]'):\n last = last[:-2]\n existing_list = new_doc.get(last)\n if existing_list is None:\n existing_list = new_doc[last] = []\n existing_list.extend(as_list(payload))\n 
elif last.endswith('<int>'):\n # a '<int>' suffix casts the payload to int\n last = last[:-5]\n new_doc[last] = int(payload)\n elif last.endswith('<str>'):\n # a '<str>' suffix casts the payload to str\n last = last[:-5]\n new_doc[last] = str(payload)\n else:\n new_doc[last] = payload\n\n return data\n","sub_path":"workflow/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"596211023","text":"import re\nimport os\nimport sys\nimport random\n\nfrom check_modules import *\n\n\nN = 10\n\nABS_MAX = 2147483647\n\n\ndef remove_duplicates(lst):\n n_lst = list()\n for x in lst:\n if x not in n_lst:\n n_lst.append(x)\n return n_lst\n\n\ndef main(level, fatality=False):\n if level == 0:\n # Numbers are distinct; pick random existing values to look up and delete\n array = list(x for x in range(1, 8))\n random.shuffle(array)\n check_value_1 = random.choice(array)\n check_value_2 = random.choice(list(set(array) - {check_value_1}))\n value_for_delete = random.choice(array)\n elif level == 1:\n if fatality:\n neo = random.randint(-ABS_MAX, ABS_MAX)\n array = list(neo for _ in range(1, 6))\n is_in_array_3 = fatality % 2\n else:\n array = list(random.randint(-ABS_MAX, ABS_MAX) for _ in range(1, 6))\n is_in_array_3 = random.choice([True, False])\n doubled = random.choice(array)\n array.insert(random.randint(0, 7), doubled)\n array.insert(random.randint(0, 7), doubled)\n is_in_array_1 = random.choice([True, False])\n is_in_array_2 = random.choice([True, False])\n check_value_1 = random.choice(array) if is_in_array_1 else random.randint(-ABS_MAX, ABS_MAX)\n check_value_2 = random.choice(array) if is_in_array_2 else random.randint(-ABS_MAX, ABS_MAX)\n value_for_delete = random.choice(array) if is_in_array_3 else random.randint(-ABS_MAX, ABS_MAX)\n else:\n raise NotImplementedError\n\n args = '{} {} {} {} {} {} {} {} {} {}'.format(\n *array, check_value_1, check_value_2, value_for_delete)\n print('Input data under test: {}'.format(args))\n\n cmd = 'echo \"{}\" | ./a.out'.format(args)\n output = os.popen(cmd).read().strip()\n print('Program output:\\n{}'.format(output))\n\n outputs = re.split(r'\\n(?:\\s*\\n)+', output)\n n = len(outputs)\n assert n == 9, ('Expected 9 task results from the program, '\n 'got {}'.format(n))\n\n print('\\nChecking tree creation')\n check_tree_by_array(outputs[0], array[:4])\n\n print('Checking insertion of elements into the tree')\n check_tree_by_array(outputs[1], array)\n\n print('First check of an element in a node')\n check_node_elements(outputs[2], array, check_value_1)\n \n print('Second check of an element in a node')\n check_node_elements(outputs[3], array, check_value_2)\n\n array = remove_duplicates(array)\n\n print('Checking deletion of an element from the tree')\n if value_for_delete in array:\n array.remove(value_for_delete)\n check_correct_delete_node(outputs[4], array)\n\n array = list(int(x) for x in outputs[4].split() if x.strip() not in ('-', '_'))\n\n print('Checking left rotation')\n check_with_left_rotate(outputs[5], array)\n\n print('Checking right rotation')\n check_with_right_rotate(outputs[6], array)\n\n print('Checking node count')\n check_node_count(outputs[7], array)\n\n print('Checking empty tree')\n check_empty_list(outputs[8])\n\n print('Ok!\\n')\n\n\nlevel = int(sys.argv[1])\n\nfor i in range(N):\n if i >= N-2 and level == 1:\n main(level, i)\n continue\n \n main(level)\n 
\n","sub_path":"pr5/tests/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501089000","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import IsolationForest\nrng = np.random.RandomState(42)\n\n\ndata = pd.read_csv(\"mouse-synthetic-data.txt\", sep=\" \", header=None)\ndata = np.array(data)\ntr_data = data[:-10]\nxs = data[:, 0]\nys = data[:, 1]\n\nclf = IsolationForest(max_samples=100, random_state=rng, contamination=10/len(data))\nclf.fit(data)\npreds = clf.predict(data)\n\ncs = [(\"b\" if p == 1 else \"r\") for p in preds]\n\nprint(preds)\n\nplt.scatter(xs, ys, c=cs)\nplt.show()\n","sub_path":"tp_part3.py","file_name":"tp_part3.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498128678","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2018 João Pedro Rodrigues\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUnit Tests for `pdb_intersect`.\n\"\"\"\n\nimport os\nimport sys\nimport unittest\n\nfrom config import data_dir\nfrom utils import OutputCapture\n\n\nclass TestTool(unittest.TestCase):\n \"\"\"\n Generic class for testing tools.\n \"\"\"\n\n def setUp(self):\n # Dynamically import the module\n name = 'pdbtools.pdb_intersect'\n self.module = __import__(name, fromlist=[''])\n\n def exec_module(self):\n \"\"\"\n Execs module.\n \"\"\"\n\n with OutputCapture() as output:\n try:\n self.module.main()\n except SystemExit as e:\n self.retcode = e.code\n\n self.stdout = output.stdout\n self.stderr = output.stderr\n\n return\n\n def test_default(self):\n \"\"\"$ pdb_intersect data/dummy.pdb data/dummy.pdb\"\"\"\n\n # Simulate input\n sys.argv = ['', os.path.join(data_dir, 'dummy.pdb'),\n os.path.join(data_dir, 'dummy.pdb')]\n\n # Execute the script\n self.exec_module()\n\n # Validate results\n self.assertEqual(self.retcode, 0) # ensure the program exited OK.\n self.assertEqual(len(self.stdout), 188) # no lines deleted. 
Same file.\n self.assertEqual(len(self.stderr), 0) # no errors\n\n atom_names = [l[12:16] for l in self.stdout]\n\n # Test content\n atoms_list = [' N ', ' H ', ' H2 ', ' H3 ', ' CA ', ' HA ', ' CB ',\n ' HB2', ' HB3', ' CG ', ' HG2', ' HG3', ' CD ', ' HD2',\n ' HD3', ' NE ', ' HE ', ' CZ ', ' NH1', 'HH11', 'HH12',\n ' NH2', 'HH21', 'HH22', ' C ', ' O ', ' N ', ' H ',\n ' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ', ' HG2',\n ' HG3', ' CD ', ' OE1', ' OE2', ' C ', ' O ', ' N ',\n ' H ', ' CA ', ' HA ', ' CB ', ' HB1', ' HB2', ' HB3',\n ' C ', ' O ', ' ', ' N ', ' H ', ' H2 ', ' H3 ',\n ' CA ', ' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ',\n ' OD1', ' ND2', 'HD21', 'HD22', ' C ', ' O ', ' N ',\n ' H ', ' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ',\n ' HG2', ' HG3', ' CD ', ' HD2', ' HD3', ' NE ', ' HE ',\n ' CZ ', ' NH1', 'HH11', 'HH12', ' NH2', 'HH21', 'HH22',\n ' C ', ' O ', ' N ', ' H ', ' CA ', ' HA ', ' CB ',\n ' HB2', ' HB3', ' CG ', ' HG2', ' HG3', ' CD ', ' OE1',\n ' OE2', ' C ', ' O ', ' ', ' N ', ' H ', ' H2 ',\n ' H3 ', ' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ',\n ' HG2', ' HG3', ' CD ', ' HD2', ' HD3', ' NE ', ' HE ',\n ' CZ ', ' NH1', 'HH11', 'HH12', ' NH2', 'HH21', 'HH22',\n ' C ', ' O ', ' N ', ' H ', ' CA ', ' HA ', ' CB ',\n ' HB2', ' HB3', ' CG ', ' HG2', ' HG3', ' CD ', ' OE1',\n ' OE2', ' C ', ' O ', ' N ', ' CA ', ' C ', ' O ',\n ' CB ', ' CG ', ' SD ', ' CE ', ' ', ' P ', ' OP1',\n ' OP2', \" O5'\", \" C5'\", \" C4'\", \" O4'\", \" C3'\", \" O3'\",\n \" C2'\", \" C1'\", ' N1 ', ' C2 ', ' O2 ', ' N3 ', ' C4 ',\n ' O4 ', ' C5 ', ' C7 ', ' C6 ', 'CA ', ' O ', ' O ',\n ' O ', ' O ', ' O ', ' O ', ' O ', ' O ']\n\n self.assertEqual(atoms_list, atom_names)\n\n def test_file_not_found(self):\n \"\"\"$ pdb_intersect not_existing.pdb\"\"\"\n\n # Error (file not found)\n afile = os.path.join(data_dir, 'not_existing.pdb')\n sys.argv = ['', afile]\n\n # Execute the script\n self.exec_module()\n\n self.assertEqual(self.retcode, 1) # exit code is 1 (error)\n self.assertEqual(len(self.stdout), 0) # nothing written to stdout\n self.assertEqual(self.stderr[0][:22],\n \"ERROR!! 
File not found\") # proper error message\n\n @unittest.skipIf(os.getenv('SKIP_TTY_TESTS'), 'skip on GHA - no TTY')\n def test_helptext(self):\n \"\"\"$ pdb_intersect\"\"\"\n\n sys.argv = ['']\n\n # Execute the script\n self.exec_module()\n\n self.assertEqual(self.retcode, 1) # ensure the program exited gracefully.\n self.assertEqual(len(self.stdout), 0) # no output\n self.assertEqual(self.stderr, self.module.__doc__.split(\"\\n\")[:-1])\n\n\nif __name__ == '__main__':\n from config import test_dir\n\n mpath = os.path.abspath(os.path.join(test_dir, '..'))\n sys.path.insert(0, mpath) # so we load dev files before any installation\n\n unittest.main()\n","sub_path":"tests/test_pdb_intersect.py","file_name":"test_pdb_intersect.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472511181","text":"from oslo_serialization import jsonutils\nfrom oslo_log import log\n\nfrom craton.api import v1\nfrom craton.api.v1 import base\nfrom craton.api.v1.resources import utils\nfrom craton import db as dbapi\nfrom craton import util\n\n\nLOG = log.getLogger(__name__)\n\n\nclass Hosts(base.Resource):\n\n @base.pagination_context\n def get(self, context, request_args, pagination_params):\n \"\"\"Get all hosts for region, with optional filtering.\"\"\"\n details = request_args.get(\"details\")\n hosts_obj, link_params = dbapi.hosts_get_all(\n context, request_args, pagination_params,\n )\n if details:\n hosts_obj = [utils.get_resource_with_vars(request_args, h)\n for h in hosts_obj]\n\n links = base.links_from(link_params)\n response_body = jsonutils.to_primitive(\n {'hosts': hosts_obj, 'links': links}\n )\n\n for host in response_body[\"hosts\"]:\n utils.add_up_link(context, host)\n\n return response_body, 200, None\n\n def post(self, context, request_data):\n \"\"\"Create a new host.\"\"\"\n json = util.copy_project_id_into_json(context, request_data)\n host_obj = dbapi.hosts_create(context, json)\n host = jsonutils.to_primitive(host_obj)\n if 'variables' in json:\n host[\"variables\"] = jsonutils.to_primitive(host_obj.variables)\n else:\n host[\"variables\"] = {}\n\n utils.add_up_link(context, host)\n\n location = v1.api.url_for(\n HostById, id=host_obj.id, _external=True\n )\n headers = {'Location': location}\n\n return host, 201, headers\n\n\nclass HostById(base.Resource):\n\n def get(self, context, id, request_args):\n \"\"\"Get host by given id\"\"\"\n host_obj = dbapi.hosts_get_by_id(context, id)\n host = utils.get_resource_with_vars(request_args, host_obj)\n\n utils.add_up_link(context, host)\n\n return host, 200, None\n\n def put(self, context, id, request_data):\n \"\"\"Update existing host data, or create if it does not exist.\"\"\"\n host_obj = dbapi.hosts_update(context, id, request_data)\n\n host = jsonutils.to_primitive(host_obj)\n\n utils.add_up_link(context, host)\n\n return host, 200, None\n\n def delete(self, context, id):\n \"\"\"Delete existing host.\"\"\"\n dbapi.hosts_delete(context, id)\n return None, 204, None\n\n\nclass HostsLabels(base.Resource):\n\n def get(self, context, id):\n \"\"\"Get labels for given host device.\"\"\"\n host_obj = dbapi.hosts_get_by_id(context, id)\n response = {\"labels\": list(host_obj.labels)}\n return response, 200, None\n\n def put(self, context, id, request_data):\n \"\"\"\n Update existing device label entirely, or add if it does\n not exist.\n \"\"\"\n resp = dbapi.hosts_labels_update(context, id, request_data)\n response = {\"labels\": 
list(resp.labels)}\n return response, 200, None\n\n def delete(self, context, id, request_data):\n \"\"\"Delete device label entirely.\"\"\"\n dbapi.hosts_labels_delete(context, id, request_data)\n return None, 204, None\n","sub_path":"craton/api/v1/resources/inventory/hosts.py","file_name":"hosts.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"388811598","text":"from __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.validators import RegexValidator, ip_address_validators\nfrom django.forms import FilePathField as DjangoFilePathField\nfrom django.forms import ImageField as DjangoImageField\nfrom django.utils import six, timezone\nfrom django.utils.dateparse import parse_date, parse_datetime, parse_time\nfrom django.utils.encoding import is_protected_type, smart_text\nfrom django.utils.functional import cached_property\nfrom django.utils.ipv6 import clean_ipv6_address\n\nfrom rest_framework import permissions\nfrom rest_framework.serializers import ModelSerializer, ValidationError\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom push_notifications.models import APNSDevice, GCMDevice\nfrom push_notifications.fields import hex_re\n\nfrom django.utils.translation import ugettext_lazy as _\n\nimport re\n\n# The helpers below are required by the vendored DRF field code in this module;\n# the import locations assume DRF 3.x, which that code was copied from.\nimport collections\nimport copy\nimport inspect\nfrom collections import OrderedDict\n\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom rest_framework.compat import unicode_repr, unicode_to_repr\nfrom rest_framework.utils import html, representation\n\n\nclass empty:\n \"\"\"\n This class is used to represent no data being provided for a given input\n or output value.\n It is required because `None` may be a valid input or output value.\n \"\"\"\n pass\n\n\ndef is_simple_callable(obj):\n \"\"\"\n True if the object is a callable that takes no arguments.\n \"\"\"\n function = inspect.isfunction(obj)\n method = inspect.ismethod(obj)\n\n if not (function or method):\n return False\n\n args, _, _, defaults = inspect.getargspec(obj)\n len_args = len(args) if function else len(args) - 1\n len_defaults = len(defaults) if defaults else 0\n return len_args <= len_defaults\n\n\ndef get_attribute(instance, attrs):\n \"\"\"\n Similar to Python's built in `getattr(instance, attr)`,\n but takes a list of nested attributes, instead of a single attribute.\n Also accepts either attribute lookup on objects or dictionary lookups.\n \"\"\"\n for attr in attrs:\n if instance is None:\n # Break out early if we get `None` at any point in a nested lookup.\n return None\n try:\n if isinstance(instance, collections.Mapping):\n instance = instance[attr]\n else:\n instance = getattr(instance, attr)\n except ObjectDoesNotExist:\n return None\n if is_simple_callable(instance):\n try:\n instance = instance()\n except (AttributeError, KeyError) as exc:\n # If we raised an Attribute or KeyError here it'd get treated\n # as an omitted field in `Field.get_attribute()`. 
Instead we\n # raise a ValueError to ensure the exception is not masked.\n raise ValueError(\n 'Exception raised in callable attribute \"{0}\"; original exception was: {1}'.format(attr, exc))\n\n return instance\n\n\ndef set_value(dictionary, keys, value):\n \"\"\"\n Similar to Python's built in `dictionary[key] = value`,\n but takes a list of nested keys instead of a single key.\n set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2}\n set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2}\n set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}}\n \"\"\"\n if not keys:\n dictionary.update(value)\n return\n\n for key in keys[:-1]:\n if key not in dictionary:\n dictionary[key] = {}\n dictionary = dictionary[key]\n\n dictionary[keys[-1]] = value\n\n\ndef to_choices_dict(choices):\n \"\"\"\n Convert choices into key/value dicts.\n to_choices_dict([1]) -> {1: 1}\n to_choices_dict([(1, '1st'), (2, '2nd')]) -> {1: '1st', 2: '2nd'}\n to_choices_dict([('Group', ((1, '1st'), (2, '2nd')))]) -> {'Group': {1: '1st', 2: '2nd'}}\n \"\"\"\n # Allow single, paired or grouped choices style:\n # choices = [1, 2, 3]\n # choices = [(1, 'First'), (2, 'Second'), (3, 'Third')]\n # choices = [('Category', ((1, 'First'), (2, 'Second'))), (3, 'Third')]\n ret = OrderedDict()\n for choice in choices:\n if (not isinstance(choice, (list, tuple))):\n # single choice\n ret[choice] = choice\n else:\n key, value = choice\n if isinstance(value, (list, tuple)):\n # grouped choices (category, sub choices)\n ret[key] = to_choices_dict(value)\n else:\n # paired choice (key, display value)\n ret[key] = value\n return ret\n\n\ndef flatten_choices_dict(choices):\n \"\"\"\n Convert a group choices dict into a flat dict of choices.\n flatten_choices_dict({1: '1st', 2: '2nd'}) -> {1: '1st', 2: '2nd'}\n flatten_choices_dict({'Group': {1: '1st', 2: '2nd'}}) -> {1: '1st', 2: '2nd'}\n \"\"\"\n ret = OrderedDict()\n for key, value in choices.items():\n if isinstance(value, dict):\n # grouped choices (category, sub choices)\n for sub_key, sub_value in value.items():\n ret[sub_key] = sub_value\n else:\n # choice (key, display value)\n ret[key] = value\n return ret\n\n\ndef iter_options(grouped_choices, cutoff=None, cutoff_text=None):\n \"\"\"\n Helper function for options and option groups in templates.\n \"\"\"\n\n class StartOptionGroup(object):\n start_option_group = True\n end_option_group = False\n\n def __init__(self, label):\n self.label = label\n\n class EndOptionGroup(object):\n start_option_group = False\n end_option_group = True\n\n class Option(object):\n start_option_group = False\n end_option_group = False\n\n def __init__(self, value, display_text, disabled=False):\n self.value = value\n self.display_text = display_text\n self.disabled = disabled\n\n count = 0\n\n for key, value in grouped_choices.items():\n if cutoff and count >= cutoff:\n break\n\n if isinstance(value, dict):\n yield StartOptionGroup(label=key)\n for sub_key, sub_value in value.items():\n if cutoff and count >= cutoff:\n break\n yield Option(value=sub_key, display_text=sub_value)\n count += 1\n yield EndOptionGroup()\n else:\n yield Option(value=key, display_text=value)\n count += 1\n\n if cutoff and count >= cutoff and cutoff_text:\n cutoff_text = cutoff_text.format(count=cutoff)\n yield Option(value='n/a', display_text=cutoff_text, disabled=True)\n\n\nclass CreateOnlyDefault(object):\n \"\"\"\n This class may be used to provide default values that are only used\n for create operations, but that do not return any value for update\n operations.\n \"\"\"\n\n def 
__init__(self, default):\n self.default = default\n\n def set_context(self, serializer_field):\n self.is_update = serializer_field.parent.instance is not None\n if callable(self.default) and hasattr(self.default, 'set_context') and not self.is_update:\n self.default.set_context(serializer_field)\n\n def __call__(self):\n if self.is_update:\n raise SkipField()\n if callable(self.default):\n return self.default()\n return self.default\n\n def __repr__(self):\n return unicode_to_repr(\n '%s(%s)' % (self.__class__.__name__, unicode_repr(self.default))\n )\n\n\nclass CurrentUserDefault(object):\n def set_context(self, serializer_field):\n self.user = serializer_field.context['request'].user\n\n def __call__(self):\n return self.user\n\n def __repr__(self):\n return unicode_to_repr('%s()' % self.__class__.__name__)\n\n\nclass SkipField(Exception):\n pass\n\n\nNOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`'\nNOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`'\nNOT_REQUIRED_DEFAULT = 'May not set both `required` and `default`'\nUSE_READONLYFIELD = 'Field(read_only=True) should be ReadOnlyField'\nMISSING_ERROR_MESSAGE = (\n 'ValidationError raised by `{class_name}`, but error key `{key}` does '\n 'not exist in the `error_messages` dictionary.'\n)\n\n\nclass Field(object):\n _creation_counter = 0\n\n default_error_messages = {\n 'required': _('This field is required.'),\n 'null': _('This field may not be null.')\n }\n default_validators = []\n default_empty_html = empty\n initial = None\n\n def __init__(self, read_only=False, write_only=False,\n required=None, default=empty, initial=empty, source=None,\n label=None, help_text=None, style=None,\n error_messages=None, validators=None, allow_null=False):\n self._creation_counter = Field._creation_counter\n Field._creation_counter += 1\n\n # If `required` is unset, then use `True` unless a default is provided.\n if required is None:\n required = default is empty and not read_only\n\n # Some combinations of keyword arguments do not make sense.\n assert not (read_only and write_only), NOT_READ_ONLY_WRITE_ONLY\n assert not (read_only and required), NOT_READ_ONLY_REQUIRED\n assert not (required and default is not empty), NOT_REQUIRED_DEFAULT\n assert not (read_only and self.__class__ == Field), USE_READONLYFIELD\n\n self.read_only = read_only\n self.write_only = write_only\n self.required = required\n self.default = default\n self.source = source\n self.initial = self.initial if (initial is empty) else initial\n self.label = label\n self.help_text = help_text\n self.style = {} if style is None else style\n self.allow_null = allow_null\n\n if self.default_empty_html is not empty:\n if default is not empty:\n self.default_empty_html = default\n\n if validators is not None:\n self.validators = validators[:]\n\n # These are set up by `.bind()` when the field is added to a serializer.\n self.field_name = None\n self.parent = None\n\n # Collect default error message from self and parent classes\n messages = {}\n for cls in reversed(self.__class__.__mro__):\n messages.update(getattr(cls, 'default_error_messages', {}))\n messages.update(error_messages or {})\n self.error_messages = messages\n\n def bind(self, field_name, parent):\n \"\"\"\n Initializes the field name and parent for the field instance.\n Called when a field is added to the parent serializer instance.\n \"\"\"\n\n # In order to enforce a consistent style, we error if a redundant\n # 'source' argument has been used. 
For example:\n # my_field = serializer.CharField(source='my_field')\n assert self.source != field_name, (\n \"It is redundant to specify `source='%s'` on field '%s' in \"\n \"serializer '%s', because it is the same as the field name. \"\n \"Remove the `source` keyword argument.\" %\n (field_name, self.__class__.__name__, parent.__class__.__name__)\n )\n\n self.field_name = field_name\n self.parent = parent\n\n # `self.label` should default to being based on the field name.\n if self.label is None:\n self.label = field_name.replace('_', ' ').capitalize()\n\n # self.source should default to being the same as the field name.\n if self.source is None:\n self.source = field_name\n\n # self.source_attrs is a list of attributes that need to be looked up\n # when serializing the instance, or populating the validated data.\n if self.source == '*':\n self.source_attrs = []\n else:\n self.source_attrs = self.source.split('.')\n\n # .validators is a lazily loaded property, that gets its default\n # value from `get_validators`.\n @property\n def validators(self):\n if not hasattr(self, '_validators'):\n self._validators = self.get_validators()\n return self._validators\n\n @validators.setter\n def validators(self, validators):\n self._validators = validators\n\n def get_validators(self):\n return self.default_validators[:]\n\n def get_initial(self):\n \"\"\"\n Return a value to use when the field is being returned as a primitive\n value, without any object instance.\n \"\"\"\n return self.initial\n\n def get_value(self, dictionary):\n \"\"\"\n Given the *incoming* primitive data, return the value for this field\n that should be validated and transformed to a native value.\n \"\"\"\n if html.is_html_input(dictionary):\n # HTML forms will represent empty fields as '', and cannot\n # represent None or False values directly.\n if self.field_name not in dictionary:\n if getattr(self.root, 'partial', False):\n return empty\n return self.default_empty_html\n ret = dictionary[self.field_name]\n if ret == '' and self.allow_null:\n # If the field is blank, and null is a valid value then\n # determine if we should use null instead.\n return '' if getattr(self, 'allow_blank', False) else None\n elif ret == '' and not self.required:\n # If the field is blank, and emptyness is valid then\n # determine if we should use emptyness instead.\n return '' if getattr(self, 'allow_blank', False) else empty\n return ret\n return dictionary.get(self.field_name, empty)\n\n def get_attribute(self, instance):\n \"\"\"\n Given the *outgoing* object instance, return the primitive value\n that should be used for this field.\n \"\"\"\n try:\n return get_attribute(instance, self.source_attrs)\n except (KeyError, AttributeError) as exc:\n if not self.required and self.default is empty:\n raise SkipField()\n msg = (\n 'Got {exc_type} when attempting to get a value for field '\n '`{field}` on serializer `{serializer}`.\\nThe serializer '\n 'field might be named incorrectly and not match '\n 'any attribute or key on the `{instance}` instance.\\n'\n 'Original exception text was: {exc}.'.format(\n exc_type=type(exc).__name__,\n field=self.field_name,\n serializer=self.parent.__class__.__name__,\n instance=instance.__class__.__name__,\n exc=exc\n )\n )\n raise type(exc)(msg)\n\n def get_default(self):\n \"\"\"\n Return the default value to use when validating data if no input\n is provided for this field.\n If a default has not been set for this field then this will simply\n return `empty`, indicating that no value should be set in the\n 
validated data for this field.\n \"\"\"\n if self.default is empty:\n raise SkipField()\n if callable(self.default):\n if hasattr(self.default, 'set_context'):\n self.default.set_context(self)\n return self.default()\n return self.default\n\n def validate_empty_values(self, data):\n \"\"\"\n Validate empty values, and either:\n * Raise `ValidationError`, indicating invalid data.\n * Raise `SkipField`, indicating that the field should be ignored.\n * Return (True, data), indicating an empty value that should be\n returned without any further validation being applied.\n * Return (False, data), indicating a non-empty value, that should\n have validation applied as normal.\n \"\"\"\n if self.read_only:\n return (True, self.get_default())\n\n if data is empty:\n if getattr(self.root, 'partial', False):\n raise SkipField()\n if self.required:\n self.fail('required')\n return (True, self.get_default())\n\n if data is None:\n if not self.allow_null:\n self.fail('null')\n return (True, None)\n\n return (False, data)\n\n def run_validation(self, data=empty):\n \"\"\"\n Validate a simple representation and return the internal value.\n The provided data may be `empty` if no representation was included\n in the input.\n May raise `SkipField` if the field should not be included in the\n validated data.\n \"\"\"\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n value = self.to_internal_value(data)\n self.run_validators(value)\n return value\n\n def run_validators(self, value):\n \"\"\"\n Test the given value against all the validators on the field,\n and either raise a `ValidationError` or simply return.\n \"\"\"\n errors = []\n for validator in self.validators:\n if hasattr(validator, 'set_context'):\n validator.set_context(self)\n\n try:\n validator(value)\n except ValidationError as exc:\n # If the validation error contains a mapping of fields to\n # errors then simply raise it immediately rather than\n # attempting to accumulate a list of errors.\n if isinstance(exc.detail, dict):\n raise\n errors.extend(exc.detail)\n except DjangoValidationError as exc:\n errors.extend(exc.messages)\n if errors:\n raise ValidationError(errors)\n\n def to_internal_value(self, data):\n \"\"\"\n Transform the *incoming* primitive data into a native value.\n \"\"\"\n raise NotImplementedError(\n '{cls}.to_internal_value() must be implemented.'.format(\n cls=self.__class__.__name__\n )\n )\n\n def to_representation(self, value):\n \"\"\"\n Transform the *outgoing* native value into primitive data.\n \"\"\"\n raise NotImplementedError(\n '{cls}.to_representation() must be implemented for field '\n '{field_name}. 
If you do not need to support write operations '\n 'you probably want to subclass `ReadOnlyField` instead.'.format(\n cls=self.__class__.__name__,\n field_name=self.field_name,\n )\n )\n\n def fail(self, key, **kwargs):\n \"\"\"\n A helper method that simply raises a validation error.\n \"\"\"\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)\n raise AssertionError(msg)\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string)\n\n @cached_property\n def root(self):\n \"\"\"\n Returns the top-level serializer for this field.\n \"\"\"\n root = self\n while root.parent is not None:\n root = root.parent\n return root\n\n @cached_property\n def context(self):\n \"\"\"\n Returns the context as passed to the root serializer on initialization.\n \"\"\"\n return getattr(self.root, '_context', {})\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n When a field is instantiated, we store the arguments that were used,\n so that we can present a helpful representation of the object.\n \"\"\"\n instance = super(Field, cls).__new__(cls)\n instance._args = args\n instance._kwargs = kwargs\n return instance\n\n def __deepcopy__(self, memo):\n \"\"\"\n When cloning fields we instantiate using the arguments it was\n originally created with, rather than copying the complete state.\n \"\"\"\n args = copy.deepcopy(self._args)\n kwargs = dict(self._kwargs)\n # Bit ugly, but we need to special case 'validators' as Django's\n # RegexValidator does not support deepcopy.\n # We treat validator callables as immutable objects.\n # See https://github.com/tomchristie/django-rest-framework/issues/1954\n validators = kwargs.pop('validators', None)\n kwargs = copy.deepcopy(kwargs)\n if validators is not None:\n kwargs['validators'] = validators\n return self.__class__(*args, **kwargs)\n\n def __repr__(self):\n \"\"\"\n Fields are represented using their initial calling arguments.\n This allows us to create descriptive representations for serializer\n instances that show all the declared fields on the serializer.\n \"\"\"\n return unicode_to_repr(representation.field_repr(self))\n\n\nclass IntegerField(Field):\n default_error_messages = {\n 'invalid': _('A valid integer is required.'),\n 'max_value': _('Ensure this value is less than or equal to {max_value}.'),\n 'min_value': _('Ensure this value is greater than or equal to {min_value}.'),\n 'max_string_length': _('String value too large.')\n }\n MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.\n re_decimal = re.compile(r'\\.0*\\s*$') # allow e.g. 
'1.0' as an int, but not '1.2'\n\n def __init__(self, **kwargs):\n self.max_value = kwargs.pop('max_value', None)\n self.min_value = kwargs.pop('min_value', None)\n super(IntegerField, self).__init__(**kwargs)\n if self.max_value is not None:\n message = self.error_messages['max_value'].format(max_value=self.max_value)\n self.validators.append(MaxValueValidator(self.max_value, message=message))\n if self.min_value is not None:\n message = self.error_messages['min_value'].format(min_value=self.min_value)\n self.validators.append(MinValueValidator(self.min_value, message=message))\n\n def to_internal_value(self, data):\n if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:\n self.fail('max_string_length')\n\n try:\n data = int(self.re_decimal.sub('', str(data)))\n except (ValueError, TypeError):\n self.fail('invalid')\n return data\n\n def to_representation(self, value):\n return int(value)\n\n\nclass HexIntegerField(IntegerField):\n \"\"\"\n Store an integer represented as a hex string of form \"0x01\".\n \"\"\"\n\n def to_internal_value(self, data):\n data = int(data, 16)\n return super(HexIntegerField, self).to_internal_value(data)\n\n def to_representation(self, value):\n return value\n\n\n# Serializers\nclass DeviceSerializerMixin(ModelSerializer):\n class Meta:\n fields = (\"name\", \"registration_id\", \"device_id\", \"active\", \"date_created\")\n read_only_fields = (\"date_created\",)\n\n\nclass APNSDeviceSerializer(ModelSerializer):\n class Meta(DeviceSerializerMixin.Meta):\n model = APNSDevice\n\n def validate_registration_id(self, value):\n # DRF 3 passes the field value directly to validate_<field_name>();\n # APNS device tokens are 256-bit, i.e. 64 hex characters\n if hex_re.match(value) is None or len(value) != 64:\n raise ValidationError(\"Registration ID (device token) is invalid\")\n return value\n\n\nclass GCMDeviceSerializer(ModelSerializer):\n device_id = HexIntegerField(\n help_text=\"ANDROID_ID / TelephonyManager.getDeviceId() (e.g: 0x01)\",\n style={'input_type': 'text'},\n required=False\n )\n\n class Meta(DeviceSerializerMixin.Meta):\n model = GCMDevice\n\n\n# Permissions\nclass IsOwner(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n # must be the owner to view the object\n return obj.user == request.user\n\n\n# Mixins\nclass DeviceViewSetMixin(object):\n lookup_field = \"registration_id\"\n\n def perform_create(self, serializer):\n if self.request.user.is_authenticated():\n serializer.save(user=self.request.user)\n return super(DeviceViewSetMixin, self).perform_create(serializer)\n\n\nclass AuthorizedMixin(object):\n permission_classes = (permissions.IsAuthenticated, IsOwner)\n\n def get_queryset(self):\n # filter all devices to only those belonging to the current user\n return self.queryset.filter(user=self.request.user)\n\n\n# ViewSets\nclass APNSDeviceViewSet(DeviceViewSetMixin, ModelViewSet):\n queryset = APNSDevice.objects.all()\n serializer_class = APNSDeviceSerializer\n\n\nclass APNSDeviceAuthorizedViewSet(AuthorizedMixin, APNSDeviceViewSet):\n pass\n\n\nclass GCMDeviceViewSet(DeviceViewSetMixin, ModelViewSet):\n queryset = GCMDevice.objects.all()\n serializer_class = GCMDeviceSerializer\n\n\nclass GCMDeviceAuthorizedViewSet(AuthorizedMixin, GCMDeviceViewSet):\n pass\n","sub_path":"push_notifications/api/rest_framework.py","file_name":"rest_framework.py","file_ext":"py","file_size_in_byte":24726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"461154987","text":"import hashlib\nimport random\nimport string\n\nfrom flask import jsonify\nfrom flask 
import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import session\nfrom flask import url_for\nfrom .treelog import log\nfrom ..models import User, Follow, Tweet, TweetImg\nfrom . import api\nfrom .notification import save_notification, user_notified\n\n\ndef hash_password(pwd):\n m = hashlib.md5()\n m.update(pwd.encode('utf-8'))\n result = m.hexdigest()\n return result\n\n\n@api.route('/')\ndef index():\n return redirect(url_for('api.login_view'))\n\n\n# Render the login page (GET)\n@api.route('/login')\ndef login_view():\n return render_template('login.html')\n\n\n# Handle the login request (POST)\n@api.route('/login', methods=['POST'])\ndef login():\n account = request.get_json()\n log('account', account)\n u = User(account)\n u.password = hash_password(u.password)\n user = User.query.filter_by(username=u.username).first()\n r = {\n 'success': True,\n 'message': 'login succeeded',\n 'code': 'success',\n 'data': {\n\n }\n }\n if user.validate(u):\n log(\"user logged in successfully\")\n # build the response with make_response and set the cookie\n session['user_id'] = user.id\n r['data'] = '/timeline/{}'.format(user.username)\n else:\n r['message'] = 'login failed'\n return jsonify(r)\n\n\n# Handle the registration request (POST)\n@api.route('/register', methods=['POST'])\ndef register():\n account = request.get_json()\n\n data = request.get_data()\n '''\n json_string = data.decode('utf-8')\n d = json.loads(json_string)\n log('json_string', json_string)\n log('d', d)\n '''\n log('account', account)\n log('data', data)\n\n u = User(account)\n usr = User.query.filter_by(username=u.username).first()\n r = {\n 'success': True,\n 'message': 'registration succeeded',\n 'code': 'success',\n 'data': {\n\n }\n }\n if u.valid() and usr is None:\n log(\"user registered successfully\")\n u.password = hash_password(u.password)\n # save to the database\n u.save()\n user = User.query.filter_by(username=u.username).first()\n user_id = user.id\n session['user_id'] = user_id\n r['data'] = '/timeline/{}'.format(u.username)\n # automatically follow all earlier users\n for i in range(1, user_id):\n f = Follow(user_id, i)\n f.save()\n admin_id = 1\n admin = User.query.filter_by(id=admin_id).first()\n # send this user an @ notification for testing\n content = '@' + user.username + ' ' + 'test'\n fake_tweet(content, user_id=admin_id)\n content = 'test'\n fake_tweet(content, user_id=user_id)\n else:\n r['message'] = 'registration failed'\n return jsonify(r)\n\n\n@api.route('/signout')\ndef signout():\n session['user_id'] = None\n log('session', session)\n return redirect(url_for('api.login_view'))\n\n\n# Automatically create a user\n@api.route('/testuser', methods=['GET'])\ndef fake_user():\n form = {\n 'username': 'visitor' + string_generator(size=4),\n 'password': string_generator(size=6),\n }\n user = User(form)\n # write the follower info\n user.password = hash_password(user.password)\n # save to the database\n user.save()\n user = User.query.filter_by(username=user.username).first()\n # automatically follow all earlier users\n for i in range(1, user.id):\n f = Follow(user.id, i)\n f.save()\n admin_id = 1\n admin = User.query.filter_by(id=admin_id).first()\n session['user_id'] = user.id\n content = '@' + user.username + ' ' + 'test'\n fake_tweet(content, user_id=admin_id)\n content = 'test'\n user_id = user.id\n fake_tweet(content, user_id=user_id)\n created_user = {\n 'success': True,\n 'user': user.json(),\n 'message': 'login succeeded',\n }\n return jsonify(created_user)\n\n\n# Automatically create a tweet\ndef fake_tweet(content, user_id):\n # send this user an @ notification for testing\n form = {\n 'content': content\n }\n t = Tweet(form)\n t.user_id = user_id\n t.save()\n for i in range(1, 10):\n img_url = '/static/tweets_picture/' + str(i) + '.jpg'\n s = TweetImg(img_url)\n s.tweet = t\n s.save()\n # from the usernames @-mentioned in the tweet, create the matching At instances and save them to the database\n name_lst = 
user_notified(t.content)\n save_notification(lst=name_lst, tweet=t)\n return\n\n\ndef string_generator(size):\n # chars = string.ascii_uppercase + string.digits\n chars = string.digits\n return ''.join(random.SystemRandom().choice(chars) for _ in range(size))\n","sub_path":"app/api/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297832020","text":"import logging\nimport logging.handlers\n\n\ndef init_logger(logger_name):\n \"\"\"\n defines logger\n :param logger_name: logger name e.g. 'producer', 'consumer\" and etc.\n :return: logger\n \"\"\"\n _logger = logging.getLogger(logger_name)\n\n # logger message format\n log_formatter = logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S')\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n _logger.addHandler(console_handler)\n\n _logger.setLevel(logging.DEBUG)\n\n return _logger\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169215571","text":"import torch\nimport numpy as numpy\nimport torch.utils.data as data\nimport scipy.io as sio\nimport os\nimport skimage.io as skio\n\nclass OnePoseAllJointsDataset(data.Dataset): #right now only accept train = True\n def __init__(self, imagedir, annotdir, annot_mat_file_name, train= True):\n self.imagedir = imagedir\n self.annot_mat = os.path.join(annotdir, annot_mat_file_name)\n self.image_names = []\n self.annot_mat = sio.loadmat(self.annot_mat, struct_as_record=False)\n self.annots = {}\n self.train_label = train\n self.idx_train = self.annot_mat['RELEASE'][0,0].__dict__['img_train'][0].tolist()\n self._data_cleaning_()\n self._annot_init_()\n def _data_cleaning_(self): \n '''\n prepare the list of image names, to be indexed\n '''\n anno_len = self.annot_mat['RELEASE'][0,0].__dict__['annolist'].shape[1]\n for i in range(anno_len):\n self.image_names.append(self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['image'][0,0].__dict__['name'].item())\n \n if self.train_label == True:\n _img_names = [x for x, y in zip(self.image_names, self.idx_train) if y == 1]\n else: \n _img_names = [x for x, y in zip(self.image_names, self.idx_train) if y == 0]\n\n _names_ = os.listdir(self.imagedir)\n for name in _img_names:\n if name not in _names_:\n _img_names.remove(name)\n self.image_names = _img_names \n return \n\n def _annot_init_(self):\n '''\n Extract names, joint positions from the .mat annotation file into a dictionary\n '''\n anno_len = self.annot_mat['RELEASE'][0,0].__dict__['annolist'].shape[1]\n for i in range(anno_len):\n img_fn = self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['image'][0,0].__dict__['name'].item()\n if img_fn not in self.image_names:\n #Ignore those images without \n continue\n pose_num = self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'].shape[1]\n if pose_num != 1: #Ignore those pictures with multiple human poses\n try:\n self.image_names.remove(img_fn)\n except ValueError:\n pass #In case some file names in the annotation doesn't have a corresponding image file\n continue\n try:\n joints_num = self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['annopoints'][0,0].__dict__['point'].shape[1]\n except:\n try:\n 
self.image_names.remove(img_fn)\n except ValueError:\n pass #In case some file names in the annotation doesn't have a corresponding image file\n continue\n\n if joints_num != 16: #Ignore those figures with incomplete joint information\n try:\n self.image_names.remove(img_fn)\n except ValueError:\n pass \n continue\n\n try:\n scale = self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['scale'][0,0]\n #rough human position\n obj_pos_x = self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['objpos'][0,0].__dict__['x'][0,0]\n obj_pos_y = self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['objpos'][0,0].__dict__['y'][0,0]\n #person-centric body joint annotations\n joints_x = []\n joints_y = []\n joints_id = []\n joints_visible = []\n for j in range(joints_num):\n joints_x.append(self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['annopoints'][0,0].__dict__['point'][0,j].__dict__['x'][0,0])\n joints_y.append(self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['annopoints'][0,0].__dict__['point'][0,j].__dict__['y'][0,0])\n joints_id.append(self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['annopoints'][0,0].__dict__['point'][0,j].__dict__['id'][0,0])\n try:\n joints_visible.append(self.annot_mat['RELEASE'][0,0].__dict__['annolist'][0,i].__dict__['annorect'][0,0].__dict__['annopoints'][0,0].__dict__['point'][0,j].__dict__['is_visible'][0,0])\n except:\n joints_visible.append(0)\n annot = {img_fn: {'img_fn':img_fn, 'scale': scale, 'objpos_x':obj_pos_x, 'objpos_y':obj_pos_y, 'x':joints_x, 'y': joints_y, 'id': joints_id, 'is_visible': joints_visible}}\n self.annots.update(annot)\n except:\n try:\n self.image_names.remove(img_fn)\n except ValueError:\n pass \n continue\n return \n\n def __len__(self):\n return len(self.image_names)\n def __getitem__(self, idx):\n fn = self.image_names[idx]\n fn_ = os.path.join(self.imagedir, fn)\n img = torch.from_numpy(skio.imread(fn_)).permute(2,0,1)\n return {'img':img.float()/255, 'annot': self.annots[fn]}\n\n","sub_path":"MPII/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"114921449","text":"import argparse\n\nimport torch\nfrom torch import nn, optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport random\nimport os\nimport shutil\nimport sys\nimport json\nfrom datetime import datetime\n\nfrom models.resnet_AffineMix import resnet50\nfrom data.data_manager import get_val_loader, get_train_loader\nfrom data.imagenetDataset import imagenetDataset\nfrom models.AffineMix import AffineMix\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"training script\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--batch_size\", \"-b\", type=int, default=32, help=\"Batch size\")\n parser.add_argument(\"--val_batch_size\", type=int, default=512, help=\"Batch size\")\n parser.add_argument('--pretrained', action='store_true', help='Load pretrain model')\n parser.add_argument(\"--img_size\", type=int, default=256, help=\"Image size to resize before cropping 224\")\n\n parser.add_argument(\"--learning_rate\", \"-l\", type=float, default=0.001, help=\"Learning rate\")\n 
parser.add_argument(\"--epochs\", \"-e\", type=int, default=30, help=\"Number of epochs\")\n parser.add_argument(\"--MILESTONES\", nargs='*', type=int, default=[10, 20, 30], help=\"Learning rate\")\n parser.add_argument('--weight_decay', default=1e-4, type=float, help='weight decay')\n parser.add_argument('--reset_fc', action='store_true', help='Reset last fc layer in init')\n\n parser.add_argument(\"--n_workers\", type=int, default=4, help=\"Number of workers for dataloader\")\n parser.add_argument(\"--data_parallel\", action='store_true', help='Run on all visible gpus')\n parser.add_argument(\"--pin_memory\", action='store_true', help='Pin memory in data loader')\n parser.add_argument(\"--dataset_type\", type=str, default='image', choices=['image', 'lmdb'],\n help=\"lmdb or image folder\")\n\n parser.add_argument(\"--img_dir\", default='/home/work/Datasets/Tiny-ImageNet-original', help=\"Images dir path\")\n\n parser.add_argument('--resume_edge',\n default='experiments/Imagenet/resnet50/randomResizeCrop/shape=1_color=0_pretrained_lr=0.01_imgsize=256_trainBN_cropResized/checkpoints/model_best.pth.tar',\n type=str,\n help='path to edge model checkpoint (default: none)')\n parser.add_argument('--resume_color',\n default='experiments/Imagenet/resnet50/randomResizeCrop/shape=0_color=1_pretrained_lr=0.01_trainBN_cropResized/checkpoints/checkpoint_4_acc_0.687.pth.tar',\n type=str,\n help='path to color model checkpoint (default: none)')\n parser.add_argument('--resume_ensemble', default='', type=str,\n help='path to color model checkpoint (default: none)')\n\n parser.add_argument(\"--save_checkpoint_interval\", type=int, default=10, help=\"Save checkpoints every i epochs\")\n parser.add_argument(\"--experiment\", default='experiments/ImageNetSubset/ensemble50_batch/train=fc_convex=0.5',\n help=\"Logs dir path\")\n\n args = parser.parse_args()\n\n args.checkpoint = f'{args.experiment}/checkpoints'\n args.log_dir = f'{args.experiment}/logs'\n\n return args\n\n\ndef save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):\n if not os.path.exists(checkpoint):\n os.makedirs(checkpoint)\n\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))\n\n\ndef save_args_json(args):\n args_dict = vars(args)\n\n if not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir)\n\n with open(f'{args.log_dir}/args.json', 'w') as outfile:\n json.dump(args_dict, outfile, indent=4, sort_keys=True)\n\n\ndef log_info(text):\n dt_string = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n print(f'{dt_string} | {text}')\n sys.stdout.flush()\n\n\nclass Trainer:\n def __init__(self, args, device):\n self.args = args\n self.device = device\n self.start_epoch = 0\n self.best_acc = 0\n\n self.train_loader = get_train_loader(args)\n self.val_loader = get_val_loader(args)\n\n edge_model = resnet50(num_classes=1000)\n edge_checkpoint = torch.load(args.resume_edge)\n edge_state_dict = edge_checkpoint['state_dict']\n edge_model.load_state_dict(edge_state_dict)\n\n color_model = resnet50(num_classes=1000)\n color_checkpoint = torch.load(args.resume_color)\n color_state_dict = color_checkpoint['state_dict']\n color_model.load_state_dict(color_state_dict)\n\n ensemble_model = resnet50(num_classes=1000, norm_layer=AffineMix)\n ensemble_model.load_AffineMix_state_dict(edge_model, color_model, args)\n\n param_count = sum(p.numel() for p in ensemble_model.parameters() if p.requires_grad)\n 
print(f'Parameter count: {param_count:,}')\n\n self.optimizer = optim.SGD(ensemble_model.get_trainable_params(), lr=args.learning_rate, momentum=0.9,\n weight_decay=args.weight_decay)\n self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=args.MILESTONES, gamma=0.1)\n\n if args.resume_ensemble and os.path.isfile(args.resume_ensemble):\n print(f'Loading checkpoint {args.resume_ensemble}')\n\n checkpoint = torch.load(args.resume_ensemble)\n self.start_epoch = checkpoint['epoch']\n self.best_acc = checkpoint['best_prec1']\n ensemble_model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(device)\n\n print(f'Loaded checkpoint {args.resume_ensemble}, starting from epoch {self.start_epoch}')\n\n if args.data_parallel:\n ensemble_model = torch.nn.DataParallel(ensemble_model)\n\n self.ensemble_model = ensemble_model.to(device)\n\n self.criterion = nn.CrossEntropyLoss()\n cudnn.benchmark = True\n self.writer = SummaryWriter(log_dir=str(args.log_dir))\n\n def _do_epoch(self, epoch_idx):\n self.ensemble_model.train()\n\n for batch_idx, (images, targets, _) in enumerate(self.train_loader):\n images, targets = images.to(self.device), targets.to(self.device)\n\n self.optimizer.zero_grad()\n\n outputs, _ = self.ensemble_model(images)\n\n loss = self.criterion(outputs, targets)\n\n if batch_idx % 100 == 1:\n log_info(f'epoch: {epoch_idx}/{self.args.epochs}, batch: {batch_idx}/{len(self.train_loader)}, '\n f'loss: {loss.item()}')\n\n self.writer.add_scalar('loss_train', loss.item(), epoch_idx * len(self.train_loader) + batch_idx)\n\n loss.backward()\n self.optimizer.step()\n\n self.ensemble_model.eval()\n\n with torch.no_grad():\n total = len(self.val_loader.dataset)\n class_correct = self.do_test(self.val_loader)\n class_acc = float(class_correct) / total\n log_info(f'Validation Accuracy: {class_acc}')\n\n is_best = False\n if class_acc > self.best_acc:\n self.best_acc = class_acc\n is_best = True\n\n if is_best or (epoch_idx + 1) % self.args.save_checkpoint_interval == 0:\n checkpoint_name = f'checkpoint_{epoch_idx + 1}_acc_{round(class_acc, 3)}.pth.tar'\n print(f'Saving {checkpoint_name} to dir {self.args.checkpoint}')\n\n if self.args.data_parallel:\n state_dict = self.ensemble_model.module.state_dict()\n else:\n state_dict = self.ensemble_model.state_dict()\n\n save_checkpoint({\n 'epoch': epoch_idx + 1,\n 'state_dict': state_dict,\n 'best_prec1': self.best_acc,\n 'optimizer': self.optimizer.state_dict(),\n }, is_best, checkpoint=self.args.checkpoint, filename=checkpoint_name)\n\n self.writer.add_scalar('val_accuracy', class_acc, epoch_idx)\n\n def do_test(self, loader):\n class_correct = 0\n\n for i, (inputs, targets, _) in enumerate(loader, 1):\n inputs, targets = inputs.to(self.device), targets.to(self.device)\n\n # forward\n outputs, _ = self.ensemble_model(inputs)\n\n _, cls_pred = outputs.max(dim=1)\n\n class_correct += torch.sum(cls_pred == targets)\n\n return class_correct\n\n def do_training(self):\n for self.current_epoch in range(self.start_epoch, self.args.epochs):\n self._do_epoch(self.current_epoch)\n self.scheduler.step()\n\n self.writer.close()\n\n return self.best_acc\n\n\ndef main():\n args = get_args()\n\n torch.manual_seed(0)\n torch.cuda.manual_seed(0)\n np.random.seed(0)\n random.seed(0)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n if sys.gettrace() 
is not None: # debug\n print('Debug mode!')\n args.n_workers = 0\n\n save_args_json(args)\n trainer = Trainer(args, device)\n best_val_acc = trainer.do_training()\n\n\nif __name__ == \"__main__\":\n torch.cuda.empty_cache()\n torch.backends.cudnn.benchmark = True\n main()\n","sub_path":"ImageNet/train_AffineMix.py","file_name":"train_AffineMix.py","file_ext":"py","file_size_in_byte":9575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635151956","text":"# %load q03_get_toss_win_count/build.py\n#Default Imports\nimport numpy as np\nipl_matches_array =np.genfromtxt('data/ipl_matches_small.csv', dtype='|S50', skip_header=1, delimiter=',')\n\n\n#Your Solution\n\n\ndef get_toss_win_count(team):\n i1=ipl_matches_array[ipl_matches_array[:,5]==team]\n #result=np.bincount(np.unique(i1[:,0]).astype(int)) \n result=np.count_nonzero(np.unique(i1[:,0]))\n return result\n\n\n\n","sub_path":"q03_get_toss_win_count/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553796898","text":"from cnc_comm import *\r\nimport argparse\r\nimport sys\r\nimport time\r\n\r\n\r\ndef save_position(x, y):\r\n file = open(\"pos.txt\", 'w')\r\n file.write(\"%.2f\\n%.2f\\n\" % (x, y))\r\n file.close()\r\n\r\n\r\ndef retrieve_data(cnc, x, y):\r\n file = open(\"pos.txt\", 'r')\r\n data = file.readlines()\r\n file.close()\r\n x_pos = float(data[0])\r\n y_pos = float(data[1])\r\n if ((x_pos == x) & (y_pos == y)):\r\n print (\"You are at %.2f, %.2f\" % (x_pos, y_pos))\r\n elif (((x_pos + x) >= 0) & ((y_pos + y) >= 0)):\r\n if ((x_pos != x)):\r\n p = cnc.stream_code_x_axis(-(x_pos - x))\r\n if ((y_pos != y)):\r\n q = cnc.stream_code_y_axis(-(y_pos - y))\r\n\r\n save_position(x, y)\r\n else:\r\n print (\"You are at the limit of the axis !!\")\r\n\r\n\r\ndef main():\r\n # Command line tool\r\n\r\n parser = argparse.ArgumentParser(description='Help')\r\n parser.add_argument(\r\n 'port',\r\n help='the name of the serial port to communicate to the Arduino, '\r\n 'e.g. 
COM10'\r\n )\r\n parser.add_argument(\r\n '--f',\r\n nargs=1,\r\n type=argparse.FileType('r'),\r\n default=False, metavar='filename',\r\n help='the file containing the gcode')\r\n\r\n parser.add_argument(\r\n '--x',\r\n nargs=1,\r\n type=float,\r\n # keep the default a list: nargs=1 stores a list, and main() reads args.x[0]\r\n default=[0.0],\r\n metavar='value',\r\n # action = 'store_true',\r\n help='the distance on x axis in mm')\r\n parser.add_argument(\r\n '--y',\r\n nargs=1,\r\n type=float,\r\n default=[0.0],\r\n metavar='value',\r\n # action = 'store_true',\r\n help='the distance on y axis in mm')\r\n parser.add_argument(\r\n '--home',\r\n action='store_true',\r\n help='go to home (0, 0, 0)')\r\n\r\n args = parser.parse_args()\r\n port = args.port\r\n\r\n cnc = CncComm()\r\n cnc.open_port(port)\r\n retrieve_data(cnc, args.x[0], args.y[0])\r\n #time.sleep(5)\r\n #retrieve_data(cnc, 0, 0)\r\n\r\n # file = open(\"pos.txt\", 'r')\r\n # data = file.readlines()\r\n # file.close()\r\n # x_pos = float(data[0])\r\n # y_pos = float(data[1])\r\n # if ((x_pos == args.x[0]) & (y_pos == args.y[0])):\r\n # print (\"You are at %.2f, %.2f\" % (x_pos, y_pos))\r\n # elif (((x_pos + args.x[0]) > 0) & ((y_pos + args.y[0]) > 0)):\r\n # if ((x_pos != args.x[0])):\r\n # x = cnc.stream_code_x_axis(-(x_pos - args.x[0]))\r\n # if ((y_pos != args.y[0])):\r\n # y = cnc.stream_code_y_axis(-(y_pos - args.y[0]))\r\n #\r\n # save_position(args.x[0], args.y[0])\r\n # else:\r\n # print (\"You are at the limit of the axis !!\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"scripts/cnc_cmd.py","file_name":"cnc_cmd.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"508475724","text":"# Copyright 2020 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" fci_graph unit tests\n\"\"\"\n\nimport numpy\nimport fqe\nimport pytest\nfrom scipy import special\nfrom fqe import fci_graph\nfrom tests.unittest_data.fci_graph_data import loader\nfrom tests.comparisons import compare_Spinmap\n\ncases = [(4, 3, 8), (4, 4, 6), (0, 3, 7), (2, 0, 6)]\n\n\ndef test_fci_graph(c_or_python):\n \"\"\"Check the basic initializers and getter functions.\n \"\"\"\n fqe.settings.use_accelerated_code = c_or_python\n\n refdata = [\n 15, 23, 39, 71, 135, 27, 43, 75, 139, 51, 83, 147, 99, 163, 195, 29, 45,\n 77, 141, 53, 85, 149, 101, 165, 197, 57, 89, 153, 105, 169, 201, 113,\n 177, 209, 225, 30, 46, 78, 142, 54, 86, 150, 102, 166, 198, 58, 90, 154,\n 106, 170, 202, 114, 178, 210, 226, 60, 92, 156, 108, 172, 204, 116, 180,\n 212, 228, 240\n ]\n reflist = numpy.array(refdata, dtype=numpy.uint64)\n\n refdict = {\n 15: 0,\n 23: 1,\n 27: 5,\n 29: 15,\n 30: 35,\n 39: 2,\n 43: 6,\n 45: 16,\n 46: 36,\n 51: 9,\n 53: 19,\n 54: 39,\n 57: 25,\n 58: 45,\n 60: 55,\n 71: 3,\n 75: 7,\n 77: 17,\n 78: 37,\n 83: 10,\n 85: 20,\n 86: 40,\n 89: 26,\n 90: 46,\n 92: 56,\n 99: 12,\n 101: 22,\n 102: 42,\n 105: 28,\n 106: 48,\n 108: 58,\n 113: 31,\n 114: 51,\n 116: 61,\n 120: 65,\n 135: 4,\n 139: 8,\n 141: 18,\n 
142: 38,\n 147: 11,\n 149: 21,\n 150: 41,\n 153: 27,\n 154: 47,\n 156: 57,\n 163: 13,\n 165: 23,\n 166: 43,\n 169: 29,\n 170: 49,\n 172: 59,\n 177: 32,\n 178: 52,\n 180: 62,\n 184: 66,\n 195: 14,\n 197: 24,\n 198: 44,\n 201: 30,\n 202: 50,\n 204: 60,\n 209: 33,\n 210: 53,\n 212: 63,\n 216: 67,\n 225: 34,\n 226: 54,\n 228: 64,\n 232: 68,\n 240: 69\n }\n norb = 8\n nalpha = 4\n nbeta = 0\n lena = int(special.binom(norb, nalpha))\n max_bitstring = (1 << norb) - (1 << (norb - nalpha))\n testgraph = fci_graph.FciGraph(nalpha, nbeta, norb)\n assert testgraph._build_string_address(nalpha, norb, [0, 1, 2, 3]) == 0\n assert testgraph._build_string_address(nalpha, norb, [1, 2, 3, 7]) == 38\n\n test_list, test_dict = testgraph._build_strings(nalpha, lena)\n assert numpy.array_equal(test_list, reflist)\n assert test_dict == refdict\n assert testgraph.string_beta(0) == 0\n assert testgraph.string_alpha(lena - 1) == max_bitstring\n assert testgraph.index_beta(0) == 0\n assert testgraph.index_alpha(max_bitstring) == lena - 1\n assert testgraph.lena() == lena\n assert testgraph.lenb() == 1\n assert testgraph.nalpha() == nalpha\n assert testgraph.nbeta() == nbeta\n assert testgraph.norb() == norb\n assert testgraph.string_alpha(lena - 1) == max_bitstring\n\n assert numpy.array_equal(testgraph.string_alpha_all(), reflist)\n assert numpy.array_equal(testgraph.string_beta_all(),\n numpy.array([0], dtype=numpy.uint64))\n\n assert testgraph.index_alpha_all() == refdict\n assert testgraph.index_beta_all() == {0: 0}\n\n\ndef test_fci_graph_maps(c_or_python):\n \"\"\"Check graph mapping functions\n \"\"\"\n fqe.settings.use_accelerated_code = c_or_python\n\n ref_alpha_map = {\n (0, 0): [(0, 0, 1), (1, 1, 1), (2, 2, 1)],\n (0, 1): [(3, 1, 1), (4, 2, 1)],\n (0, 2): [(3, 0, -1), (5, 2, 1)],\n (0, 3): [(4, 0, -1), (5, 1, -1)],\n (1, 0): [(1, 3, 1), (2, 4, 1)],\n (1, 1): [(0, 0, 1), (3, 3, 1), (4, 4, 1)],\n (1, 2): [(1, 0, 1), (5, 4, 1)],\n (1, 3): [(2, 0, 1), (5, 3, -1)],\n (2, 0): [(0, 3, -1), (2, 5, 1)],\n (2, 1): [(0, 1, 1), (4, 5, 1)],\n (2, 2): [(1, 1, 1), (3, 3, 1), (5, 5, 1)],\n (2, 3): [(2, 1, 1), (4, 3, 1)],\n (3, 0): [(0, 4, -1), (1, 5, -1)],\n (3, 1): [(0, 2, 1), (3, 5, -1)],\n (3, 2): [(1, 2, 1), (3, 4, 1)],\n (3, 3): [(2, 2, 1), (4, 4, 1), (5, 5, 1)]\n }\n ref_beta_map = {\n (0, 0): [(0, 0, 1)],\n (0, 1): [(1, 0, 1)],\n (0, 2): [(2, 0, 1)],\n (0, 3): [(3, 0, 1)],\n (1, 0): [(0, 1, 1)],\n (1, 1): [(1, 1, 1)],\n (1, 2): [(2, 1, 1)],\n (1, 3): [(3, 1, 1)],\n (2, 0): [(0, 2, 1)],\n (2, 1): [(1, 2, 1)],\n (2, 2): [(2, 2, 1)],\n (2, 3): [(3, 2, 1)],\n (3, 0): [(0, 3, 1)],\n (3, 1): [(1, 3, 1)],\n (3, 2): [(2, 3, 1)],\n (3, 3): [(3, 3, 1)]\n }\n alist = numpy.array([3, 5, 9, 6, 10, 12], dtype=numpy.uint64)\n blist = numpy.array([1, 2, 4, 8], dtype=numpy.uint64)\n aind = {3: 0, 5: 1, 6: 3, 9: 2, 10: 4, 12: 5}\n bind = {1: 0, 2: 1, 4: 2, 8: 3}\n norb = 4\n nalpha = 2\n nbeta = 1\n testgraph = fci_graph.FciGraph(nalpha, nbeta, norb)\n alpha_map = testgraph._build_mapping(alist, nalpha, aind)\n beta_map = testgraph._build_mapping(blist, nbeta, bind)\n\n assert alpha_map.keys() == ref_alpha_map.keys()\n for ak in alpha_map:\n numpy.testing.assert_equal(alpha_map[ak], ref_alpha_map[ak])\n\n assert beta_map.keys() == ref_beta_map.keys()\n for ak in alpha_map:\n numpy.testing.assert_equal(alpha_map[ak], ref_alpha_map[ak])\n\n dummy_map = ({(1, 1): (0, 1, 2)}, {(-1, -1), (0, 1, 2)})\n testgraph.insert_mapping(1, -1, dummy_map)\n assert testgraph.find_mapping(1, -1) == dummy_map\n\n\ndef test_alpha_beta_transpose(norb=4, 
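Review note (seq_id 508475724, tests/fci_graph_test.py): in test_fci_graph_maps the second verification loop iterates alpha_map again, so the values of beta_map are never compared against ref_beta_map — it looks like a copy-paste slip. Note also that dummy_map's second element, {(-1, -1), (0, 1, 2)}, is a set literal (comma where a colon was presumably intended), though the round-trip assertion still passes. The intended beta check, presumably:

    # compare beta_map values, not alpha_map a second time
    assert beta_map.keys() == ref_beta_map.keys()
    for bk in beta_map:
        numpy.testing.assert_equal(beta_map[bk], ref_beta_map[bk])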
nalpha=3, nbeta=2):\n \"\"\"Check alpha_beta_transpose\n \"\"\"\n original = fci_graph.FciGraph(nalpha, nbeta, norb)\n transposed = original.alpha_beta_transpose()\n\n assert original is not transposed\n assert original._nalpha == transposed._nbeta\n assert original._nbeta == transposed._nalpha\n assert original._lena == transposed._lenb\n assert original._lenb == transposed._lena\n\n assert original._astr is not transposed._bstr # not same object\n assert numpy.array_equal(original._astr, transposed._bstr) # but equiv\n assert original._bstr is not transposed._astr # not same object\n assert numpy.array_equal(original._bstr, transposed._astr) # but equiv\n\n assert original._aind is not transposed._bind # not same object\n assert original._aind == transposed._bind # but equiv\n assert original._bind is not transposed._aind # not same object\n assert original._bind == transposed._aind # but equiv\n\n assert original._alpha_map is not transposed._beta_map # not same object\n compare_Spinmap(original._alpha_map, transposed._beta_map)\n assert original._beta_map is not transposed._alpha_map # not same object\n compare_Spinmap(transposed._beta_map, original._alpha_map)\n\n assert original._dexca is not transposed._dexcb # not same object\n assert numpy.array_equal(original._dexca, transposed._dexcb) # but equiv\n assert original._dexcb is not transposed._dexca # not same object\n assert numpy.array_equal(original._dexcb, transposed._dexca) # but equiv\n\n\ndef test_map(alpha_or_beta, norb=4, nalpha=3, nbeta=2):\n \"\"\"Check alpha_map or beta_map\n \"\"\"\n graph = fci_graph.FciGraph(nalpha, nbeta, norb)\n if alpha_or_beta == \"alpha\":\n get_map = graph.alpha_map\n map_object = graph._alpha_map\n elif alpha_or_beta == \"beta\":\n get_map = graph.beta_map\n map_object = graph._beta_map\n else:\n raise ValueError(f'Unknown value {alpha_or_beta}')\n\n assert get_map(1, 2) is map_object[(1, 2)]\n assert get_map(2, 0) is map_object[(2, 0)]\n\n with pytest.raises(KeyError):\n get_map(-1, 2)\n\n with pytest.raises(KeyError):\n get_map(0, 4)\n\n\ndef test_init_logic():\n \"\"\"Checks the logic of the __init__ of FciGraph\n \"\"\"\n with pytest.raises(ValueError):\n fci_graph.FciGraph(-1, 10, 10)\n with pytest.raises(ValueError):\n fci_graph.FciGraph(11, 1, 10)\n with pytest.raises(ValueError):\n fci_graph.FciGraph(1, -1, 10)\n with pytest.raises(ValueError):\n fci_graph.FciGraph(1, 11, 10)\n with pytest.raises(ValueError):\n fci_graph.FciGraph(1, 1, -1)\n\n\n@pytest.mark.parametrize(\"nalpha,nbeta,norb\", cases)\ndef test_make_mapping_each(alpha_or_beta, c_or_python, nalpha, nbeta, norb):\n \"\"\"Check make_mapping_each wrt reference data\n \"\"\"\n fqe.settings.use_accelerated_code = c_or_python\n # graph = loader(nalpha, nbeta, norb, 'graph')\n graph = fci_graph.FciGraph(nalpha, nbeta, norb)\n reference = loader(nalpha, nbeta, norb, 'make_mapping_each')\n\n alpha = {\"alpha\": True, \"beta\": False}[alpha_or_beta]\n length = {\"alpha\": graph.lena(), \"beta\": graph.lenb()}[alpha_or_beta]\n\n for (c_alpha, dag, undag), refval in reference.items():\n if c_alpha == alpha:\n result = numpy.zeros((length, 3), dtype=numpy.uint64)\n count = graph.make_mapping_each(result, alpha, dag, undag)\n assert numpy.array_equal(result[:count, :], refval)\n\n\n@pytest.mark.parametrize(\"nalpha,nbeta,norb\", cases)\ndef test_map_to_deexc_alpha_icol(c_or_python, norb, nalpha, nbeta):\n \"\"\"Check _map_to_deexc_alpha_icol\n \"\"\"\n fqe.settings.use_accelerated_code = c_or_python\n # graph = loader(nalpha, nbeta, 
norb, 'graph')\n graph = fci_graph.FciGraph(nalpha, nbeta, norb)\n rindex, rexc, rdiag = loader(nalpha, nbeta, norb, 'map_to_deexc_alpha_icol')\n\n index, exc, diag = graph._map_to_deexc_alpha_icol()\n assert numpy.array_equal(rindex, index)\n assert numpy.array_equal(rexc, exc)\n assert numpy.array_equal(rdiag, diag)\n\n\n@pytest.mark.parametrize(\"nalpha,nbeta,norb\", cases)\ndef test_get_block_mappings(norb, nalpha, nbeta):\n \"\"\"Check _get_block_mappings\n \"\"\"\n # graph = loader(nalpha, nbeta, norb, 'graph')\n graph = fci_graph.FciGraph(nalpha, nbeta, norb)\n rmappings_set = loader(nalpha, nbeta, norb, 'get_block_mappings')\n\n for (ms, jo), rmappings in rmappings_set.items():\n mappings = graph._get_block_mappings(max_states=ms, jorb=jo)\n\n # Check if the ranges (cmap[0] and cmap[1]) loops over all states\n # Just an extra check\n assert set((x for cmap in mappings for x in cmap[0])) == \\\n set(range(graph.lena()))\n assert set((x for cmap in mappings for x in cmap[1])) == \\\n set(range(graph.lenb()))\n\n for rmap, cmap in zip(rmappings, mappings):\n assert rmap[0] == cmap[0]\n assert rmap[1] == cmap[1]\n assert numpy.array_equal(rmap[2], cmap[2])\n assert numpy.array_equal(rmap[3], cmap[3])\n","sub_path":"tests/fci_graph_test.py","file_name":"fci_graph_test.py","file_ext":"py","file_size_in_byte":11186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640328865","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 15 22:15:32 2019\n\n@author: Aryan Singh\n\"\"\"\n\n'''\nA * B + C / D\nApplications - Calculator\n\nwe push when we have numbers\npop and evaluate when there is a operator\n\nSteps:-\n 1.Create an empty stack called operandStack.\n 2.Convert the string to a list by using the string method split.\n 3.Scan the token list from left to right.\n 4.If the token is an operand, convert it from a string to an integer \n and push the value onto the operandStack.\n 5.If the token is an operator, *, /, +, or -, it will need two operands. \n Pop the operandStack twice. The first pop is the second operand and the \n second pop is the first operand. Perform the arithmetic operation. \n Push the result back on the operandStack.\n\nWhen the input expression has been completely processed, \nthe result is on the stack. 
Pop the operandStack and return \nthe value.\n\n'''\n\nimport queue \nstack = queue.LifoQueue(maxsize=20) \n\ndef postfix(expression):\n token = expression.split()\n \n for tok in token:\n if tok in \"0123456789\":\n stack.put(int(tok))\n else:\n op2 = stack.get()\n op1 = stack.get()\n result = doMath(tok,op1,op2)\n stack.put(result)\n return stack.get()\n\ndef doMath(op, op2, op1):\n if op == '*':\n return op1*op2\n elif op == \"/\":\n return op1 / op2\n elif op == \"+\":\n return op1 + op2\n else:\n return op1 - op2\n \n \nprint(postfix('7 8 + 3 2 + /')) # prints 0.333\nprint(postfix('2 3 +')) # prints 5\nprint(postfix('2 3 + 6 *')) \n ","sub_path":"data_structures/postfix_stack.py","file_name":"postfix_stack.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470728462","text":"#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"\n Solve Quals 2016 p.C\n\n Author: killerrex\n\"\"\"\n\nimport sys\n\n\nclass PrimeSet:\n Known = [2, 3, 5, 7]\n\n @classmethod\n def _is_prime_so_far(cls, n):\n \"\"\"\n Check if a number is prime, up to the last known prime\n Args:\n n: Number to test\n \"\"\"\n for p in cls.Known:\n if n % p == 0:\n return False\n return True\n\n @classmethod\n def _grow(cls):\n \"\"\"\n Add the next prime to the list\n Returns: The last prime added\n \"\"\"\n\n n = cls.Known[-1]\n big = n % 6 == 1\n ok = False\n while not ok:\n # Advance to the next position...\n if big:\n n += 4\n else:\n n += 2\n big = not big\n ok = cls._is_prime_so_far(n)\n\n cls.Known.append(n)\n return n\n\n def __contains__(self, item):\n \"\"\"\n Search a prime in the list\n Args:\n item:\n Returns:\n \"\"\"\n # Search using a binomial search\n # We know that p[item] > item\n l = 0\n u = min(item, len(self.Known) - 1)\n if self.Known[0] == item or self.Known[u] == item:\n return True\n\n while u - l > 1:\n m = (l + u) // 2\n p = self.Known[m]\n if p == item:\n return True\n\n if p > item:\n u = m\n else:\n l = m\n return False\n\n def __init__(self):\n \"\"\"\n Create a new iterator in the primes\n \"\"\"\n super().__init__()\n\n def __iter__(self):\n return self.__next__()\n\n def __next__(self):\n yield from self.Known\n\n # Now we need to grow each time\n while True:\n yield self._grow()\n\n\ndef coinjammify(n, value, desist=100):\n \"\"\"\n Convert v in a coinjam of n bits\n Args:\n n: Number of bits\n value: initial value\n\n Returns:\n The coinjam or None\n \"\"\"\n if n <= 2:\n return None\n # Force to be 1__n-2 bits___1\n tpl = '1{:0{n}b}1'.format(value, n=n-2)\n\n bases = list(range(2, 11))\n proof = [0]*len(bases)\n # Obtain the number in each base:\n values = [(b, int(tpl, b)) for b in bases]\n\n patience = desist\n for p in PrimeSet():\n patience -= 1\n sz = len(values)\n # Enumerate in reverse, to be able to pop elements\n for k in range(sz-1, -1, -1):\n b, v = values[k]\n\n if p >= v:\n # Prime found... 
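Review note (seq_id 640328865, data_structures/postfix_stack.py): the call site passes doMath(tok, op1, op2) but the signature is doMath(op, op2, op1), so division and subtraction apply the operands in reverse: '7 8 + 3 2 + /' prints 0.333... instead of the 15 / 5 = 3.0 the docstring's algorithm describes (the inline comment documents the buggy output). The module-level LifoQueue is also shared across calls, so leftovers from one expression leak into the next, and `tok in "0123456789"` accepts some multi-digit tokens only by substring accident. A minimal corrected sketch:

    def do_math(op, left, right):
        if op == '*':
            return left * right
        if op == '/':
            return left / right
        if op == '+':
            return left + right
        return left - right

    def postfix(expression):
        stack = []                      # fresh stack per expression
        for tok in expression.split():
            if tok.isdigit():           # robust for multi-digit operands
                stack.append(int(tok))
            else:
                right = stack.pop()     # first pop is the second operand
                left = stack.pop()      # second pop is the first operand
                stack.append(do_math(tok, left, right))
        return stack.pop()

    assert postfix('7 8 + 3 2 + /') == 3.0   # (7 + 8) / (3 + 2)
    assert postfix('2 3 + 6 *') == 30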
this is not a coinjam\n return None, None\n\n if v % p == 0:\n proof[b-2] = p\n del values[k]\n patience = desist\n if len(values) == 0:\n break\n # Just too long\n if patience <= 0:\n return None, None\n return tpl, proof\n\n\ndef paranoid(tpl, proof):\n for k, p in enumerate(proof):\n b = k + 2\n v = int(tpl, b)\n if v % p != 0:\n return False\n return True\n\n\ndef solve(fd):\n \"\"\"\n Read the cases from fd and solve them\n\n Args:\n fd: File descriptor\n\n Returns:\n\n \"\"\"\n\n total = int(fd.readline().strip())\n\n for k in range(total):\n n, j = (int(c) for c in fd.readline().strip().split())\n\n print(\"Case #{}:\".format(k+1))\n v = 0\n vmax = 1 << (n-2)\n t = 0\n # Estimate the patience we must have...\n patience = 2*j\n while v < vmax:\n cj, proof = coinjammify(n, v, patience)\n v += 1\n if cj is None:\n continue\n t += 1\n\n if not paranoid(cj, proof):\n print(\"Invalid: {} {}\".format(cj, proof))\n raise RuntimeError(\"This shall not pass\")\n\n proof = ' '.join(str(p) for p in proof)\n print(cj + ' ' + proof)\n if t >= j:\n break\n else:\n print(\"Not enough patience...\")\n\n\ndef start():\n if len(sys.argv) > 1:\n with open(sys.argv[1], 'r') as fd:\n solve(fd)\n else:\n solve(sys.stdin)\n\nif __name__ == '__main__':\n start()\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_killerrex_problem_c.py","file_name":"16_0_3_killerrex_problem_c.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"406361732","text":"from django.test import TestCase\r\nfrom regex_builder.regex_builder import createIntRegex\r\n# Create your tests here.\r\nimport re\r\n\r\nclass RegexTestCase(TestCase):\r\n\r\n def test_integer_regex(self):\r\n \r\n p_low = 50667\r\n p_high = 54672\r\n\r\n regex_range = createIntRegex(str(p_low), str(p_high))\r\n print(regex_range)\r\n _range = re.compile(regex_range)\r\n\r\n for i in range(10000, 1000000):\r\n if i < p_low:\r\n self.assertFalse(_range.match(str(i)))\r\n if i >= p_low and i <= p_high:\r\n self.assertTrue(_range.match(str(i)))\r\n if i > p_high:\r\n self.assertFalse(_range.match(str(i)))","sub_path":"postal_ranges/regex_builder/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"102134493","text":"n=int(onput())\nresult=[]\nwhile n>0:\n ls=input().split(\" \")\n ls=[int(x) for x in ls]\n A=ls[0]\n B=ls[1]\n C=ls[2]\n #要计算A^B%C\n while B>0:\n A=A*A\n B=B-1\n result.append(A%C)\n n=n-1\nfor i in range(0,len(result)):\n print(result[i])","sub_path":"Code/CodeRecords/2732/60796/249322.py","file_name":"249322.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432239943","text":"from math import *\ndef annulaires(n):\n l=[]\n a=2\n s=True\n while a<=2013:\n l.append(a)\n if s:\n a+=(n-2)*2\n else:\n a+=2\n s=not s\n return l\n \nv=annulaires(8)\nb=annulaires(7)\ns=0\n\nfor i in v:\n if i in b:\n s+=i\n \nprint(s)","sub_path":"dt19.py","file_name":"dt19.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83511027","text":"\nimport collections\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n\ndef testLogger(string):\n logger.debug(string)\n\ndef copy_dict_shallow(d):\n \"\"\"\n Make a shallow copy of the given dict, while 
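Review note (seq_id 102134493, 249322.py): `n=int(onput())` is a NameError (input() was surely meant), and the inner loop squares A on every iteration while decrementing B, which computes A^(2^B) mod C rather than A^B mod C. Python's three-argument pow does modular exponentiation directly; a minimal sketch:

    # read n cases of "A B C" and print A^B mod C for each
    n = int(input())
    results = []
    for _ in range(n):
        a, b, c = (int(x) for x in input().split())
        results.append(pow(a, b, c))    # built-in modular exponentiation
    for r in results:
        print(r)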
checking if all keys and values are hashable\n \"\"\"\n result = dict()\n for key, value in d.items():\n key_hashable = isinstance(key, collections.Hashable)\n value_hashable = isinstance(value, collections.Hashable)\n\n if not key_hashable or not value_hashable:\n raise ValueError(\"{0} : {1} is not hashable\".format(key, value))\n else:\n result[key] = value\n\n return result\n\ndef copy_dict_deep(d):\n \"\"\"\n Recursively make a deep copy of the given dictionary. All keys and values must be hashable\n \"\"\"\n result = dict()\n for key, value in d.items():\n key_hashable = isinstance(key, collections.Hashable)\n value_hashable = isinstance(value, collections.Hashable)\n\n if isinstance(value, dict) and key_hashable:\n result[key] = copy_dict_deep(d[key])\n elif key_hashable and value_hashable:\n result[key] = value\n else:\n raise NotImplementedError(\"{0} : {1} is not implemented\".format(key, value ))\n \n return result\n\ndef query_yes_no(question, default=None):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits .\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n","sub_path":"news-worker/worker/utils/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184672130","text":"import matplotlib.pyplot as plt\n\nm = 'o'\nmw = 1.5\ndef plot_f1():\n hot, spot, rg, struct, ipknot, knotty = 66.3, 72, 66.4, 65.9, 66.0, 67.2\n model_x = [10, 20, 30, 40, 50, 60, 70, 80, 90] \n model_y_new = [58.5, 62.0, 63.1, 66.1, 66.7, 69.3, 71.9, 70.8, 72.4]\n\n fig, ax = plt.subplots()\n\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n plt.plot(33, struct, marker=m, markeredgewidth=mw, linestyle='None', color='red')\n plt.plot(37, ipknot, marker=m, markeredgewidth=mw, linestyle='None', color='orange')\n plt.plot(43, hot, marker=m, markeredgewidth=mw,linestyle='None', color='gold')\n plt.plot(47, rg, marker=m, markeredgewidth=mw, linestyle='None', color='green')\n plt.plot(55, knotty, marker=m, markeredgewidth=mw, linestyle='None', color='skyblue')\n plt.plot(85, spot, marker=m, markeredgewidth=mw, linestyle='None', color='blue')\n plt.plot(model_x, model_y_new,marker=m, markeredgewidth=mw, linestyle='dashed', color='indigo', linewidth=0.5)\n plt.legend(['RNAstructure', 'Ipknot', 'HotKnots', 'pknotsRG', 'Knotty', 'SPOT-RNA', 'New-model'], loc='lower right', handlelength=0, borderpad=0.7)\n plt.axis((5, 95, 58, 73))\n plt.xlabel('Data percent for new model training')\n plt.ylabel('F1 score')\n\n plt.savefig('/home/polina/Desktop/plot_f1.png', dpi=700)\n \n\ndef plot_p_r():\n hot_p, spot_p, rg_p, struct_p, 
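Review note (seq_id 83511027, worker/utils/general.py): collections.Hashable has been an alias for collections.abc.Hashable since Python 3.3 and was removed outright in 3.10, so both copy helpers raise AttributeError on current interpreters. The fix is just the import path; a sketch of copy_dict_shallow with the portable spelling:

    from collections.abc import Hashable   # works on 3.3+ including 3.10+

    def copy_dict_shallow(d):
        """Shallow-copy d, rejecting unhashable keys or values."""
        result = {}
        for key, value in d.items():
            if not (isinstance(key, Hashable) and isinstance(value, Hashable)):
                raise ValueError("{0} : {1} is not hashable".format(key, value))
            result[key] = value
        return result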
ipknot_p, knotty_p = 69.3, 73.1, 69.0, 69.5, 71.8, 69.9\n hot_r, spot_r, rg_r, struct_r, ipknot_r, knotty_r = 64.9, 72.8, 65.3, 64.6, 62.8, 66.1\n model_x = [10, 20, 30, 40, 50, 60, 70, 80, 90]\n model_y_p = [57.7, 61.3, 63.5, 64.9, 64.9, 67.9, 71.2, 68.8, 71.0] \n model_y_r = [62.1, 65.0, 65.2, 70.1, 70.8, 72.5, 74.6, 75.0, 75.6] \n\n fig, ax = plt.subplots()\n\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n \n plt.plot(struct_p, struct_r, marker=m, markeredgewidth=mw, linestyle='None', color='red')\n plt.plot(ipknot_p, ipknot_r, marker=m,markeredgewidth=mw, linestyle='None', color='orange')\n plt.plot(hot_p, hot_r, marker=m, markeredgewidth=mw, linestyle='None', color='gold')\n plt.plot(rg_p, rg_r, marker=m, markeredgewidth=mw, linestyle='None', color='green')\n plt.plot(knotty_p, knotty_r, marker=m, markeredgewidth=mw, linestyle='None', color='skyblue')\n plt.plot(spot_p, spot_r, marker=m, markeredgewidth=mw, linestyle='None', color='blue')\n plt.plot(model_y_p, model_y_r, marker=m, markeredgewidth=mw, linestyle='dashed', color='indigo', linewidth=0.5) \n plt.text(model_y_p[0] - 0.5, model_y_r[0] - 1, '10% data', color='black', fontsize=7)\n plt.text(model_y_p[8] + 0.5, model_y_r[8] - 0.25, '90% data', color='black', fontsize=7)\n plt.plot([57, 80], [57, 80], color='black', linewidth=0.5)\n plt.legend(['RNAstructure', 'Ipknot', 'HotKnots', 'pknotsRG', 'Knotty', 'SPOT-RNA', 'New-model'], loc='upper left', handlelength=0, borderpad=0.7)\n plt.axis((57, 76, 57, 76))\n plt.xlabel('Precision')\n plt.ylabel('Recall')\n\n plt.savefig('/home/polina/Desktop/plot_pr.png', dpi=700)\n\n\ndef print_latex_table(seq, img_file):\n\tout = '\\\\begin{table}[]\\n\\\\begin{tabular}{' + 'l' * len(seq) + '}\\n'\n\timg = np.array(Image.open(img_file))\n\tfor i in range(len(img)):\n\t for j in range(len(img)):\n\t if i == j:\n\t out += seq[i] + ' '\n\t elif j < i:\n\t out += '& '\n\t else:\n\t if img[i][j] == 0:\n\t out += '& ' + str(int(img[i][j] / 255)) + ' '\n\t else:\n\t out += '& \\\\textbf{' + str(int(img[i][j] / 255)) + '} '\n\t out += ' \\\\\\\\[2pt]\\n'\n\tout += '\\end{tabular}\\n\\end{table}'\n\tprint(out)","sub_path":"SecondaryStructurePrediction/scripts/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314912986","text":"import matplotlib.pyplot as plt\nfrom numpy import *\nfrom drange import *\nfrom enum import Enum\n\n############ USER DEFINED ######################################################\n#airplane\nmpns = ([200000, 60000])\n\nclass ScaleType(Enum):\n NoScale = 0\n LogScale = 1\n StdDevScale = 2\n ScaledScale = 3\n\nscale_type = ScaleType.StdDevScale;\n\n############ FILE/FORMAT #######################################################\n#how to scales\nstd_avg = 0.0;\nstd_dev = 0.0;\nstd_max = 0.0;\nstd_min = 0.0;\ndef Scale(t, idx):\n t = int(t[idx]);\n return {\n ScaleType.NoScale: t,\n ScaleType.LogScale: log(t),\n ScaleType.StdDevScale: ((t) - std_avg[idx])/(std_dev[idx]),\n ScaleType.ScaledScale: (t-std_min[idx])/(std_max[idx]-std_min[idx])\n }[scale_type];\n\nclass PurifyFile(UFCS_Mixin):\n def __init__(s, fil):\n s.fp = open(fil, 'r', encoding='utf-8')\n s.nline = s.NLine()\n NLine = lambda s: s.fp.readline()[:-1]\n Empty = lambda s: s.nline == ''\n def Front(s):\n (t, s.nline) = (s.nline, s.NLine())\n t = Range(*(''.join([x for x in t if x != ' ' and x != '\\n'])\n .split(',')[:-1]))\n return t\n\n# returns matrix file as 
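Review note (seq_id 184672130, plots.py): print_latex_table references np.array and Image.open, but the module only imports matplotlib.pyplot, so the function raises NameError the first time it is called (it also mixes tab and space indentation). The two missing imports, assuming NumPy and Pillow were intended:

    import numpy as np
    from PIL import Image   # Image.open for reading the pairing matrix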
a drange\ndef Load_File(s):\n return PurifyFile(s).Map(lambda arr: arr[:3]).Array()\n\ndmat = (Range(\"before-1950.txt\", \"after-1950.txt\")\n .Map(Load_File).Join(lambda t: False).Array())\ndmat = dmat[0]\ndnl = (dmat.Map(lambda t: PyRange(t[1:]\n .Map(lambda t: int(t))))\n .Array());\n# std deviation\ndef Avg(g):\n _len = g.Length()\n val = g.Reduce(lambda x, y: (x[0]+y[0], x[1]+y[1]))\n return (val[0]/_len, val[1]/_len);\npdnl = PyRange(dnl)\nstd_dev = std(pdnl, axis=0)\nstd_avg = average(pdnl, axis=0)\nstd_max = dnl.Reduce(lambda x, y: (max(x[0],y[0]), max(x[1],y[1])))\nstd_min = dnl.Reduce(lambda x, y: (min(x[0],y[0]), min(x[1],y[1])))\n\ndnl = dnl.Map(lambda P: (Scale(P, 0), Scale(P, 1)))\n\n\n############ NUMPY #############################################################\nplt.xlabel(\"weight\")\nplt.ylabel(\"horse power\")\n\nmp = ([Scale(mpns, 0), Scale(mpns, 1)])\ndef Distance(a, b):\n x = a[0]-b[0]; y = a[1]-b[1];\n return sqrt(x*x + y*y)\n\n# TODO ; enumerate bug\nbad_enumerate_t = 0\ndef BadEnumerate(t):\n global bad_enumerate_t\n bad_enumerate_t += 1\n return (t, bad_enumerate_t-1)\nplot_dnl = dnl\ndnl = dnl.Map(BadEnumerate)\nidx = (dnl\n .Map(lambda t: (t[1], Distance(mp, t[0])))\n .Reduce(lambda g, y: (g,y)[g[1]>y[1]]))\nprint(\"CLOSEST OF\\n\", mpns, \"\\nIS\\n\", dmat[idx[0]],\n \"\\nDIST [scaled]: \", idx[1]);\n\n# slope interscept/plot\nfrom scipy import stats\n\npyrange = (array(PyRange(plot_dnl.Map(lambda g: g[0]).Array())),\n array(PyRange(plot_dnl.Map(lambda g: g[1]).Array())));\nregress = stats.linregress(*pyrange)\nline = regress[0]*pyrange[0] + regress[1]\nprint(line)\nplt.plot(pyrange[0], line)\n\n# slope, intercept, r_value, p_value, std_err = stats.linregress(xi,y)\n# line = slope*xi+intercept\n\nplot_dnl.Each(lambda g: plt.plot(*g, 'ro'))\nplt.plot(*mp, 'go')\nplt.show()\n","sub_path":"HW/planes.py","file_name":"planes.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"393313459","text":"from pynput.keyboard import Key, Controller\r\nfrom adofai import ADOFAI\r\nimport time, json\r\n\r\ndef autoplay(levelpath):\r\n print(\"autoplay start\")\r\n with open(levelpath, encoding='utf-8-sig') as f:\r\n ctx = json.loads(f.read())\r\n settings = ctx['settings']\r\n bpm = settings['bpm']\r\n offset = settings['offset']\r\n pathdata = ctx['pathData']\r\n action = ctx['actions']\r\n bpmdata = {}\r\n twirldata = {}\r\n #print(action)\r\n\r\n for i in range(len(action)):\r\n act = action[i]\r\n event = act['eventType']\r\n if event == 'SetSpeed':\r\n bpmdata[act['floor'] - 1] = act['beatsPerMinute']\r\n if event == 'Twirl':\r\n twirldata[act['floor'] - 1] = 'Twirl'\r\n \r\n macro = ADOFAI(bpm, pathdata, offset, bpmdata, twirldata)\r\n macro.startMacro()\r\n","sub_path":"macro.py","file_name":"macro.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"179211504","text":"import collections\nwith open(\"directory.txt\") as f:\n\tcontent = f.readlines()\ncontent = [l.strip().split(\"\\t\") for l in content]\n\n#content.sort(key = lambda k : k[1])\n\nTele = collections.namedtuple(\"Tele\", [\"num\", \"name\"])\n\ntl = []\nfor n in content:\n\tt = Tele(n[0],n[1])\n\ttl.append(t)\n\ntl.sort(key = lambda k: k.name)\n\n\n\nwith open(\"dir-new.txt\", \"w\") as f:\n\tfor row in content:\n\t\ts = row[1] + \"\\t\" + row[0] + 
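Review note (seq_id 179211504, day1/read_test.py): the script builds and sorts the Tele namedtuple list `tl` by name, but the write loop just below iterates the unsorted `content` list, so the sort has no effect on dir-new.txt. Writing from `tl` instead preserves the intent (same name-tab-number layout as the original):

    # write from the sorted namedtuple list so the sort actually matters
    with open("dir-new.txt", "w") as f:
        for t in tl:                     # tl was sorted by t.name above
            f.write(t.name + "\t" + t.num + "\n")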
\"\\n\"\n\t\tf.write(s)\n\n","sub_path":"day1/read_test.py","file_name":"read_test.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640292668","text":"def sum_odd_digts(number):\n str_nums = map(int, str(number))\n sum_odd = 0\n\n for el in str_nums:\n num = int(el)\n\n if (num % 2) == 0:\n sum_odd += (num * num)\n\n return sum_odd\n\n\ndef main():\n try:\n user_input = int(input(\"Enter a number: \"))\n except ValueError:\n print(\"Invalid number! \\nTry again\")\n return main()\n\n result = sum_odd_digts(user_input)\n print(\"Sum of squares odd digits: %s\" % str(result))\n\n\nif __name__ == '__main__':\n main()","sub_path":"Python Academy/8_v4.py","file_name":"8_v4.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149927797","text":"#web/app.py\nfrom flask import Flask,render_template\nimport requests\nimport folium\nimport branca.colormap as cm\nimport branca\nimport json\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import (StructType,StructField,DateType,BooleanType,DoubleType,IntegerType,StringType,TimestampType)\nimport os\nimport plotly \nimport plotly.io as pio\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom pyspark.sql.functions import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.style.use(['dark_background'])\nimport os\nimport datetime\n\nos.environ[\"DISPLAY\"]=\"0.0\"\nos.environ[\"PYSPARK_PYTHON\"]=\"/usr/bin/python3\"\nos.environ[\"PYSPARK_DRIVER_PYTHON\"]=\"/usr/bin/python3\"\n\napp = Flask(__name__)\nmap_count=0\nspark = SparkSession.builder.appName(\"Chicago_crime_analysis\").getOrCreate()\ncrimes_schema = StructType([StructField(\"_c0\", StringType(), True),\n StructField(\"ID\", StringType(), True),\n StructField(\"CaseNumber\", StringType(), True),\n StructField(\"Date\", StringType(), True ),\n StructField(\"Block\", StringType(), True),\n StructField(\"IUCR\", StringType(), True),\n StructField(\"PrimaryType\", StringType(), True ),\n StructField(\"Description\", StringType(), True ),\n StructField(\"LocationDescription\", StringType(), True ),\n StructField(\"Arrest\", BooleanType(), True),\n StructField(\"Domestic\", BooleanType(), True),\n StructField(\"Beat\", StringType(), True),\n StructField(\"District\", StringType(), True),\n StructField(\"Ward\", StringType(), True),\n StructField(\"CommunityArea\", StringType(), True),\n StructField(\"FBICode\", StringType(), True ),\n StructField(\"XCoordinate\", DoubleType(), True),\n StructField(\"YCoordinate\", DoubleType(), True ),\n StructField(\"Year\", IntegerType(), True),\n StructField(\"UpdatedOn\", DateType(), True ),\n StructField(\"Latitude\", DoubleType(), True),\n StructField(\"Longitude\", DoubleType(), True),\n StructField(\"Location\", StringType(), True )\n ])\ncrimes = spark.read.csv(\"Chicago_Crimes_2012_to_2017.csv\",header = True,schema = crimes_schema)\n\n@app.route('/map-addr', methods=['GET', 'POST'])\ndef map_call():\n return render_template('map_address.html')\n\n@app.route('/')\ndef hello():\n return render_template('sampleui.html',name_send6='1') \n\n@app.route('/2001-2004', methods=['GET', 'POST'])\ndef one():\n return render_template('2001-2004.html')\n\n@app.route('/2005-2008', methods=['GET', 'POST'])\ndef two():\n return 
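Review note (seq_id 640292668, 8_v4.py): the function is named sum_odd_digts and the caller prints "Sum of squares odd digits", yet the test is `(num % 2) == 0`, so it actually sums the squares of the *even* digits. Note too that map(int, str(number)) already yields ints (the inner int(el) is redundant) and breaks on a leading minus sign. A sketch matching the printed label:

    def sum_odd_digit_squares(number):
        """Sum of the squares of the odd digits of number."""
        return sum(d * d for d in map(int, str(abs(number))) if d % 2 == 1)

    assert sum_odd_digit_squares(1234) == 10   # 1**2 + 3**2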
render_template('2005-2008.html')\n\n@app.route('/2009-2011', methods=['GET', 'POST'])\ndef three():\n return render_template('2009-2011.html')\n\n@app.route('/2012-2017', methods=['GET', 'POST'])\ndef four():\n return render_template('2012-2017.html')\n\n@app.route('/plotplaces', methods=['GET', 'POST'])\ndef crime_loc_plot():\n X=[]\n y=[]\n crime_loc_groups = crimes.groupBy('LocationDescription').count()\n crime_loc_counts = crime_loc_groups.orderBy('count', ascending=False)\n counts_pddf_loc = pd.DataFrame(crime_loc_counts.rdd.map(lambda l: l.asDict()).collect())\n count_list=counts_pddf_loc.values.tolist()\n for k,v in count_list:\n X.append(k)\n y.append(v)\n X=X[:30]\n y=y[:30]\n return render_template('sampleui.html',values3=y,labels3=X,name_send3='1')\n\n@app.route('/plotdates', methods=['GET', 'POST'])\ndef crime_date_plot():\n global crimes\n crimes = crimes.withColumn('date_time', to_timestamp('Date', 'MM/dd/yyyy hh:mm:ss a')).withColumn('month', trunc('date_time', 'YYYY')) #adding a month column to be able to view stats on a monthly basis\n df_hour = crimes.withColumn('hour', hour(crimes['date_time']))\n hourly_count = df_hour.groupBy(['PrimaryType', 'hour']).count().cache()\n hourly_total_count = hourly_count.groupBy('hour').sum('count')\n hourly_count_pddf = pd.DataFrame(hourly_total_count.select(hourly_total_count['hour'], hourly_total_count['sum(count)'].alias('count')).rdd.map(lambda l: l.asDict()).collect())\n hourly_count_pddf = hourly_count_pddf.sort_values(by='hour')\n X=hourly_count_pddf['hour']\n y= hourly_count_pddf['count']\n X=X[:-1]\n y=y[:-1]\n return render_template('sampleui.html',values2=y,labels2=X,name_send2='1')\n\n\n@app.route('/plottypes', methods=['GET', 'POST'])\ndef crime_type_plot():\n global crimes\n X=[]\n y=[]\n crime_type_groups = crimes.groupBy('PrimaryType').count()\n crime_type_counts = crime_type_groups.orderBy('count', ascending=False)\n counts_pddf = pd.DataFrame(crime_type_counts.rdd.map(lambda l: l.asDict()).collect())\n count_list=counts_pddf.values.tolist()\n for k,v in count_list:\n X.append(k)\n y.append(v)\n return render_template('sampleui.html',values=y,labels=X,name_send4='1')\n\n@app.route('/option/')\ndef option(name):\n option=name\n print(name)\n file_name=\"crime\"+option+\".html\"\n return render_template(file_name)\n \n\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\")\n","sub_path":"Integrated Project Final Version/web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403766200","text":"import sklearn\r\nimport numpy\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.datasets import load_iris #这里我们引入sklearn的一个自带的数据集iris鸢尾花数据集\r\n\r\n#朴素贝叶斯算法(Naive Bayes),同样是有监督学习\r\niris = load_iris()\r\nprint(iris) #iris数据集里面是关于花的各项属性(4 features) 标准是150*4 在加上花的标签(有监督)150*1\r\ntest = [[0.1,0.2,0.3,0.4],\r\n [10.5,10.6,10.7,10.8],\r\n [50.5,50.6,50.7,50.8]\r\n ]\r\n\r\nNBmodel_1 = GaussianNB() #sklearn的贝叶斯分类器里面有四种分类器 ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB']\r\nNBmodel_1.fit(iris.data, iris.target)\r\nNBmodel_1_pre = NBmodel_1.predict(test)\r\nprint(NBmodel_1_pre)\r\nprint(iris.target_names[NBmodel_1_pre]) \r\n\r\nNBmodel_2 = MultinomialNB()\r\nNBmodel_2.fit(iris.data,iris.target)\r\nNBmodel_2_pre = 
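Review note (seq_id 149927797, web/app.py): the /plotdates handler declares `global crimes` and reassigns the shared DataFrame on every request. Because withColumn overwrites same-named columns this happens to be idempotent, but mutating module state per request is fragile under Flask's threaded server, and trunc('date_time', 'YYYY') truncates to the *year* despite being stored in a column named 'month'. A sketch of deriving the columns once at startup so the handlers stay read-only (the 'year_start' name is my suggestion, not in the original):

    # hoisted to module scope, right after crimes is first loaded
    from pyspark.sql.functions import to_timestamp, trunc

    crimes = crimes.withColumn(
        'date_time', to_timestamp('Date', 'MM/dd/yyyy hh:mm:ss a')
    ).withColumn('year_start', trunc('date_time', 'year'))
    # handlers can then use `crimes` without the global statement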
NBmodel_2.predict(test)\r\nprint(NBmodel_2_pre)\r\nprint(iris.target_names[NBmodel_2_pre])\r\n\r\n\r\n","sub_path":"python_sklearn_classification_NB.py","file_name":"python_sklearn_classification_NB.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154736660","text":"'''\nScript to run tabular experiments in batch mode.\n\nauthor: iosband@stanford.edu\n'''\n\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport sys\n\nfrom src import environment\nfrom src import finite_tabular_agents\n\nfrom src.feature_extractor import FeatureTrueState\nfrom src.experiment import run_finite_tabular_experiment\n\n\n\nif __name__ == '__main__':\n '''\n Run a tabular experiment according to command line arguments\n '''\n\n # Take in command line flags\n parser = argparse.ArgumentParser(description='Run tabular RL experiment')\n parser.add_argument('stateMul', help='state multiplier', type=int)\n parser.add_argument('gap', help='gap between best arm', type=float)\n parser.add_argument('alg', help='Agent constructor', type=str)\n parser.add_argument('scaling', help='scaling', type=float)\n parser.add_argument('seed', help='random seed', type=int)\n parser.add_argument('nEps', help='number of episodes', type=int)\n args = parser.parse_args()\n\n # Make a filename to identify flags\n fileName = ('bandit'\n + '_stateMul=' + '%02.f' % args.stateMul\n + '_gap=' + '%04.3f' % args.gap\n + '_alg=' + str(args.alg)\n + '_scal=' + '%03.2f' % args.scaling\n + '_seed=' + str(args.seed)\n + '.csv')\n\n folderName = './'\n targetPath = folderName + fileName\n print('******************************************************************')\n print(fileName)\n print('******************************************************************')\n\n # Make the environment\n env = environment.make_stateBanditMDP(stateMul=args.stateMul, gap=args.gap)\n\n # Make the feature extractor\n f_ext = FeatureTrueState(env.epLen, env.nState, env.nAction, env.nState)\n\n # Make the agent\n alg_dict = {'PSRL': finite_tabular_agents.PSRL,\n 'PSRLunif': finite_tabular_agents.PSRLunif,\n 'OptimisticPSRL': finite_tabular_agents.OptimisticPSRL,\n 'GaussianPSRL': finite_tabular_agents.GaussianPSRL,\n 'UCBVI': finite_tabular_agents.UCBVI,\n 'BEB': finite_tabular_agents.BEB,\n 'BOLT': finite_tabular_agents.BOLT,\n 'UCRL2': finite_tabular_agents.UCRL2,\n 'UCRL2_GP': finite_tabular_agents.UCRL2_GP,\n 'UCRL2_GP_RTDP': finite_tabular_agents.UCRL2_GP_RTDP,\n 'EULER': finite_tabular_agents.EULER,\n 'EULER_GP': finite_tabular_agents.EULER_GP,\n 'EULER_GP_RTDP': finite_tabular_agents.EULER_GP_RTDP,\n 'UCFH': finite_tabular_agents.UCFH,\n 'EpsilonGreedy': finite_tabular_agents.EpsilonGreedy}\n\n agent_constructor = alg_dict[args.alg]\n\n agent = agent_constructor(env.nState, env.nAction, env.epLen,\n scaling=args.scaling)\n\n # Letting the agent know the transitions, but not the rewards\n agent.P_prior[0, 0] = 1e9 * (np.ones(nState) / (nState - 1))\n agent.P_prior[0, 0][0] = 0\n\n inds = (np.arange(nState) % 2) > 0\n P_true[0, 1][inds] = 1e9 * ((0.6) / stateMul)\n P_true[0, 1][-inds] = 1e9 * ((0.4) / stateMul)\n P_true[0, 1][0] = 0\n\n for a in range(env.nAction):\n for s in range(1, env.nState):\n agent.P_prior[s, a][s] += 1e9\n\n # Run the experiment\n run_finite_tabular_experiment(agent, env, f_ext, args.nEps, args.seed,\n recFreq=1000, fileFreq=10000, 
targetPath=targetPath)\n\n","sub_path":"psrl_experiments_2016/bandit_experiment_state_knownP.py","file_name":"bandit_experiment_state_knownP.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59745689","text":"import time\nfrom collections import Counter\n\nfrom sqlalchemy import func\n\nfrom lagou_project.create_lago_tables import Lagoutables\nfrom lagou_project.create_lago_tables import Session\n\n\nclass HandleLagouData(object):\n def __init__(self):\n #实例化session信息\n self.mysql_session = Session()\n self.date = time.strftime(\"%Y-%m-%d\", time.localtime())\n\n # 数据的存��方法\n def insert_item(self, item):\n #今天\n date = time.strftime(\"%Y-%m-%d\", time.localtime())\n #存储的数据结构\n data = Lagoutables(\n # 岗位ID\n positionID=item['positionId'],\n # 经度\n longitude=item['longitude'],\n # 纬度\n latitude=item['latitude'],\n # 岗位名称\n positionName=item['positionName'],\n # 工作年限\n workYear=item['workYear'],\n # 学历\n education=item['education'],\n # 岗位性质\n jobNature=item['jobNature'],\n # 公司类型\n financeStage=item['financeStage'],\n # 公司规模\n companySize=item['companySize'],\n # 业务方向\n industryField=item['industryField'],\n # 所在城市\n city=item['city'],\n # 岗位标签\n positionAdvantage=item['positionAdvantage'],\n # 公司简称\n companyShortName=item['companyShortName'],\n # 公司全称\n companyFullName=item['companyFullName'],\n # 公司所在区\n district=item['district'],\n # 公司福利标签\n companyLabelList=','.join(item['companyLabelList']),\n salary=item['salary'],\n # 抓取日期\n crawl_date=date\n )\n\n query_result = self.mysql_session.query(Lagoutables).filter(Lagoutables.crawl_date == date,\n Lagoutables.positionID == item[\n 'positionId']).first()\n #判断该条岗位信息是否存在\n if query_result:\n print('该岗位信息已经存在%s:%s:%s' % (item['positionId'], item['city'], item['positionName']))\n else:\n # 插入数据\n self.mysql_session.add(data)\n # 提交数据到数据库\n self.mysql_session.commit()\n print('新增岗位信息:%s' % item['positionId'])\n\n # 行业信息\n def query_industryfield_result(self):\n info = {}\n # 查询今日抓取到的行业信息数据\n result = self.mysql_session.query(Lagoutables.industryField).filter(\n Lagoutables.crawl_date == self.date\n ).all()\n result_list1 = [x[0].split(',')[0] for x in result]\n result_list2 = [x for x in Counter(result_list1).items() if x[1] > 150]\n # 填充的是series里面的data\n data = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n name_list = [name['name'] for name in data]\n info['x_name'] = name_list\n info['data'] = data\n return info\n\n # 查询薪资情况\n def query_salary_result(self):\n info = {}\n # 查询今日抓取到的薪资数据\n result = self.mysql_session.query(Lagoutables.salary).filter(Lagoutables.crawl_date == self.date).all()\n # 处理原始数据\n result_list1 = [x[0] for x in result]\n # 计数,并返回\n result_list2 = [x for x in Counter(result_list1).items() if x[1] > 100]\n result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n name_list = [name['name'] for name in result]\n info['x_name'] = name_list\n info['data'] = result\n return info\n\n # 查询工作年限情况\n def query_workyear_result(self):\n info = {}\n # 查询今日抓取到的薪资数据\n result = self.mysql_session.query(Lagoutables.workYear).filter(Lagoutables.crawl_date == self.date).all()\n # 处理原始数据\n result_list1 = [x[0] for x in result]\n # 计数,并返回\n result_list2 = [x for x in Counter(result_list1).items()]\n result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2 if x[1] > 15]\n name_list = [name['name'] for name in result]\n info['x_name'] = name_list\n info['data'] = result\n return info\n\n # 查询学历信息\n def 
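Review note (seq_id 154736660, bandit_experiment_state_knownP.py): the prior-setup block references nState, stateMul, and P_true, none of which are defined in this script, so it dies with NameError before the experiment runs; the analogous values in scope are env.nState, args.stateMul, and (presumably) agent.P_prior. Also, [-inds] applies unary minus to a boolean mask, which modern NumPy rejects; ~inds is the complement. A guess at the intended block (assumes env, args, and agent as in the surrounding script):

    import numpy as np

    nState, stateMul = env.nState, args.stateMul
    agent.P_prior[0, 0] = 1e9 * (np.ones(nState) / (nState - 1))
    agent.P_prior[0, 0][0] = 0

    inds = (np.arange(nState) % 2) > 0
    agent.P_prior[0, 1][inds] = 1e9 * (0.6 / stateMul)
    agent.P_prior[0, 1][~inds] = 1e9 * (0.4 / stateMul)  # ~, not unary minus
    agent.P_prior[0, 1][0] = 0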
query_education_result(self):\n info = {}\n # 查询今日抓取到的薪资数据\n result = self.mysql_session.query(Lagoutables.education).filter(Lagoutables.crawl_date == self.date).all()\n # 处理原始数据\n result_list1 = [x[0] for x in result]\n # 计数,并返回\n result_list2 = [x for x in Counter(result_list1).items()]\n result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n name_list = [name['name'] for name in result]\n info['x_name'] = name_list\n info['data'] = result\n return info\n\n # 岗位发布数量,折线图\n def query_job_result(self):\n info = {}\n result = self.mysql_session.query(Lagoutables.crawl_date, func.count('*').label('c')).group_by(\n Lagoutables.crawl_date).all()\n result1 = [{\"name\": x[0], \"value\": x[1]} for x in result]\n name_list = [name['name'] for name in result1]\n info['x_name'] = name_list\n info['data'] = result1\n return info\n\n # 根据城市计数\n def query_city_result(self):\n info = {}\n # 查询今日抓取到的薪资数据\n result = self.mysql_session.query(Lagoutables.city, func.count('*').label('c')).filter(\n Lagoutables.crawl_date == self.date).group_by(Lagoutables.city).all()\n result1 = [{\"name\": x[0], \"value\": x[1]} for x in result]\n name_list = [name['name'] for name in result1]\n info['x_name'] = name_list\n info['data'] = result1\n return info\n\n # 融资情况\n def query_financestage_result(self):\n info = {}\n # 查询今日抓取到的薪资数据\n result = self.mysql_session.query(Lagoutables.financeStage).filter(\n Lagoutables.crawl_date == self.date).all()\n # 处理原始数据\n result_list1 = [x[0] for x in result]\n # 计数,并返回\n result_list2 = [x for x in Counter(result_list1).items()]\n result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n name_list = [name['name'] for name in result]\n info['x_name'] = name_list\n info['data'] = result\n return info\n\n # 公司规模\n def query_companysize_result(self):\n info = {}\n # 查询今日抓取到的薪资数据\n result = self.mysql_session.query(Lagoutables.companySize).filter(Lagoutables.crawl_date == self.date).all()\n # 处理原始数据\n result_list1 = [x[0] for x in result]\n # 计数,并返回\n result_list2 = [x for x in Counter(result_list1).items()]\n result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n name_list = [name['name'] for name in result]\n info['x_name'] = name_list\n info['data'] = result\n return info\n\n # 任职情况\n def query_jobNature_result(self):\n info = {}\n # 查询今日抓取到的薪资数据\n result = self.mysql_session.query(Lagoutables.jobNature).filter(Lagoutables.crawl_date == self.date).all()\n # 处理原始数据\n result_list1 = [x[0] for x in result]\n # 计数,并返回\n result_list2 = [x for x in Counter(result_list1).items()]\n result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n name_list = [name['name'] for name in result]\n info['x_name'] = name_list\n info['data'] = result\n return info\n\n # 抓取数量\n def count_result(self):\n info = {}\n info['all_count'] = self.mysql_session.query(Lagoutables).count()\n info['today_count'] = self.mysql_session.query(Lagoutables).filter(\n Lagoutables.crawl_date == self.date).count()\n return info\n\n # def query_city_salary_industryfidle(self):\n # result = self.mysql_session.query(func.lower(func.substring_index(Lagoutables.industryField,',',1)),\n # Lagoutables.city,\n # func.avg(func.replace(func.lower(func.substring_index(Lagoutables.salary,'-',2)),'k','')),\n # func.count(func.lower(func.substring_index(Lagoutables.industryField,',',1))).label('c')).filter(\n # Lagoutables.crawl_date == self.date\n # ).group_by(func.lower(func.substring_index(Lagoutables.industryField,',',1)),\n # Lagoutables.city).all()\n # result_list = 
[{\"industry\":item[0],\"city\":item[1],\"salary_avg\":int(item[2]),\"value\":item[3]} for item in result if item[3] >10]\n # info = {}\n # # city_list = set([city[1] for city in result])\n # city_list = set([city['city'] for city in result_list])\n # # industryfield_list = set([industry[0] for industry in result])\n # industryfield_list = set([x['industry'] for x in result_list])\n # series_list = []\n # for city in city_list:\n # series_item = {}\n # series_data_list = []\n # series_item['name'] = city\n # series_item['type'] = \"bar\"\n # for item in industryfield_list:\n # series_data_item = {}\n # series_data_item['name'] = item\n # value = [x['salary_avg'] for x in result_list if x['industry'] == item and x['city'] == city]\n # if value:\n # series_data_item['value'] = value[0]\n # series_data_list.append(series_data_item)\n # series_item['data'] = series_data_list\n # series_list.append(series_item)\n # info['x_name'] = list(industryfield_list)\n # info['legend_data'] = list(city_list)\n # info['data'] = series_list\n # return info\n\n\nlagou_mysql = HandleLagouData()","sub_path":"lagou_project/handler_insert_data.py","file_name":"handler_insert_data.py","file_ext":"py","file_size_in_byte":10333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"102269411","text":"# !/usr/bin/python3\r\n# @File : utils.py\r\n# @Software : PyCharm\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\n\r\n\r\ndef get_neighbor(y_true, y_pre):\r\n correct = 0\r\n for temp_t, temp_p in zip(y_true, y_pre):\r\n if np.abs(temp_t - temp_p) <= 1:\r\n correct += 1\r\n neighbor_acc = correct / len(y_true)\r\n print(\"neighbor_acc:\", neighbor_acc)\r\n return neighbor_acc\r\n\r\n\r\ndef load_bert_data(data_path):\r\n file = open(data_path, 'rb')\r\n (data_x, data_y) = pickle.load(file)\r\n file.close()\r\n return np.array(data_x), np.array(data_y)\r\n\r\n\r\ndef read_reflect_data(data_path):\r\n data_df = pd.read_csv(data_path, header=None)\r\n data_vec = data_df.values[:, 1:]\r\n return data_vec\r\n","sub_path":"Readability_Assessment/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337802092","text":"import uuid\nfrom datetime import datetime\n\nimport jwt\nfrom sqlalchemy import desc\n\nfrom models.role import Role\nfrom models.social_account import SocialAccount\nfrom models.token import Token\nfrom models.user import User\n\n\nclass BasicService:\n def __init__(self, security_instance, db_session_instance, cache_instance):\n self.security = security_instance\n self.db = db_session_instance\n self.cache = cache_instance\n\n def db_add(self, *args, **kwargs):\n self.db.add(*args, **kwargs)\n\n def db_commit(self):\n self.db.commit()\n\n def cache_get(self, *args, **kwargs):\n self.cache.get(*args, **kwargs)\n\n def cache_set(self, *args, **kwargs):\n self.cache.set(*args, **kwargs)\n\n def cache_pipeline(self, *args, **kwargs):\n return self.cache.pipeline(*args, **kwargs)\n\n\nclass UserService(BasicService):\n def is_available_username_and_email(self, username, email):\n user_in_db = (\n self.security.user_class.lookup(username) or\n self.security.user_class.query.filter_by(email=email).one_or_none()\n )\n if user_in_db is not None:\n raise Exception('User with this username or email already exists!')\n\n def get_user_by_email(self, email):\n return 
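Review note (seq_id 59745689, lagou_project/handler_insert_data.py): the six query_*_result methods repeat the same query/Counter/shape pipeline over different columns, differing only in the column and a count threshold. A hypothetical helper (_count_column is my name, not in the original) that collapses the boilerplate:

    from collections import Counter

    def _count_column(self, column, min_count=0):
        """Count today's values of `column`, shaped for the chart endpoints."""
        rows = self.mysql_session.query(column).filter(
            Lagoutables.crawl_date == self.date).all()
        counts = Counter(r[0] for r in rows)
        data = [{"name": k, "value": v}
                for k, v in counts.items() if v > min_count]
        return {"x_name": [d["name"] for d in data], "data": data}

    # e.g.:  query_education_result -> self._count_column(Lagoutables.education)
    #        query_salary_result    -> self._count_column(Lagoutables.salary, 100)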
self.security.user_class.query.filter_by(email=email).one_or_none()\n\n def get_user_by_id(self, user_id):\n return self.security.user_class.query.filter_by(id=user_id).one_or_none()\n\n def create_new_user(self, username, email, password):\n new_user = User(\n id=uuid.uuid4(),\n username=username,\n email=email,\n hashed_password=password,\n created_at=datetime.now(),\n last_sign_in=datetime.now(),\n )\n self.db.add(new_user)\n return new_user\n\n def update_user(self, user_id: str, new_user_info: dict):\n user = self.security.user_class.query.filter_by(id=user_id).one_or_none()\n update_dict = {\n User.username: new_user_info.get(\"username\", user.username),\n User.email: new_user_info.get(\"email\", user.email),\n User.full_name: new_user_info.get(\"full_name\", user.full_name),\n User.roles: new_user_info.get(\"roles\", user.roles),\n }\n self.security.user_class.query.filter_by(id=user_id).update(update_dict, synchronize_session=False)\n\n def delete_user(self, user_id):\n self.security.user_class.query.filter_by(id=user_id).delete()\n\n def create_social_account_record(self, user_id, social_id, social_name):\n new_record = SocialAccount(\n id=uuid.uuid4(),\n user_id=user_id,\n social_id=social_id,\n social_name=social_name,\n created_at=datetime.now(),\n )\n self.db.add(new_record)\n return new_record\n\n\nclass TokenService(BasicService):\n def put_token_to_blacklist(self, token):\n payload = jwt.decode(token, verify=False)\n jti = payload.get(\"jti\")\n exp = datetime.fromtimestamp(payload.get(\"exp\"))\n ttl = exp - datetime.now()\n self.cache.set(jti, True, ex=ttl)\n\n def issue_token(self, user, platform=\"web\"):\n return self.security.encode_jwt_token(user, username=user.username, platform=platform)\n\n def get_user_sessions(self, user):\n user_sessions = Token.query.filter(\n Token.sub == str(user.id)\n ).order_by(desc(Token.created_at)).limit(10)\n results_dict = {\"username\": user.username, \"sessions\": []}\n for sess in user_sessions:\n results_dict[\"sessions\"].append({\"platform\": sess.platform, \"created_at\": int(sess.created_at)})\n return results_dict\n\n\nclass RoleService(BasicService):\n def create_role(self, name, privileges, description=None):\n new_role = Role(\n id=uuid.uuid4(),\n name=name,\n description=description,\n privileges=privileges,\n created_at=datetime.now(),\n )\n self.db.add(new_role)\n return new_role\n\n def get_role_by_id(self, role_id):\n return self.security.role_class.query.filter_by(id=role_id).one_or_none()\n\n def update_role(self, role_id: str, new_role_info: dict):\n role = self.security.role_class.query.filter_by(id=role_id).one_or_none()\n update_dict = {\n Role.name: new_role_info.get(\"name\", role.name),\n Role.description: new_role_info.get(\"description\", role.description),\n Role.privileges: new_role_info.get(\"privileges\", role.privileges),\n }\n self.security.role_class.query.filter_by(id=role_id).update(update_dict, synchronize_session=False)\n\n def delete_role(self, role_id):\n self.security.role_class.query.filter_by(id=role_id).delete()\n\n\nclass AppService(UserService, TokenService, RoleService):\n pass\n","sub_path":"auth_service/src/services/app_service.py","file_name":"app_service.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"487689817","text":"# Downganizer config file\n# Yeah, it's written in python\nimport logging\nimport yapsy\n\n#\n# Transmission Config\n#\nTR_HOST = 'localhost'\nTR_PORT = 
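Review note (seq_id 337802092, auth_service/src/services/app_service.py): jwt.decode(token, verify=False) in put_token_to_blacklist only works on PyJWT 1.x; the verify keyword was removed in PyJWT 2.0, where an unverified decode is spelled via options:

    # PyJWT >= 2.0 spelling of an unverified decode
    payload = jwt.decode(token, options={"verify_signature": False})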
9091\nTR_USER = 'transmission'\nTR_PASSWORD = 'transmission'\n\n\n#\n# logging shitty config\n#\nLOG_FILE = 'downganizer.log'\nlogger = logging.getLogger('downganizer')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\nfh = logging.FileHandler(LOG_FILE)\nfh.setFormatter(formatter)\nfh.setLevel(logging.DEBUG)\nlogger.addHandler(fh)\nlogging.getLogger('yapsy').setLevel(logging.CRITICAL)\nlogging.getLogger('yapsy').addHandler(ch)\n\n#\n# Plugins config\n#\n# Subtitle Downloader plugin\nLANGLIST = ['en']\n\n# Anime Mover plugin\nSERIESDIR = '/srv/series'\nANIMEDIR = '/srv/descargas/anime'\nANIMEREGEX = '\\[inshuheki\\]|\\[au\\]'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230937569","text":"#ImportModules\nimport ShareYourSystem as SYS\n\n#Definition a MakerClass decorated by the DefaultorClass\n@SYS.DoerClass()\nclass MakerClass(object):\n\n\tdef default_init(self,\n\t\t\t\t_MakingMyFloat=1.,\n\t\t\t\t_MakingShareList=['bonjour'],\n\t\t\t\t_MakingSpecificList=None,\n\t\t\t\t_MakingMyInt={'DefaultValueType':int}\n\t\t\t\t):\n\t\tobject.__init__(self)\n\n\n#print at the class level\nprint(\"\\n\".join(\n\t[\n\t\t'MakerClass has some special attributes',\n\t\t'MakerClass.DoingAttributeVariablesOrderedDict is '+SYS.indent(\n\t\t\tMakerClass.DoingAttributeVariablesOrderedDict),\n\t\t'MakerClass.DoneAttributeVariablesOrderedDict is '+SYS.indent(\n\t\t\tMakerClass.DoneAttributeVariablesOrderedDict)\n\t])\n)\n\n#Definition a default instance\nDefaultMaker=MakerClass()\n\n#print\nprint(\n\t'\\n'+'\\n'.join(\n\t\t[\n\t\t\t'What are you saying DefaultMaker ?',\n\t\t\t'DefaultMaker.__dict__ is '+SYS.indent(DefaultMaker.__dict__),\n\t\t\t'DefaultMaker.getDo() is '+SYS.indent(DefaultMaker.getDo()),\n\t\t]\n\t)\n)\n\n#Definition a special instance\nSpecialMaker=MakerClass(\n\t_MakingSpecificList=['hello'],\n\t**{\n\t\t'MakingMyFloat':3.\n\t}\n)\n\n#print\nprint(\n\t'\\n'+'\\n'.join(\n\t\t[\n\t\t\t'What are you saying SpecialMaker ?',\n\t\t\t'SpecialMaker.__dict__ is '+SYS.indent(SpecialMaker.__dict__),\n\t\t\t'SpecialMaker.getDo() is '+SYS.indent(SpecialMaker.getDo())\n\t\t]\n\t)\n)\n\n\n\n\n\n","sub_path":"Pythonlogy/ShareYourSystem/Standards/Classors/Doer/01_ExampleDoc.py","file_name":"01_ExampleDoc.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"551177850","text":"def test_run():\n import cairocffi as cairo\n import numpy as np\n\n surface = cairo.ImageSurface(cairo.FORMAT_RGB24, 100, 100)\n with cairo.Context(surface) as context:\n context.set_source_rgb(1, 1, 1)\n context.paint()\n context.select_font_face(\n \"Courier\", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD\n )\n context.set_font_size(10)\n context.move_to(20, 20)\n context.set_source_rgb(0, 0, 0)\n context.show_text(\"abc\")\n\n buf = surface.get_data()\n a = np.frombuffer(buf, np.uint8)\n assert len(a) == 100 * 100 * 4\n","sub_path":"tests/test_cairoffi.py","file_name":"test_cairoffi.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"13558754","text":"# get controversial words from txt file\nfile = 
open('input/dispute_words.txt', 'r')\ntext = file.read()\nprint(text)\nwords = text.split(\", \")\n\nwords.sort()\nprint(words)\n\nfile2 = open('input/dispute_words2.txt', 'w')\nfor word in words:\n file2.write(word + \", \")\n\nfile.close()\nfile2.close()","sub_path":"sort_txt.py","file_name":"sort_txt.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372254296","text":"from collections import OrderedDict\nimport math\nimport sys\nimport inspect\nimport numpy as np\nfrom ..utils.mfenums import DiscretizationType\nfrom ..data import mfstructure, mfdata\nfrom ..mfbase import MFDataException, ExtFileAction\nfrom .mfstructure import DatumType\nfrom ...utils import datautil\nfrom ...datbase import DataListInterface, DataType\nfrom .mffileaccess import MFFileAccessList\nfrom .mfdatastorage import DataStorage, DataStorageType, DataStructureType\nfrom .mfdatautil import to_string\n\n\nclass MFList(mfdata.MFMultiDimVar, DataListInterface):\n \"\"\"\n Provides an interface for the user to access and update MODFLOW\n scalar data.\n\n Parameters\n ----------\n sim_data : MFSimulationData\n data contained in the simulation\n structure : MFDataStructure\n describes the structure of the data\n data : list or ndarray\n actual data\n enable : bool\n enable/disable the array\n path : tuple\n path in the data dictionary to this MFArray\n dimensions : MFDataDimensions\n dimension information related to the model, package, and array\n\n Methods\n -------\n new_simulation : (sim_data : MFSimulationData)\n initialize MFArray object for a new simulation\n has_data : (layer_num : int) : bool\n Returns whether layer \"layer_num\" has any data associated with it.\n For unlayered data do not pass in \"layer\".\n get_data : (layer_num : int) : ndarray\n Returns the data associated with layer \"layer_num\". If \"layer_num\" is\n None, returns all data.\n set_data : (data : ndarray/list/dict, multiplier : float, layer_num : int)\n Sets the contents of the data at layer \"layer_num\" to \"data\" with\n multiplier \"multiplier\". For unlayered data do not pass in\n \"layer_num\". data can have the following formats:\n 1) ndarray - ndarray containing the datalist\n 2) [(line_one), (line_two), ...] - list where each like of the\n datalist is a tuple within the list\n 3) {'filename':filename, factor=fct, iprn=print_code, data=data}\n - dictionary defining the external file containing the datalist.\n If the data is transient, a dictionary can be used to specify each\n stress period where the dictionary key is - 1 and\n the dictionary value is the datalist data defined above:\n {0:ndarray, 1:[(line_one), (line_two), ...], 2:{'filename':filename})\n append_data : (data : list(tuple))\n Appends \"data\" to the end of this list. Assumes data is in a format\n that can be appended directly to a numpy recarray.\n append_list_as_record : (data : list)\n Appends the list \"data\" as a single record in this list's recarray.\n Assumes \"data\" has the correct dimensions.\n update_record : (record : list, key_index : int)\n Updates a record at index \"key_index\" with the contents of \"record\".\n If the index does not exist update_record appends the contents of\n \"record\" to this list's recarray.\n search_data : (search_term : string, col : int)\n Searches the list data at column \"col\" for \"search_term\". 
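Review note (seq_id 13558754, sort_txt.py): the write loop emits word + ", " for every word, leaving a dangling separator after the last one; str.join places the separator only between items:

    # join avoids the trailing ", " after the final word
    with open('input/dispute_words2.txt', 'w') as f:
        f.write(", ".join(words))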
If col is\n None search_data searches the entire list.\n load : (first_line : string, file_handle : file descriptor,\n block_header : MFBlockHeader, pre_data_comments : MFComment) :\n tuple (bool, string)\n Loads data from first_line (the first line of data) and open file\n file_handle which is pointing to the second line of data. Returns a\n tuple with the first item indicating whether all data was read\n and the second item being the last line of text read from the file.\n get_file_entry : (layer : int) : string\n Returns a string containing the data in layer \"layer\". For unlayered\n data do not pass in \"layer\".\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n\n\n \"\"\"\n def __init__(self, sim_data, model_or_sim, structure, data=None,\n enable=True, path=None, dimensions=None, package=None):\n super(MFList, self).__init__(sim_data, model_or_sim, structure, enable,\n path, dimensions)\n try:\n self._data_storage = self._new_storage()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(structure.get_model(),\n structure.get_package(), path,\n 'creating storage', structure.name,\n inspect.stack()[0][3],\n type_, value_, traceback_, None,\n sim_data.debug, ex)\n self._package = package\n self._last_line_info = []\n self._data_line = None\n self._temp_dict = {}\n self._crnt_line_num = 1\n if data is not None:\n try:\n self.set_data(data, True)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(structure.get_model(),\n structure.get_package(), path,\n 'setting data', structure.name,\n inspect.stack()[0][3],\n type_, value_, traceback_, None,\n sim_data.debug, ex)\n\n @property\n def data_type(self):\n return DataType.list\n\n @property\n def package(self):\n return self._package\n\n @property\n def dtype(self):\n return self.get_data().dtype\n\n @property\n def plotable(self):\n if self.model is None:\n return False\n else:\n return True\n\n def to_array(self, kper=0, mask=False):\n i0 = 1\n sarr = self.get_data(key=kper)\n if not isinstance(sarr, list):\n sarr = [sarr]\n if len(sarr) == 0 or sarr[0] is None:\n return None\n if 'inode' in sarr[0].dtype.names:\n raise NotImplementedError()\n arrays = {}\n model_grid = self._data_dimensions.get_model_grid()\n\n if model_grid._grid_type.value == 1:\n shape = (model_grid.num_layers(), model_grid.num_rows(),\n model_grid.num_columns())\n elif model_grid._grid_type.value == 2:\n shape = (model_grid.num_layers(), model_grid.num_cells_per_layer())\n else:\n shape = (model_grid.num_cells_per_layer(),)\n\n for name in sarr[0].dtype.names[i0:]:\n if not sarr[0].dtype.fields[name][0] == object:\n arr = np.zeros(shape)\n arrays[name] = arr.copy()\n\n if np.isscalar(sarr[0]):\n # if there are no entries for this kper\n if sarr[0] == 0:\n if mask:\n for name, arr in arrays.items():\n arrays[name][:] = np.NaN\n return arrays\n else:\n raise Exception(\"MfList: something bad happened\")\n\n for name, arr in arrays.items():\n cnt = np.zeros(shape, dtype=np.float)\n #print(name,kper)\n for sp_rec in sarr:\n if sp_rec is not None:\n for rec in sp_rec:\n arr[rec['cellid']] += rec[name]\n cnt[rec['cellid']] += 1.\n # average keys that should not be added\n if name != 'cond' and name != 'flux':\n idx = cnt > 0.\n arr[idx] /= cnt[idx]\n if mask:\n arr = np.ma.masked_where(cnt == 0., arr)\n arr[cnt == 0.] 
= np.NaN\n\n arrays[name] = arr.copy()\n # elif mask:\n # for name, arr in arrays.items():\n # arrays[name][:] = np.NaN\n return arrays\n\n def new_simulation(self, sim_data):\n try:\n super(MFList, self).new_simulation(sim_data)\n self._data_storage = self._new_storage()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'reinitializing', self.structure.name,\n inspect.stack()[0][3],\n type_, value_, traceback_, None,\n self._simulation_data.debug, ex)\n\n self._data_line = None\n\n def has_data(self):\n try:\n if self._get_storage_obj() is None:\n return False\n return self._get_storage_obj().has_data()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(), self._path,\n 'checking for data', self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n\n def get_data(self, apply_mult=False, **kwargs):\n try:\n if self._get_storage_obj() is None:\n return None\n return self._get_storage_obj().get_data()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(), self._path,\n 'getting data', self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n\n def set_data(self, data, autofill=False):\n self._resync()\n try:\n if self._get_storage_obj() is None:\n self._data_storage = self._new_storage()\n # store data\n self._get_storage_obj().set_data(data, autofill=autofill)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(), self._path,\n 'setting data', self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n\n def append_data(self, data):\n try:\n self._resync()\n if self._get_storage_obj() is None:\n self._data_storage = self._new_storage()\n # store data\n self._get_storage_obj().append_data(data)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'appending data', self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n\n def append_list_as_record(self, record):\n self._resync()\n try:\n # convert to tuple\n tuple_record = ()\n for item in record:\n tuple_record += (item,)\n # store\n self._get_storage_obj().append_data([tuple_record])\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'appending data', self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n\n def update_record(self, record, key_index):\n self.append_list_as_record(record)\n\n def search_data(self, search_term, col=None):\n try:\n data = self._get_storage_obj().get_data()\n if data is not None:\n search_term = search_term.lower()\n for row in data:\n col_num = 0\n for val in row:\n if val is not None and val.lower() == search_term and \\\n (col == None or col == col_num):\n return (row, col)\n col_num += 1\n return None\n except Exception as ex:\n type_, value_, traceback_ = 
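# Standalone numpy sketch of the accumulate-then-average pattern used by
# to_array() above: additive fields ('cond', 'flux') are summed per cell,
# every other field is averaged over the records touching the cell.
import numpy as np
acc = np.zeros((2, 3, 3))
cnt = np.zeros((2, 3, 3))
for cellid, val in [((0, 1, 1), 5.0), ((0, 1, 1), 3.0), ((1, 2, 0), 7.0)]:
    acc[cellid] += val
    cnt[cellid] += 1.
idx = cnt > 0.
acc[idx] /= cnt[idx]  # averaging step; skipped for the additive fields
masked = np.ma.masked_where(cnt == 0., acc)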
sys.exc_info()\n if col is None:\n col = ''\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(), self._path,\n 'searching for data', self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_,\n 'search_term={}\\ncol={}'.format(search_term,\n col),\n self._simulation_data.debug, ex)\n\n def get_file_entry(self, values_only=False,\n ext_file_action=ExtFileAction.copy_relative_paths):\n try:\n # freeze model grid to boost performance\n self._data_dimensions.lock()\n # init\n indent = self._simulation_data.indent_string\n file_entry = []\n storage = self._get_storage_obj()\n if storage is None or not storage.has_data():\n return ''\n\n # write out initial comments\n if storage.pre_data_comments:\n file_entry.append(storage.pre_data_comments.get_file_entry())\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path, 'get file entry initialization',\n self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n\n if storage.layer_storage.first_item().data_storage_type == \\\n DataStorageType.external_file:\n try:\n ext_string = self._get_external_formatting_string(0,\n ext_file_action)\n file_entry.append('{}{}{}'.format(indent, indent,\n ext_string))\n # write file\n\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'formatting external file string',\n self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n else:\n try:\n data_complete = storage.get_data()\n if storage.layer_storage.first_item().data_storage_type == \\\n DataStorageType.internal_constant:\n data_lines = 1\n else:\n data_lines = len(data_complete)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'getting data from storage',\n self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n\n # loop through list line by line - assumes first data_item size\n # is representative\n self._crnt_line_num = 1\n for mflist_line in range(0, data_lines):\n text_line = []\n index = 0\n self._get_file_entry_record(data_complete, mflist_line,\n text_line, index, self.structure,\n storage, indent)\n\n # include comments\n if mflist_line in storage.comments and \\\n storage.comments[mflist_line].text:\n text_line.append(storage.comments[mflist_line].text)\n\n file_entry.append('{}{}\\n'.format(indent, indent.\n join(text_line)))\n self._crnt_line_num += 1\n\n # unfreeze model grid\n self._data_dimensions.unlock()\n return ''.join(file_entry)\n\n def _get_file_entry_record(self, data_complete, mflist_line, text_line,\n index, data_set, storage, indent):\n if storage.layer_storage.first_item().data_storage_type == \\\n DataStorageType.internal_constant:\n try:\n # constant data\n data_type = self.structure.data_item_structures[1].type\n const_str = self._get_constant_formatting_string(\n storage.get_const_val(0), 0, data_type, '')\n text_line.append('{}{}{}'.format(indent, indent,\n const_str.upper()))\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'getting 
constant data',\n self.structure.name,\n inspect.stack()[0][3], type_, value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n else:\n data_dim = self._data_dimensions\n data_line = data_complete[mflist_line]\n for data_item in data_set.data_item_structures:\n if data_item.is_aux:\n try:\n aux_var_names = data_dim.package_dim.get_aux_variables()\n if aux_var_names is not None:\n for aux_var_name in aux_var_names[0]:\n if aux_var_name.lower() != 'auxiliary':\n data_val = data_line[index]\n text_line.append(to_string(\n data_val, data_item.type,\n self._simulation_data,\n self._data_dimensions,\n data_item.is_cellid,\n data_item.possible_cellid,\n data_item))\n index += 1\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'processing auxiliary '\n 'variables',\n self.structure.name,\n inspect.stack()[0][3], type_,\n value_,\n traceback_, None,\n self._simulation_data.debug, ex)\n elif data_item.type == DatumType.record:\n # record within a record, recurse\n self._get_file_entry_record(data_complete, mflist_line,\n text_line, index, data_item,\n storage, indent)\n elif (not data_item.is_boundname or\n data_dim.package_dim.boundnames()) and \\\n (not data_item.optional or data_item.name_length < 5\n or not data_item.is_mname or not storage.in_model):\n data_complete_len = len(data_line)\n if data_complete_len <= index:\n if data_item.optional == False:\n message = 'Not enough data provided ' \\\n 'for {}. Data for required data ' \\\n 'item \"{}\" not ' \\\n 'found (data path: {})' \\\n '.'.format(self.structure.name,\n data_item.name,\n self._path,)\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'building file entry record',\n self.structure.name,\n inspect.stack()[0][3], type_,\n value_, traceback_, message,\n self._simulation_data.debug)\n else:\n break\n try:\n # resolve size of data\n resolved_shape, shape_rule = data_dim.get_data_shape(\n data_item, self.structure, [data_line],\n repeating_key=self._current_key)\n data_val = data_line[index]\n if data_item.is_cellid or (data_item.possible_cellid and\n storage._validate_cellid([data_val], 0)):\n if data_item.shape is not None and \\\n len(data_item.shape) > 0 and \\\n data_item.shape[0] == 'ncelldim':\n model_grid = data_dim.get_model_grid()\n cellid_size = \\\n model_grid.get_num_spatial_coordinates()\n data_item.remove_cellid(resolved_shape,\n cellid_size)\n data_size = 1\n if len(resolved_shape) == 1 and \\\n datautil.DatumUtil.is_int(resolved_shape[0]):\n data_size = int(resolved_shape[0])\n if data_size < 0:\n # unable to resolve data size based on shape, use\n # the data heading names to resolve data size\n data_size = storage.resolve_data_size(index)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'resolving data shape',\n self.structure.name,\n inspect.stack()[0][3], type_,\n value_, traceback_,\n 'Verify that your data is the '\n 'correct shape',\n self._simulation_data.debug, ex)\n for data_index in range(0, data_size):\n if data_complete_len > index:\n data_val = data_line[index]\n if data_item.type == DatumType.keyword:\n if data_val is not None:\n text_line.append(data_item.display_name)\n if self.structure.block_variable:\n # block variables behave differently for\n # now. 
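# Tiny sketch of the line assembly performed by get_file_entry() above: the
# formatted tokens of one record are joined with the configured indent
# string and emitted as a single text line.
indent = '  '
text_line = ['1', '2', '3', '-100.0']
entry = '{}{}\n'.format(indent, indent.join(text_line))
assert entry == '  1  2  3  -100.0\n'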
this needs to be resolved\n # more consistently at some point\n index += 1\n elif data_item.type == DatumType.keystring:\n if data_val is not None:\n text_line.append(data_val)\n index += 1\n\n # keystring must be at the end of the line so\n # everything else is part of the keystring data\n data_key = data_val.lower()\n if data_key not in data_item.keystring_dict:\n keystr_struct = data_item.keystring_dict[\n '{}record'.format(data_key)]\n else:\n keystr_struct = data_item.keystring_dict[\n data_key]\n if isinstance(keystr_struct,\n mfstructure.MFDataStructure):\n # data items following keystring\n ks_structs = keystr_struct.\\\n data_item_structures[1:]\n else:\n # key string stands alone\n ks_structs = [keystr_struct]\n ks_struct_index = 0\n max_index = len(ks_structs) - 1\n for data_index in range(index,\n data_complete_len):\n if data_line[data_index] is not None:\n try:\n k_data_item = ks_structs[\n ks_struct_index]\n text_line.append(to_string(\n data_line[data_index],\n k_data_item.type,\n self._simulation_data,\n self._data_dimensions,\n k_data_item.is_cellid,\n k_data_item.possible_cellid,\n k_data_item))\n except Exception as ex:\n message = 'An error occurred ' \\\n 'while converting data '\\\n 'to a string. This ' \\\n 'error occurred while ' \\\n 'processing \"{}\" line ' \\\n '{} data item \"{}\".' \\\n '(data path: {})' \\\n '.'.format(\n self.structure.name,\n data_item.name,\n self._crnt_line_num,\n self._path)\n type_, value_, \\\n traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'converting data '\n 'to a string',\n self.structure.name,\n inspect.stack()[0][\n 3], type_,\n value_, traceback_,\n message,\n self.\n _simulation_data.\n debug, ex)\n if ks_struct_index < max_index:\n # increment until last record\n # entry then repeat last entry\n ks_struct_index += 1\n index = data_index\n elif data_val is not None and (not isinstance(\n data_val, float) or\n not math.isnan(data_val)):\n try:\n if data_item.tagged and data_index == 0:\n # data item tagged, include data item name\n # as a keyword\n text_line.append(to_string(\n data_val, DatumType.string,\n self._simulation_data,\n self._data_dimensions,\n False, data_item=data_item))\n index += 1\n data_val = data_line[index]\n text_line.append(\n to_string(data_val, data_item.type,\n self._simulation_data,\n self._data_dimensions,\n data_item.is_cellid,\n data_item.possible_cellid,\n data_item))\n except Exception as ex:\n message = 'An error occurred while ' \\\n 'converting data to a ' \\\n 'string. ' \\\n 'This error occurred while ' \\\n 'processing \"{}\" line {} data ' \\\n 'item \"{}\".(data path: {})'\\\n '.'.format(self.structure.name,\n data_item.name,\n self._crnt_line_num,\n self._path)\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.\n get_model(),\n self.structure.\n get_package(),\n self._path,\n 'converting data '\n 'to a string',\n self.structure.name,\n inspect.stack()[0][\n 3], type_,\n value_, traceback_,\n message,\n self.\n _simulation_data.\n debug, ex)\n index += 1\n elif not data_item.optional and shape_rule is None:\n message = 'Not enough data provided ' \\\n 'for {}. 
Data for required data ' \\\n 'item \"{}\" not ' \\\n 'found (data path: {})' \\\n '.'.format(self.structure.name,\n data_item.name,\n self._path)\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n 'building data line',\n self.structure.name,\n inspect.stack()[0][3], type_,\n value_, traceback_, message,\n self._simulation_data.debug)\n\n def load(self, first_line, file_handle, block_header,\n pre_data_comments=None):\n super(MFList, self).load(first_line, file_handle, block_header,\n pre_data_comments=None)\n self._resync()\n file_access = MFFileAccessList( self.structure, self._data_dimensions,\n self._simulation_data, self._path,\n self._current_key)\n return file_access.load_from_package(\n first_line, file_handle, self._get_storage_obj(), pre_data_comments)\n\n def _new_storage(self, stress_period=0):\n return DataStorage(self._simulation_data, self._model_or_sim,\n self._data_dimensions, self.get_file_entry,\n DataStorageType.internal_array,\n DataStructureType.recarray,\n stress_period=stress_period,\n data_path=self._path)\n\n def _get_storage_obj(self):\n return self._data_storage\n\n def plot(self, key=None, names=None, filename_base=None,\n file_extension=None, mflay=None, **kwargs):\n \"\"\"\n Plot boundary condition (MfList) data\n\n Parameters\n ----------\n key : str\n MfList dictionary key. (default is None)\n names : list\n List of names for figure titles. (default is None)\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n mflay : int\n MODFLOW zero-based layer number to return. If None, then all\n all layers will be included. (default is None)\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\n Returns\n ----------\n out : list\n Empty list is returned if filename_base is not None. 
Otherwise\n a list of matplotlib.pyplot.axis is returned.\n \"\"\"\n from hataripy.plot import PlotUtilities\n\n if not self.plotable:\n raise TypeError(\"Simulation level packages are not plotable\")\n\n if 'cellid' not in self.dtype.names:\n return\n\n axes = PlotUtilities._plot_mflist_helper(mflist=self, key=key, kper=None,\n names=names,\n filename_base=filename_base,\n file_extension=file_extension,\n mflay=mflay, **kwargs)\n return axes\n\n\nclass MFTransientList(MFList, mfdata.MFTransient, DataListInterface):\n \"\"\"\n Provides an interface for the user to access and update MODFLOW transient\n list data.\n\n Parameters\n ----------\n sim_data : MFSimulationData\n data contained in the simulation\n structure : MFDataStructure\n describes the structure of the data\n data : list or ndarray\n actual data\n enable : bool\n enable/disable the array\n path : tuple\n path in the data dictionary to this MFArray\n dimensions : MFDataDimensions\n dimension information related to the model, package, and array\n\n Methods\n -------\n add_transient_key : (transient_key : int)\n Adds a new transient time allowing data for that time to be stored and\n retrieved using the key \"transient_key\"\n add_one :(transient_key : int)\n Adds one to the data stored at key \"transient_key\"\n get_data : (key : int) : ndarray\n Returns the data during time \"key\".\n set_data : (data : ndarray/list, multiplier : float, key : int)\n Sets the contents of the data at time \"key\" to \"data\" with\n multiplier \"multiplier\".\n load : (first_line : string, file_handle : file descriptor,\n block_header : MFBlockHeader, pre_data_comments : MFComment) :\n tuple (bool, string)\n Loads data from first_line (the first line of data) and open file\n file_handle which is pointing to the second line of data. Returns a\n tuple with the first item indicating whether all data was read\n and the second item being the last line of text read from the file.\n get_file_entry : (key : int) : string\n Returns a string containing the data at time \"key\".\n append_list_as_record : (data : list, key : int)\n Appends the list \"data\" as a single record in this list's recarray at\n time \"key\". Assumes \"data\" has the correct dimensions.\n update_record : (record : list, key_index : int, key : int)\n Updates a record at index \"key_index\" and time \"key\" with the contents\n of \"record\". 
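A hedged usage sketch (package, cell ids and rates are invented): data is
    stored and retrieved per stress-period key, where ``wel`` stands in for
    any package holding this transient list:

        wel.stress_period_data.set_data({0: [((0, 0, 0), -100.0)]})
        sp0 = wel.stress_period_data.get_data(key=0)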
If the index does not exist update_record appends the\n contents of \"record\" to this list's recarray.\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n\n\n \"\"\"\n def __init__(self, sim_data, model_or_sim, structure, enable=True, path=None,\n dimensions=None, package=None):\n super(MFTransientList, self).__init__(sim_data=sim_data,\n model_or_sim=model_or_sim,\n structure=structure,\n data=None,\n enable=enable,\n path=path,\n dimensions=dimensions,\n package=package)\n self._transient_setup(self._data_storage)\n self.repeating = True\n\n @property\n def data_type(self):\n return DataType.transientlist\n\n @property\n def dtype(self):\n data = self.get_data()\n if len(data) > 0:\n return data[0].dtype\n else:\n return None\n\n @property\n def masked_4D_arrays(self):\n model_grid = self._data_dimensions.get_model_grid()\n nper = self._data_dimensions.package_dim.model_dim[0].simulation_time \\\n .get_num_stress_periods()\n # get the first kper\n arrays = self.to_array(kper=0, mask=True)\n\n if arrays is not None:\n # initialize these big arrays\n if model_grid.grid_type() == DiscretizationType.DIS:\n m4ds = {}\n for name, array in arrays.items():\n m4d = np.zeros((nper, model_grid.num_layers(),\n model_grid.num_rows(), model_grid.num_columns()))\n m4d[0, :, :, :] = array\n m4ds[name] = m4d\n for kper in range(1, nper):\n arrays = self.to_array(kper=kper, mask=True)\n for name, array in arrays.items():\n m4ds[name][kper, :, :, :] = array\n return m4ds\n else:\n m3ds = {}\n for name, array in arrays.items():\n m3d = np.zeros((nper, model_grid.num_layers(),\n model_grid.num_cells_per_layer()))\n m3d[0, :, :] = array\n m3ds[name] = m3d\n for kper in range(1, nper):\n arrays = self.to_array(kper=kper, mask=True)\n for name, array in arrays.items():\n m3ds[name][kper, :, :] = array\n return m3ds\n\n def masked_4D_arrays_itr(self):\n model_grid = self._data_dimensions.get_model_grid()\n nper = self._data_dimensions.package_dim.model_dim[0].simulation_time \\\n .get_num_stress_periods()\n # get the first kper\n arrays = self.to_array(kper=0, mask=True)\n\n if arrays is not None:\n # initialize these big arrays\n for name, array in arrays.items():\n if model_grid.grid_type() == DiscretizationType.DIS:\n m4d = np.zeros((nper, model_grid.num_layers(),\n model_grid.num_rows(), model_grid.num_columns()))\n m4d[0, :, :, :] = array\n for kper in range(1, nper):\n arrays = self.to_array(kper=kper, mask=True)\n for tname, array in arrays.items():\n if tname == name:\n m4d[kper, :, :, :] = array\n yield name, m4d\n else:\n m3d = np.zeros((nper, model_grid.num_layers(),\n model_grid.num_cells_per_layer()))\n m3d[0, :, :] = array\n for kper in range(1, nper):\n arrays = self.to_array(kper=kper, mask=True)\n for tname, array in arrays.items():\n if tname == name:\n m3d[kper, :, :] = array\n yield name, m3d\n\n def to_array(self, kper=0, mask=False):\n return super(MFTransientList, self).to_array(kper, mask)\n\n def add_transient_key(self, transient_key):\n super(MFTransientList, self).add_transient_key(transient_key)\n if isinstance(transient_key, int):\n stress_period = transient_key\n else:\n stress_period = 1\n self._data_storage[transient_key] = \\\n super(MFTransientList, self)._new_storage(stress_period)\n\n def get_data(self, key=None, apply_mult=False, **kwargs):\n if self._data_storage is not None and len(self._data_storage) > 0:\n if key is None:\n if 'array' in kwargs:\n output = []\n sim_time = self._data_dimensions.package_dim.model_dim[\n 0].simulation_time\n num_sp = 
sim_time.get_num_stress_periods()\n for sp in range(0, num_sp):\n if sp in self._data_storage:\n self.get_data_prep(sp)\n output.append(super(MFTransientList, self).get_data(\n apply_mult=apply_mult))\n else:\n output.append(None)\n return output\n else:\n output = {}\n for key in self._data_storage.keys():\n self.get_data_prep(key)\n output[key] = super(MFTransientList, self).get_data(\n apply_mult=apply_mult)\n return output\n self.get_data_prep(key)\n return super(MFTransientList, self).get_data(apply_mult=apply_mult)\n else:\n return None\n\n def set_data(self, data, key=None, autofill=False):\n if (isinstance(data, dict) or isinstance(data, OrderedDict)) and \\\n 'filename' not in data:\n # each item in the dictionary is a list for one stress period\n # the dictionary key is the stress period the list is for\n for key, list_item in data.items():\n self._set_data_prep(list_item, key)\n super(MFTransientList, self).set_data(list_item,\n autofill=autofill)\n else:\n if key is None:\n # search for a key\n new_key_index = self.structure.first_non_keyword_index()\n if new_key_index is not None and len(data) > new_key_index:\n key = data[new_key_index]\n else:\n key = 0\n self._set_data_prep(data, key)\n super(MFTransientList, self).set_data(data, autofill)\n\n def get_file_entry(self, key=0,\n ext_file_action=ExtFileAction.copy_relative_paths):\n self._get_file_entry_prep(key)\n return super(MFTransientList, self).get_file_entry(ext_file_action=\n ext_file_action)\n\n def load(self, first_line, file_handle, block_header,\n pre_data_comments=None):\n self._load_prep(block_header)\n return super(MFTransientList, self).load(first_line, file_handle,\n block_header,\n pre_data_comments)\n\n def append_list_as_record(self, record, key=0):\n self._append_list_as_record_prep(record, key)\n super(MFTransientList, self).append_list_as_record(record)\n\n def update_record(self, record, key_index, key=0):\n self._update_record_prep(key)\n super(MFTransientList, self).update_record(record, key_index)\n\n def _new_storage(self, stress_period=0):\n return OrderedDict()\n\n def _get_storage_obj(self):\n if self._current_key is None or \\\n self._current_key not in self._data_storage:\n return None\n return self._data_storage[self._current_key]\n\n def plot(self, key=None, names=None, kper=0,\n filename_base=None, file_extension=None, mflay=None,\n **kwargs):\n \"\"\"\n Plot stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n key : str\n MfList dictionary key. (default is None)\n names : list\n List of names for figure titles. (default is None)\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n mflay : int\n MODFLOW zero-based layer number to return. If None, then all\n layers will be included. (default is None)\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. 
(default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\n Returns\n ----------\n out : list\n Empty list is returned if filename_base is not None. Otherwise\n a list of matplotlib.pyplot.axis is returned.\n \"\"\"\n from hataripy.plot import PlotUtilities\n\n if not self.plotable:\n raise TypeError(\"Simulation level packages are not plotable\")\n\n if 'cellid' not in self.dtype.names:\n return\n\n axes = PlotUtilities._plot_mflist_helper(self, key=key, names=names,\n kper=kper, filename_base=filename_base,\n file_extension=file_extension, mflay=mflay,\n **kwargs)\n return axes\n\n\nclass MFMultipleList(MFTransientList):\n \"\"\"\n Provides an interface for the user to access and update MODFLOW multiple\n list data. This is list data that is in the same format as the\n MFTransientList, but is not time based.\n\n Parameters\n ----------\n sim_data : MFSimulationData\n data contained in the simulation\n structure : MFDataStructure\n describes the structure of the data\n data : list or ndarray\n actual data\n enable : bool\n enable/disable the array\n path : tuple\n path in the data dictionary to this MFArray\n dimensions : MFDataDimensions\n dimension information related to the model, package, and array\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n\n\n \"\"\"\n def __init__(self, sim_data, model_or_sim, structure, enable=True,\n path=None, dimensions=None, package=None):\n super(MFMultipleList, self).__init__(sim_data=sim_data,\n model_or_sim=model_or_sim,\n structure=structure,\n enable=enable,\n path=path,\n dimensions=dimensions,\n package=package)\n\n def get_data(self, key=None, apply_mult=False, **kwargs):\n return super(MFMultipleList, self).get_data(key=key,\n apply_mult=apply_mult)","sub_path":"hataripy/mf6/data/mfdatalist.py","file_name":"mfdatalist.py","file_ext":"py","file_size_in_byte":54607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611225348","text":"import pytz\nimport json\nimport datetime\nimport logging\n\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.conf import settings\n\nfrom decimal import Decimal\nfrom dateutil.relativedelta import relativedelta\n\nfrom accountifie.gl.bmo import BusinessModelObject\nimport accountifie.gl.models\nfrom accountifie.toolkit.utils import get_default_company\nimport accountifie.environment.apiv1 as env_api\n\nlogger = logging.getLogger('default')\n\nDZERO = Decimal('0')\nEASTERN = pytz.timezone('US/Eastern')\n\n\n\n\ndef make_expense_stubs(cf_data):\n \"\"\"\n given a list of cashflows create expenses\n from those cashflows which have not already had an expense created from them\n where the cashflows were booked versus Accounts Payable\n \"\"\"\n today = datetime.datetime.now().date()\n stub_account = 
env_api.variable('UNALLOCATED_ACCT', {})\n unallocated_employee = env_api.variable('UNALLOCATED_EMPLOYEE_ID', {})\n ap_account = env_api.variable('GL_ACCOUNTS_PAYABLE', {})\n\n new_stubs = 0\n from_AP = [cf for cf in cf_data if cf['trans_type_id'] == ap_account]\n for cf in from_AP:\n if Expense.objects.filter(from_cf_id=cf['id']).count() == 0:\n new_stubs += 1\n\n # if expense acct is on the cashflow then use that\n if cf['expense_acct_id']:\n account_id = cf['expense_acct_id']\n else:\n account_id = stub_account\n\n Expense(comment=cf['description'], counterparty_id=cf['counterparty_id'], account_id=account_id, from_cf_id=cf['id'],\n expense_date=cf['post_date'], start_date=cf['post_date'], amount=-cf['amount'], stub=False,\n paid_from_id=cf['trans_type_id'], process_date=today, employee_id=unallocated_employee).save()\n\n return {'new': new_stubs, 'duplicates': len(from_AP) - new_stubs}\n\n\ndef make_stubs_from_ccard(cc_data):\n \"\"\"\n given a list of credit card transactions create expenses\n from those credit card trans which have not already had \n an expense created from them\n \"\"\"\n today = datetime.datetime.now().date()\n stub_account = env_api.variable('UNALLOCATED_ACCT', {})\n unallocated_employee = env_api.variable('UNALLOCATED_EMPLOYEE_ID', {})\n ap_account = env_api.variable('GL_ACCOUNTS_PAYABLE', {})\n\n new_stubs = 0\n for cc in cc_data:\n if Expense.objects.filter(from_ccard_id=cc['id']).count()==0:\n new_stubs += 1\n\n # if expense acct is on the cashflow then use that\n if cc['expense_acct_id']:\n account_id = cc['expense_acct_id']\n else:\n account_id = stub_account\n\n Expense(comment=cc['description'], counterparty_id=cc['counterparty_id'],\n account_id=account_id, from_ccard_id=cc['id'],\n expense_date=cc['trans_date'], start_date=cc['post_date'],\n amount=-cc['amount'], stub=False, paid_from_id=ap_account,\n process_date=today, employee_id=unallocated_employee).save()\n\n return {'new': new_stubs, 'duplicates': len(cc_data) - new_stubs}\n\n\nclass ExpenseAllocation(models.Model):\n expense = models.ForeignKey('base.Expense')\n project = models.ForeignKey('gl.Project')\n amount = models.DecimalField(max_digits=11, decimal_places=2)\n\n class Meta:\n app_label = 'base'\n db_table = 'base_expenseallocation'\n\n def __unicode__(self):\n return '%.2f: Project %s' %(self.amount, self.project)\n\n\n# HARDCODE\nPAID_FROM_CHOICES = [1001, 1002, 1003, 1100, 3000, 3005, 3006, 3010, 3020, 3250, 20100]\n\nclass Expense(models.Model, BusinessModelObject):\n \n company = models.ForeignKey('gl.Company', default='SAV')\n \n employee = models.ForeignKey('gl.Employee', null=True)\n account = models.ForeignKey('gl.Account')\n \n expense_date = models.DateField(null=True)\n start_date = models.DateField(null=True)\n end_date = models.DateField(null=True, blank=True)\n amount = models.DecimalField(max_digits=11, decimal_places=2, null=True)\n \n currency = models.CharField(max_length=10, default='USD')\n process_date = models.DateField(null=True)\n\n counterparty = models.ForeignKey('gl.Counterparty', null=True, blank=True, help_text=\"We need to match this up\")\n \n stub = models.BooleanField(default=False, help_text='incomplete, created from cashflow or credit card')\n from_cf = models.ForeignKey('base.Cashflow', null=True, blank=True, \n help_text='created from cashflow')\n from_ccard = models.ForeignKey('base.CreditCardTrans', null=True, blank=True,\n help_text='created from credit card trans')\n\n paid_from = models.ForeignKey('gl.Account', null=True, blank=True,\n 
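# Standalone sketch of the duplicate guard used by make_expense_stubs() and
# make_stubs_from_ccard() above, with the ORM lookup replaced by a plain set:
already_booked = {'cf-1'}  # ids that already produced an Expense
cashflows = [{'id': 'cf-1'}, {'id': 'cf-2'}]
new = [cf for cf in cashflows if cf['id'] not in already_booked]
summary = {'new': len(new), 'duplicates': len(cashflows) - len(new)}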
help_text=\"shows the account this was paid from, or is owed to\",\n limit_choices_to={'id__in': PAID_FROM_CHOICES},\n related_name='paid_from')\n comment = models.CharField(max_length=200, blank=True, null=True, help_text=\"Details of any modifications/notes added in Django\") \n\n short_code = 'EXP'\n\n class Meta:\n app_label = 'base'\n db_table = 'base_expense'\n\n def __unicode__(self):\n return '%d: %s: %s, %0.2f' % (self.id, self.expense_date.isoformat(), self.counterparty, self.amount)\n\n def save(self):\n models.Model.save(self)\n if not self.stub:\n self.update_gl()\n \n def delete(self):\n self.delete_from_gl()\n models.Model.delete(self)\n\n @property\n def counterparty_name(self):\n return self.counterparty.name\n\n @property\n def amount_fmt(self):\n return \"{:,.0f}\".format(self.amount)\n\n @property\n def id_link(self):\n return mark_safe('%s' %( self.id, self.id))\n\n @property\n def admin_link(self):\n return mark_safe('admin %s' %( self.id, self.id))\n\n def _capitalize(self, debit):\n # capitalising or not\n all_deprec_accts = dict((x.cap_account, x) for x in accountifie.gl.models.DepreciationPolicy.objects.all())\n capitalize_it = (debit in all_deprec_accts and abs(self.amount) >= 500.0)\n expense_it = (debit in all_deprec_accts and abs(self.amount) < 500.0)\n\n acc_asset_dep = None\n months = None\n if expense_it:\n exp_path = debit.path.replace('assets.noncurr.premandequip', 'equity.retearnings.opexp.admin')\n debit = accountifie.gl.models.Account.objects.filter(path=exp_path)[0]\n elif capitalize_it:\n acc_asset_dep = all_deprec_accts[debit].depreciation_account\n months = all_deprec_accts[debit].depreciation_period\n return capitalize_it, debit.id, acc_asset_dep.id if acc_asset_dep else None, months \n\n\n def _get_exp_lines(self, exp_acct):\n #allocations = self.expenseallocation_set.all()\n allocations = ExpenseAllocation.objects.filter(expense=self)\n exp_lines = []\n running_total = DZERO\n if len(allocations) > 0:\n for allocation in allocations:\n exp_lines.append((exp_acct, Decimal(allocation.amount), self.counterparty.id, ['project_%s' % allocation.project.id]))\n running_total += Decimal(allocation.amount)\n\n if abs(Decimal(self.amount) - running_total) >= Decimal('0.005'):\n exp_lines.append((exp_acct, Decimal(self.amount) - running_total, self.counterparty.id, []))\n \n return exp_lines\n\n\n def get_gl_transactions(self):\n \"\"\"We just debit the expense account and credit the generic\n Accounts Payable, whoever it is. 
We do not at this stage\n try to handle anything on how the debt was paid off.\n\n \"\"\"\n\n capitalize_it, debit, acc_asset_dep, months = self._capitalize(self.account)\n ACCTS_PAYABLE = self.paid_from.id\n PREPAID_EXP = env_api.variable('GL_PREPAID_EXP', {})\n ACCRUED_LIAB = env_api.variable('GL_ACCRUED_LIAB', {})\n \n trans = []\n\n # now three different paths\n\n if capitalize_it:\n # book to asset account\n tran = dict(\n company=self.company,\n date=self.start_date,\n date_end=None,\n trans_id='%s.%s.%s' % (self.short_code, self.id, 'CPLZ'),\n bmo_id='%s.%s' % (self.short_code, self.id),\n comment= \"Capitalized Asset, %s: %s\" % (self.id, self.comment),\n lines=[(ACCTS_PAYABLE,\n DZERO - Decimal(self.amount),\n self.counterparty.id,\n [])]\n )\n\n tran['lines'] += self._get_exp_lines(debit)\n trans.append(tran)\n # and now amort/deprec over appropriate time period\n\n amort_accts = accountifie.gl.models.Account.objects.filter(path=debit.path + '.amortization')\n if len(amort_accts) > 0:\n acc_pl_dep = amort_accts[0].id\n\n deprec_accts = accountifie.gl.models.Account.objects.filter(path=debit.path + '.depreciation')\n if len(deprec_accts) > 0:\n acc_pl_dep = deprec_accts[0].id\n\n trans.append(dict(\n company=self.company,\n date=self.start_date,\n date_end=self.start_date + relativedelta(months=months),\n trans_id='%s.%s.%s' % (self.short_code, self.id, 'DPRC'),\n bmo_id='%s.%s' % (self.short_code, self.id),\n comment= \"Depreciating asset, %s: %s\" % (self.id, self.comment),\n lines=[\n (acc_pl_dep, DZERO - Decimal(self.amount), self.counterparty.id, []),\n (acc_asset_dep, Decimal(self.amount), self.counterparty.id, []),\n ]\n ))\n\n elif self.end_date is not None and self.start_date != self.end_date and abs(self.amount) >=500.0:\n\n if self.expense_date == self.start_date:\n # paid in advance\n # create account payable\n trans.append(dict(\n company=self.company,\n date=self.start_date,\n date_end=None,\n trans_id='%s.%s.%s' % (self.short_code, self.id, 'AP'),\n bmo_id='%s.%s' % (self.short_code, self.id),\n comment= \"AP for %s: %s\" % (self.id, self.comment),\n lines=[\n (PREPAID_EXP, Decimal(self.amount), self.counterparty.id, []),\n (ACCTS_PAYABLE, DZERO - Decimal(self.amount), self.counterparty.id, []),\n ]\n ))\n\n # expense over period\n tran = dict(\n company=self.company,\n date=self.start_date,\n date_end=self.end_date,\n trans_id='%s.%s.%s' % (self.short_code, self.id, 'EXPS'),\n bmo_id=self.id,\n comment= \"Expensing %s: %s\" % (self.id, self.comment),\n lines=[(PREPAID_EXP, DZERO - Decimal(self.amount), self.counterparty.id, []),]\n )\n tran['lines'] += self._get_exp_lines(debit)\n trans.append(tran)\n else:\n # paid in arrears\n \n # transfer from accrued liab to account payable at end of period\n trans.append(dict(\n company=self.company,\n date=self.end_date,\n date_end=None,\n trans_id='%s.%s.%s' % (self.short_code, self.id, 'AL2AP'),\n bmo_id='%s.%s' % (self.short_code, self.id),\n comment= \"AP for %s: %s\" % (self.id, self.comment),\n lines=[\n (ACCRUED_LIAB, Decimal(self.amount), self.counterparty.id, []),\n (ACCTS_PAYABLE, DZERO - Decimal(self.amount), self.counterparty.id, []),\n ]\n ))\n\n # accrue expense over period\n tran = dict(\n company=self.company,\n date=self.start_date,\n date_end=self.end_date,\n trans_id='%s.%s.%s' % (self.short_code, self.id, 'AL'),\n bmo_id='%s.%s' % (self.short_code, self.id),\n comment= \"Accruing %s: %s\" % (self.id, self.comment),\n lines=[(ACCRUED_LIAB, DZERO - Decimal(self.amount), self.counterparty.id, []),]\n )\n \n 
tran['lines'] += self._get_exp_lines(debit)\n trans.append(tran)\n\n else: # single date\n tran = dict(\n company=self.company,\n date=self.start_date,\n date_end=None,\n trans_id='%s.%s.%s' % (self.short_code, self.id, 'EXP'),\n bmo_id='%s.%s' % (self.short_code, self.id),\n comment= \"%s: %s\" % (self.id, self.comment),\n lines=[(ACCTS_PAYABLE, DZERO - Decimal(self.amount), self.counterparty.id, []),]\n )\n\n tran['lines'] += self._get_exp_lines(debit)\n trans.append(tran)\n\n return trans","sub_path":"savor/base/models/expense.py","file_name":"expense.py","file_ext":"py","file_size_in_byte":13681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"105466574","text":"from rest_framework.parsers import JSONParser\nfrom django.db import IntegrityError\nfrom . models import Event\nfrom . models import Supportedby\nfrom . models import our_partner\nfrom . models import Association\nfrom . models import Media_Partner\nfrom . models import Testimonials\nfrom . models import Speakers\n\nfrom . serializers import EventSerializer\nfrom . serializers import SupportedbySerializer\nfrom . serializers import our_partnerSerializer\nfrom . serializers import AssociationSerializer\nfrom . serializers import Media_PartnerSerializer\nfrom . serializers import TestimonialsSerializer\nfrom . serializers import SpeakersSerializer\n\nfrom django.http.response import JsonResponse\nfrom rest_framework.views import APIView\nfrom django.views.generic import TemplateView\nfrom django.views.decorators.csrf import csrf_exempt\n\n# from django.shortcuts import (get_object_or_404)\n\n# Create your views here.\nclass Event_Register(APIView):\n def post(self,request):\n try:\n register_data=JSONParser().parse(request)\n event_data= Event.objects.create(\n name=register_data['name'],\n startDate=register_data['startDate'],\n endDate=register_data['endDate'],\n status=register_data['status'],\n registrationCharge=register_data['registrationCharge'],\n registrationTax=register_data['registrationTax'],\n registrationTotal=register_data['registrationTotal'],\n fileUpload=register_data['fileUpload'],\n description=register_data['description']\n )\n event_data.save()\n return JsonResponse(\"successfully registered!!\",safe=False)\n\n except IntegrityError:\n return JsonResponse('user with this data already exists!!',safe=False)\n\nclass Event_List(APIView):\n def get(self,request):\n events=Event.objects.all()\n serializer=EventSerializer(events,many=True)\n return JsonResponse(serializer.data,safe=False)\n\n@csrf_exempt\ndef UserDelete(request,id=0):\n try:\n if request.method=='DELETE':\n event=Event.objects.get(id=id)\n event.delete()\n return JsonResponse('deleted successfully!!',safe=False)\n except Exception:\n return JsonResponse('Id not found!!',safe=False)\n \n \n \n # def delete(self,request,id):\n # delete_event=request.data\n # delete_event=self.get_object(id=id)\n # delete_event = delete_event.objects.filter(pk=id).delete()\n # event=Event.objects.get(id=delete_event[\"id\"])\n # event.delete()\n # return JsonResponse(\"successfully deleted!!\",safe=False)\n\n\nclass Supportedby_Register(APIView):\n def post(self,request):\n try:\n register_data=JSONParser().parse(request)\n supportedby_data= Supportedby.objects.create(\n nameofthecompany=register_data['nameofthecompany'],\n imgurl=register_data['imgurl']\n )\n supportedby_data.save()\n return JsonResponse(\"successfully 
registered!!\",safe=False)\n\n except IntegrityError:\n return JsonResponse('user of this data is already exist!!',safe=False)\n\n\nclass Supportedby_List(APIView):\n def get(self,request):\n supportedby=Supportedby.objects.all()\n serializer=SupportedbySerializer(supportedby,many=True)\n return JsonResponse(serializer.data,safe=False) \n\n@csrf_exempt\ndef UserDelete(request,id=0):\n try:\n if request.method=='DELETE':\n supportedby=Supportedby.objects.get(id=id)\n supportedby.delete()\n return JsonResponse('deleted successfully!!',safe=False)\n except Exception:\n return JsonResponse('Id not found!!',safe=False)\n\n\n'''OUR PARTNERS'''\nclass our_partner_Register(APIView):\n def post(self,request):\n try:\n register_data=JSONParser().parse(request)\n ourpartner_data= our_partner.objects.create(\n name=register_data['name'],\n img_url=register_data['img_url'],\n link=register_data['link']\n )\n\n ourpartner_data.save()\n return JsonResponse(\"successfully registered!!\",safe=False)\n\n except IntegrityError:\n return JsonResponse('user of this data is already exist!!',safe=False)\n\n\nclass our_partner_List(APIView):\n def get(self,request):\n ourpartner=our_partner.objects.all()\n serializer=our_partnerSerializer(ourpartner,many=True)\n return JsonResponse(serializer.data,safe=False)\n\n@csrf_exempt\ndef UserDelete(request,id=0):\n try:\n if request.method=='DELETE':\n ourpartner=our_partner.objects.get(id=id)\n ourpartner.delete()\n return JsonResponse('deleted successfully!!',safe=False)\n except Exception:\n return JsonResponse('Id not found!!',safe=False)\n\n\n'''ASSOCIATION'''\nclass Association_Register(APIView):\n def post(self,request):\n try:\n register_data=JSONParser().parse(request)\n association_data= Association.objects.create(\n name=register_data['name'],\n img_url=register_data['img_url'],\n link=register_data['link']\n )\n association_data.save()\n return JsonResponse(\"successfully registered!!\",safe=False)\n\n except IntegrityError:\n return JsonResponse('user of this data is already exist!!',safe=False)\n\n\nclass Association_List(APIView):\n def get(self,request):\n association=Association.objects.all()\n serializer=AssociationSerializer(association,many=True)\n return JsonResponse(serializer.data,safe=False)\n\n@csrf_exempt\ndef UserDelete(request,id=0):\n try:\n if request.method=='DELETE':\n association=Association.objects.get(id=id)\n association.delete()\n return JsonResponse('deleted successfully!!',safe=False)\n except Exception:\n return JsonResponse('Id not found!!',safe=False)\n\n\n'''MEDIA PARTNER'''\nclass Media_Partner_Register(APIView):\n def post(self,request):\n try:\n register_data=JSONParser().parse(request)\n mediapartner_data= Media_Partner.objects.create(\n name=register_data['name'],\n img_url=register_data['img_url'],\n link=register_data['link']\n )\n mediapartner_data.save()\n return JsonResponse(\"successfully registered!!\",safe=False)\n\n except IntegrityError:\n return JsonResponse('user of this data is already exist!!',safe=False)\n\n\nclass Media_Partner_List(APIView):\n def get(self,request):\n mediapartner=Media_Partner.objects.all()\n serializer=AssociationSerializer(mediapartner,many=True)\n return JsonResponse(serializer.data,safe=False)\n\n@csrf_exempt\ndef UserDelete(request,id=0):\n try:\n if request.method=='DELETE':\n mediapartner=Media_Partner.objects.get(id=id)\n mediapartner.delete()\n return JsonResponse('deleted successfully!!',safe=False)\n except Exception:\n return JsonResponse('Id not 
found!!',safe=False)\n\n\n\n'''TESTIMONIALS'''\nclass Testimonials_Register(APIView):\n def post(self,request):\n try:\n register_data=JSONParser().parse(request)\n testimonials_data= Testimonials.objects.create(\n name=register_data['name'],\n img_url=register_data['img_url'],\n link=register_data['link']\n )\n testimonials_data.save()\n return JsonResponse(\"successfully registered!!\",safe=False)\n\n except IntegrityError:\n return JsonResponse('user with this data already exists!!',safe=False)\n\n\nclass Testimonials_List(APIView):\n def get(self,request):\n testimonials=Testimonials.objects.all()\n serializer=TestimonialsSerializer(testimonials,many=True)\n return JsonResponse(serializer.data,safe=False)\n\n@csrf_exempt\ndef UserDelete(request,id=0):\n try:\n if request.method=='DELETE':\n testimonials=Testimonials.objects.get(id=id)\n testimonials.delete()\n return JsonResponse('deleted successfully!!',safe=False)\n except Exception:\n return JsonResponse('Id not found!!',safe=False)\n\n\n\n'''SPEAKERS'''\nclass Speakers_Register(APIView):\n def post(self,request):\n try:\n register_data=JSONParser().parse(request)\n speakers_data= Speakers.objects.create(\n name=register_data['name'],\n img_url=register_data['img_url'],\n link=register_data['link']\n )\n speakers_data.save()\n return JsonResponse(\"successfully registered!!\",safe=False)\n\n except IntegrityError:\n return JsonResponse('user with this data already exists!!',safe=False)\n\n\nclass Speakers_List(APIView):\n def get(self,request):\n speakers=Speakers.objects.all()\n serializer=SpeakersSerializer(speakers,many=True)\n return JsonResponse(serializer.data,safe=False)\n\n@csrf_exempt\ndef UserDelete(request,id=0):\n try:\n if request.method=='DELETE':\n speakers=Speakers.objects.get(id=id)\n speakers.delete()\n return JsonResponse('deleted successfully!!',safe=False)\n except Exception:\n return JsonResponse('Id not found!!',safe=False)\n \n\n \n\n\n","sub_path":"recycle_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"482897265","text":"# Add dependencies\nimport csv\nimport os\n\n# Assign a variable for the file to load and the path\nfile_to_load = os.path.join(\"resources\", \"election_results.csv\")\n\n#Create a filename variable to a direct path to the file\nfile_to_save = os.path.join(\"analysis\", \"election_analysis.txt\")\n\n# 1. Initialize total vote counter\ntotal_votes = 0\n\n# Create list for candidates\ncandidate_options = []\n# Create a dictionary for candidate votes\ncandidate_votes = {}\n# Winning Candidate and Winning Count Tracker\nwinning_candidate = \"\"\nwinning_count = 0\nwinning_percentage = 0\n#Open the election results and read the file.\nwith open(file_to_load) as election_data:\n \n # To do: read and analyze the data here\n file_reader = csv.reader(election_data)\n \n # Read header row\n header = next(file_reader)\n \n # Read and print rows in csv file\n for row in file_reader:\n # 2. 
Add to the total vote count.\n total_votes += 1 \n # Print Candidate name\n candidate_name = row[2]\n #If the candidates does not match any existing candidate...\n if candidate_name not in candidate_options:\n # Add candidate name to list\n candidate_options.append(candidate_name)\n \n # Track that candidates vote\n candidate_votes[candidate_name] = 0\n \n # Add a vote to candidate's count\n candidate_votes[candidate_name] += 1\n # Save results to text file\nwith open(file_to_save, \"w\") as txt_file:\n election_results = (\n f'\\nElection Results\\n'\n f'-------------------------\\n'\n f'Total Votes:{total_votes: ,}\\n'\n f'-------------------------\\n')\n print(election_results)\n txt_file.write(election_results)\n # Save final vote count to text file\n \n # Interate through the candidate's list \n for candidate_name in candidate_votes:\n # Retrieve vote count of a candidate\n votes = candidate_votes[candidate_name] \n # Calculate percent of votes\n vote_percentage = float(votes) / float(total_votes) * 100\n # Add candidate name and percent of votes to text file\n candidate_results = (f\"{candidate_name} : {vote_percentage:.1f}% ({votes: ,})\\n\")\n \n # Print and write candidate's voter count and percentage\n print(candidate_results)\n txt_file.write(candidate_results)\n \n # Determin if votes are greater that the winning count\n if (votes > winning_count) and (vote_percentage > winning_percentage):\n \n # If both are true then set winning_count = votes and \n # winning_percent = vote_percent\n winning_count = votes\n winning_percentage = vote_percentage \n \n # 3. Set winning_candidate to candidate's name\n winning_candidate = candidate_name\n \n #print(winning_candidate_summary)\n winning_candidate_summary = (\n f\"--------------------------\\n\"\n f\"Winner: {winning_candidate}\\n\"\n f\"Winning Vote Count: {winning_count: ,}\\n\"\n f\"Winning Percentage: {winning_percentage: .1f}%\\n\"\n f\"--------------------------\\n\")\n \n print(winning_candidate_summary)\n # Save the winning candidate's name to the text file\n txt_file.write(winning_candidate_summary)","sub_path":"PyPoll.py","file_name":"PyPoll.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"16552152","text":"from unittest import mock\r\n\r\nimport rssEntry\r\nfrom libraryEntry import LibraryEntry\r\nimport requests_mock\r\n\r\n\r\n@mock.patch('feedparser.parse', return_value={'entries': [type('rss_entry', (object,), {\r\n 'title': 'title1.mkv',\r\n 'link': 'rss_ink',\r\n 'nyaa_infohash': 'nyaa_unfihash',\r\n 'nyaa_seeders': '2'\r\n})]})\r\ndef test_library_entry(feed_parser, library_entry_json):\r\n with requests_mock.Mocker() as mr:\r\n anime_data = {\r\n 'data': {\r\n 'id': 1,\r\n 'attributes': {\r\n 'titles': {0: 'title1'},\r\n 'canonicalTitle': 'title2',\r\n 'abbreviatedTitles': ['title3'],\r\n 'episodeCount': 1,\r\n 'subtype': 'movie',\r\n 'status': 'current'\r\n }\r\n }\r\n }\r\n mr.get(\"http://test.com\", json=anime_data)\r\n le = LibraryEntry(library_entry_json)\r\n assert len(le.rss_entries) == 1\r\n entry = le.rss_entries[0]\r\n mock_entry = feed_parser.return_value['entries'][0]\r\n assert entry.rss_title == mock_entry.title\r\n assert entry.torrent_link == mock_entry.link\r\n assert entry.info_hash == mock_entry.nyaa_infohash\r\n assert entry.seeders == int(mock_entry.nyaa_seeders)\r\n assert entry.quality == 
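# Equivalent tally for the vote counting above using collections.Counter
# (assumes the same csv layout, with the candidate name in column index 2):
import csv
from collections import Counter
with open(file_to_load) as f:
    reader = csv.reader(f)
    next(reader)  # skip the header row
    tally = Counter(row[2] for row in reader)
winner, winner_votes = tally.most_common(1)[0]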
rssEntry.RSSEntry.RESOLUTION_UNKNOWN\r\n","sub_path":"Tests/test_library_entry.py","file_name":"test_library_entry.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"320610700","text":"\"\"\"\nCopyright 2015 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cafe.drivers.unittest.decorators import tags\nfrom cloudcafe.compute.common.types import HostServiceTypes\nfrom cloudcafe.compute.config import ComputeConfig\nfrom cloudroast.compute.fixtures import ComputeAdminFixture\n\ncompute_config = ComputeConfig()\nhypervisor = compute_config.hypervisor.lower()\n\n\nclass HostsAdminTest(ComputeAdminFixture):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Perform actions that setup the necessary resources for testing\n\n The following data is generated during this setup:\n - A list of hosts\n \"\"\"\n super(HostsAdminTest, cls).setUpClass()\n cls.hosts = cls.admin_hosts_client.list_hosts().entity\n\n @tags(type='smoke', net='no')\n def test_list_hosts(self):\n \"\"\"\n Test that an admin user can get a list of hosts\n\n Ensure that the host list created during setup is not empty.\n Validate that a Compute host is in the list.\n\n The following assertions occur:\n - The host list is populated\n - A compute host is found in the host list\n \"\"\"\n self.assertTrue(len(self.hosts) > 0, \"The hosts list is empty.\")\n for host in self.hosts:\n if host.service == HostServiceTypes.COMPUTE:\n return\n self.fail(\"The expected host: %s\"\n \" is not found in hosts list.\" % HostServiceTypes.COMPUTE)\n\n @tags(type='smoke', net='no')\n def test_get_host(self):\n \"\"\"\n Test that an admin user can get details about a host\n\n Select the first host name from the list of hosts generated during\n setup. Get details of the host. 
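A standalone sketch of the requests-mock pattern used in the
        test_library_entry fixture earlier (URL and payload are placeholders):

            import requests
            import requests_mock
            with requests_mock.Mocker() as m:
                m.get('http://test.com', json={'data': {'id': 1}})
                assert requests.get('http://test.com').json()['data']['id'] == 1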
For each resource in the host's\n resource list ensure that the resource is mapped to the correct\n (the selected) host.\n\n The following assertions occur:\n - The host details contain a list of resources\n - Every resource in the host's resource list correctly maps\n to the selected host\n \"\"\"\n host_name = self.hosts[0].host_name\n host = self.admin_hosts_client.get_host(host_name).entity\n self.assertTrue(len(host.resources) > 0,\n \"The resources list is empty.\")\n for resource in host.resources:\n self.assertEqual(resource.host, host_name,\n \"Resource is not mapped to host %s.\" % host_name)\n","sub_path":"cloudroast/compute/admin_api/hosts/test_hosts.py","file_name":"test_hosts.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375297269","text":"# data_scripts/lib/structs.py\nimport re\nimport json\n\nimport wikitextparser as wtp\n\nfrom data_scripts.lib import constants, utils\n\n\nclass Struct(object):\n pass\n\n\nclass Ref(Struct):\n rid = 0\n\n def __init__(\n self,\n event__id: int = None,\n text: str = None,\n empty: bool = False):\n\n if empty:\n return\n\n Ref.rid += 1\n self.rid = f'R{Ref.rid:05d}'\n self.event__id = event__id\n if text:\n self.name = utils.MyHTMLParser().extract_name(text)\n self.desc = (utils.TextFormatter()\n .text(text)\n .strip_ref_html_tags()\n .mark_double_quotes()\n .convert_ext_links_to_html()\n .convert_userbloglinks_to_html()\n .convert_bolds_to_html()\n .convert_italics_to_html()\n # .strip_wiki_links()\n .strip_wiki_links_files()\n .remove_displayed_wiki_images_or_files_everywhere()\n .strip_wps_templates()\n .remove_quote_templates()\n .remove_nowiki_html_tags()\n .restore_double_quotes()\n .get()\n )\n\n self.desc = self.desc if self.desc else None # replace empty string with none\n self.links = [str(x) for x in wtp.parse(\n utils.TextFormatter()\n .text(text)\n .convert_userbloglinks_to_html()\n .strip_wiki_links_files()\n .remove_displayed_wiki_images_or_files_everywhere()\n .strip_wps_templates()\n .remove_quote_templates()\n .get()\n ).wikilinks]\n\n def to_dict(self, **kwargs):\n if kwargs:\n pass # do nothing\n return self.__dict__\n\n @classmethod\n def from_dict(cls, **kwargs):\n ref = Ref(empty=True)\n for k, v in kwargs.items():\n setattr(ref, k, v)\n return ref\n\n def key(self):\n return (self.name, self.desc)\n\n def __hash__(self):\n return hash(self.key())\n\n def __eq__(self, other):\n if isinstance(other, Ref):\n return self.key() == other.key()\n return NotImplemented\n\n def __lt__(self, other):\n if isinstance(other, Ref):\n return self.rid < other.rid\n return NotImplemented\n\n\nclass Event(Struct):\n eid = 0\n\n def __init__(\n self,\n filename: str = None,\n ln: str = None,\n text: str = None,\n day: str = None,\n month: str = None,\n year: str = None,\n reality: str = None,\n empty: bool = False):\n\n if empty:\n return\n\n Event.eid += 1\n self.eid = f'E{Event.eid:05d}'\n self.file = filename\n self.line = ln\n self.date = self.__get_date(day, month, year)\n self.reality = reality\n self.title = self.__get_title(text)\n\n tf = utils.TextFormatter()\n text_norefs = (tf\n .text(text)\n .remove_ref_nodes()\n .remove_displayed_wiki_images_or_files_everywhere()\n .get()\n )\n\n self.desc = (tf\n .text(text_norefs)\n .mark_double_quotes()\n .convert_ext_links_to_html()\n .convert_bolds_to_html()\n .convert_italics_to_html()\n # .strip_wiki_links()\n .strip_wiki_links_files()\n 
.remove_displayed_wiki_images_or_files_everywhere()\n .strip_wps_templates()\n .remove_nowiki_html_tags()\n .restore_double_quotes()\n .get()\n )\n\n self.level = self.__get_heading_level(self.desc)\n self.multiple = False\n\n self.links = list(set([str(x) for x in wtp.parse(text_norefs).wikilinks]))\n parsed = wtp.parse(text)\n # self.templates = [str(x) for x in parsed.templates]\n self.refs = [Ref(self.eid, str(x)) for x in list(filter(self.__filter_tags, parsed.tags()))] # only extract tags\n self.refs = list(filter(lambda x: any([x.name, x.desc]), self.refs)) # remove empty refs\n\n def join(self, sub_evs: list):\n # use a raw string for the backreference so re.sub sees \1 and not chr(1)\n start_line = re.sub(r'([0-9]+)-[0-9]*', r'\1', self.line)\n self.line = f'{start_line}-{max([ev.line for ev in sub_evs])}'\n self.desc = self.desc + '\n' + '\n'.join([ev.desc for ev in sub_evs])\n self.multiple = True\n\n if hasattr(self, 'links'):\n self.links = list(set(self.links).union(set([element for sublist in [ev.links for ev in sub_evs] for element in sublist])))\n \n # self.templates = list(set(self.templates).union(set([element for sublist in [ev.templates for ev in sub_evs] for element in sublist])))\n\n if hasattr(self, 'refs'):\n sub_evs_refs_flat = [element for sublist in [ev.refs for ev in sub_evs] for element in sublist]\n sub_evs_refs_unique = set(sub_evs_refs_flat)\n def change_eid(ref):\n ref.event__id = self.eid\n return ref\n self.refs = list(map(change_eid, list(set(self.refs).union(sub_evs_refs_unique))))\n\n if hasattr(self, 'characters'):\n self.characters = list(set(self.characters).union(set([element for sublist in [ev.characters for ev in sub_evs] for element in sublist])))\n\n if hasattr(self, 'non_characters'):\n self.non_characters = list(set(self.non_characters).union(set([element for sublist in [ev.non_characters for ev in sub_evs] for element in sublist])))\n \n if hasattr(self, 'reflinks'):\n self.reflinks = sorted(set([*self.reflinks, *[rl for ev in sub_evs for rl in ev.reflinks]]))\n\n if not self.title:\n found_titles = list(filter(None, [ev.title for ev in sub_evs]))\n if len(found_titles) > 2:\n raise NotImplementedError\n else:\n self.title = next(iter(found_titles), None)\n \n if hasattr(self, 'ref_special'):\n if not getattr(self, 'ref_special'):\n found_refspecial = list(filter(None, [ev.ref_special for ev in sub_evs]))\n if found_refspecial:\n raise NotImplementedError\n\n\n def __get_date(self, day: str, month: str, year: str):\n date_str = ''\n if day:\n date_str = f'{month} {day}, {year}'\n else:\n if month:\n date_str = f'{month} {year}'\n else:\n date_str = year\n return date_str\n\n def __get_title(self, text):\n title = None\n match = re.search(r\"('){3}\\[\\[[^\\]]*\\]\\]('){3}\", text)\n if match:\n title = match.group(0).strip(\"'[]\")\n return title\n\n def __get_heading_level(self, text):\n heading_level = None\n if text[0] == '*' and text[1] != '*':\n heading_level = 1\n elif text[0:2] == '**':\n heading_level = 2\n return heading_level\n\n def __filter_tags(self, tag):\n ignored_tags = ['
', '', '']\n return not any([str(tag).startswith(it) for it in ignored_tags])\n\n @classmethod\n def from_dict(cls, **kwargs):\n ev = Event(empty=True)\n for k, v in kwargs.items():\n if k == 'refs' and isinstance(v, list) and all(isinstance(x, dict) for x in v):\n ev.refs = [Ref.from_dict(**x) for x in v]\n else:\n setattr(ev, k, v)\n return ev\n\n def to_dict(self, **kwargs):\n '''\n WTP objects (which are just wrappers) are stringified on Event __init__,\n but Event still carries a list of Ref objects which are not JSON serializable.\n This method returns a normal __dict__ but ensures Ref objects in self.refs are dictified too.\n '''\n jdict = {}\n for k, v in self.__dict__.items():\n if v and isinstance(v, list) and all(isinstance(x, Ref) for x in v) and (not kwargs or not kwargs['ignore_nested']):\n v = [ref.to_dict() for ref in v]\n jdict[k] = v\n return jdict\n\n def __str__(self):\n return utils.jdumps(self.to_dict())\n\n\nclass Source(Struct):\n def __init__(\n self,\n sid: str = None,\n title: str = None,\n stype: str = None,\n details: dict = None,\n empty: bool = False):\n\n if empty:\n return\n\n self.sid = sid\n self.set_title(title)\n self.type = stype\n self.details = details\n\n def set_title(self, title: str):\n self.clarification = None\n if title:\n clarif = utils.get_clarification(title)\n # TODO workaround for Luke Cage 2.13: T.R.O.Y.\n if clarif and clarif != \"(T.R.O.Y.)\":\n self.clarification = clarif\n self.title = title\n\n def plaintitle(self):\n if self.clarification:\n return self.title[:-len(self.clarification)].strip()\n return self.title\n\n @classmethod\n def split_titlestr(cls, titlestr: str):\n clarif = utils.get_clarification(titlestr)\n clarif = clarif if clarif != \"(T.R.O.Y.)\" else None\n newtitlestr = titlestr[:-len(clarif)].strip() if clarif else titlestr\n return (newtitlestr, clarif)\n\n def is_updatable_with_new_clarification(self, clarif: str):\n if (self.clarification\n or not self.details\n or not self.type in [constants.SRC_FILM, constants.SRC_TV_EPISODE, constants.SRC_ONESHOT]\n ):\n return False\n clarif = clarif[1:-1] # remove parenthesis\n type_matches = clarif in self.type\n # TODO workaround for \"episode\" clarifications\n if clarif == 'episode':\n type_matches = True\n\n series_matches = False\n if 'series' in self.details.keys():\n series_matches = clarif in self.details['series']\n return any([type_matches, series_matches])\n\n @classmethod\n def from_dict(cls, **kwargs):\n src = Source(empty=True)\n for k, v in kwargs.items():\n if k == 'sub_sources':\n src.sub_sources = [Source.from_dict(**x) for x in v]\n elif k == 'details' and isinstance(v, str):\n src.details = json.loads(v)\n else:\n setattr(src, k, v)\n return src\n\n def to_dict(self, **kwargs):\n jdict = {}\n for k, v in self.__dict__.items():\n if v and isinstance(v, list) and all(isinstance(x, Source) for x in v) and (not kwargs or not kwargs['ignore_nested']):\n v = [src.to_dict() for src in v]\n jdict[k] = v\n return jdict\n\n def __str__(self):\n return utils.jdumps(self.to_dict())\n\n def __repr__(self):\n return json.dumps(self.to_dict())\n\n def key(self):\n return (self.sid, self.title, self.type, self.details)\n\n def __hash__(self):\n return hash(self.key())\n\n def __eq__(self, other):\n if isinstance(other, Source):\n return self.key() == other.key()\n return NotImplemented\n\n\nclass SourceBuilder(object):\n def __init__(self):\n self.src = Source()\n\n def sid(self, sid: str):\n self.src.sid = sid\n return self\n\n def title(self, title: str):\n 
self.src.set_title(title)\n return self\n\n def stype(self, stype: str):\n self.src.type = stype\n return self\n\n def details(self, details: dict):\n self.src.details = details\n return self\n\n def build(self):\n return self.src\n\n\nclass Reflink(Struct):\n lid = 0\n\n def __init__(\n self,\n eid: str = None,\n rid: str = None,\n sid: str = None,\n empty: bool = False):\n\n if empty:\n return\n\n Reflink.lid += 1\n self.lid = f'L{Reflink.lid:05d}'\n self.evt = eid\n self.src = sid\n self.ref = rid\n\n @classmethod\n def from_dict(cls, **kwargs):\n rl = Reflink(empty=True)\n for k, v in kwargs.items():\n setattr(rl, k, v)\n return rl\n\n def to_dict(self, **kwargs):\n if kwargs:\n pass # do nothing\n return self.__dict__\n\n def __str__(self):\n return utils.jdumps(self.to_dict())\n\n def key(self):\n return (self.lid)\n\n def __hash__(self):\n return hash(self.key())\n\n def __eq__(self, other):\n # compare against other Reflinks, not Sources\n if isinstance(other, Reflink):\n return self.key() == other.key()\n return NotImplemented\n","sub_path":"data_scripts/lib/structs.py","file_name":"structs.py","file_ext":"py","file_size_in_byte":12474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580662520","text":"from glimmer.api import PluginParserBase, register_plugin, catch_stdout, is_valid_pathname, check_if_base64, base64_decode\n\n\nclass Plugin(PluginParserBase):\n protocols = [\"python\", \"pythons\"]\n\n def rule_check(self, module_path):\n return self.protocol_check(module_path) and is_valid_pathname(self.remove_protocol(module_path))\n\n def get_data(self, module_path):\n protocol = self.get_protocol(module_path)\n\n try:\n with open(self.remove_protocol(module_path), \"r\") as f, catch_stdout() as s:\n data = f.read()\n exec(data)\n result = s.getvalue().strip()\n if protocol == \"pythons\":\n result = result.split()\n result = [base64_decode(r) if check_if_base64(\n r) else r for r in result]\n else:\n result = (result, )\n return result\n except Exception:\n return ()\n\n\nregister_plugin(Plugin)\n","sub_path":"glimmer/plugins/parser/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455138536","text":"#!/bin/python3\n\ns1 = input()\ns2 = input()\n\n# build array\narr = [[0 for _ in s2 + ' '] for _ in s1 + ' ']\n\nfor i in range(1, len(s1) + 1):\n for j in range(1, len(s2) + 1):\n if s1[i-1] == s2[j-1]:\n arr[i][j] = arr[i-1][j-1] + 1\n else:\n arr[i][j] = max(arr[i-1][j], arr[i][j-1])\n\nprint(arr[len(s1)][len(s2)])","sub_path":"HackerRank/common_child.py","file_name":"common_child.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57012704","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Important Note: Before you turn this problem in, make sure everything runs as expected.
First, **restart the kernel** (in the menubar, select Kernel$\\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\\rightarrow$Run All).\n# \n# Make sure you fill in any place that says `YOUR CODE HERE` or \"YOUR ANSWER HERE\", as well as your name and collaborators below:\n\n# In[ ]:\n\n\nNAME = \"Karl Magnus Sundelin\"\nCOLLABORATORS = \"\"\n\n\n# ---\n\n# # Norwegian University for Science and Technology - Fall 2019\n# ![](download.jpg)\n# \n# # INGT1001 - Assignment One\n# Assignment one includes two parts, and this notebook will be your personal submission of the second part. For the first part, please download the other notebook.\n# For the assignment of this course, we will be using the Jupyter notebook:\n# ![](jupyter.png)\n# \n# # Instructions\n# - Try to answer all questions in the given spaces\n# - For questions requiring your written response, you need to print your answers. If you need to leave a blank answer then state that as well.\n# - Code answer boxes do not carry stored variables and data from previous code boxes.\n# - Part two is due on **Wednesday (September 25, 2019) at midnight!**\n\n# # Programming Exercises\n# ##### Questions 3, 5, 9, 12, 13, and 14 respectively.\n\n# ### Write a program that asks the user to enter the mass of an object in pounds and then calculates and displays the mass of the object in kilograms.\n# #### Pounds to Kilograms One pound is equivalent to 0.454 kilograms. \n\n# In[6]:\n\n\n\npounds = float(input(\"How many pounds does the object weigh? \"))\nkg = pounds*0.454\nprint(\"In kilograms the object's mass is: \",kg)\n\n\n# ### Distance Traveled\n# #### Assuming there are no accidents or delays, the distance that a car travels down the interstate can be calculated with the following formula: Distance = Speed x Time\n# #### A car is traveling at 70 miles per hour. Write a program that displays the following:\n# * The distance the car will travel in 6 hours\n# * The distance the car will travel in 10 hours\n# * The distance the car will travel in 15 hours\n\n# In[5]:\n\n\n\nsp = 70\nt1 = 6\nt2 = 10\nt3 = 15\nd1 = sp*t1\nd2 = sp*t2\nd3 = sp*t3\nprint(\"The distance the car will travel in 6 hours: \", d1)\nprint(\"The distance the car will travel in 10 hours: \", d2)\nprint(\"The distance the car will travel in 15 hours: \", d3)\n\n\n# ### Circle Measurements\n# #### Write a program that asks the user to enter the radius of a circle. The program should calculate and display the area and circumference of the circle using πr² for the area and 2πr for the circumference.\n# ###### Hint: You can either use 3.14159 as the value of pi (π), or add the statement \"import math\" to the start of the program and then use \"math.pi\" wherever you need the value of pi in the program.\n# \n\n# In[4]:\n\n\n# YOUR CODE HERE\nr = float(input(\"Enter a radius for the circle and you will get back its area and circumference. \"))\narea = 3.14159 * r ** 2\ncircumference = 2 * 3.14159 * r\nprint(area)\nprint(circumference)\n\n\n# ### Stock Transaction Program\n# \n# #### Last month, Joe purchased some stock in Acme Software, Inc. Here are the details of the purchase:\n# \n# * The number of shares that Joe purchased was 2,000.\n# * When Joe purchased the stock, he paid \\\\$40.00 per share.\n# * Joe paid his stockbroker a commission that amounted to 3 percent of the amount he paid for the stock.\n# \n# #### Two weeks later, Joe sold the stock.
Here are the details of the sale:\n# * The number of shares that Joe sold was 2,000.\n# * He sold the stock for \\\\$42.75 per share.\n# * He paid his stockbroker another commission that amounted to 3 percent of the amount he received for the stock.\n# \n# #### Write a program that displays the following information:\n# * The amount of money Joe paid for the stock.\n# * The amount of commission Joe paid his broker when he bought the stock.\n# * The amount for which Joe sold the stock.\n# * The amount of commission Joe paid his broker when he sold the stock.\n# \n# #### Display the amount of money that Joe had left when he sold the stock and paid his broker (both times). If this amount is positive, then Joe made a profit. If the amount is negative, then Joe lost money.\n\n# In[2]:\n\n\n# YOUR CODE HERE\n# Stock Transaction Program\n\nshares = 2000\ncom = 0.03\nbpps = 40.00\nspps = 42.75\npaid_for_stock = shares*bpps\nprint(paid_for_stock)# Amount of money Joe paid for the stock\ncom1 = paid_for_stock*com\nprint(com1)# Amount of commission Joe paid his broker when he bought the stock\nsold_for_stock = spps*shares\nprint(sold_for_stock)# Amount for which Joe sold the stock\ncom2 = sold_for_stock*com\nprint(com2)# The amount of commision Joe paid his broker when he sold the stock\nprofit = sold_for_stock-com2-paid_for_stock-com1\nprint(profit)# The amount of Joe's profit for the transaction\n\n\n# ### Planting Grapevines\n# #### A vineyard owner is planting several new rows of grapevines, and needs to know how many grapevines to plant in each row. She has determined that after measuring the length of a future row, she can use the following formula to calculate the number of vines that will fit in the row, along with the trellis end-post assemblies that will need to be constructed at each end of the row:\n# \n# #### V = (R - 2E)/S\n# \n# #### The terms in the formula are:\n# ##### V is the number of grapevines that will fit in the row. R is the length of the row, in feet. E is the amount of space, in feet, used by an end-post assembly. S is the space between vines, in feet.\n# #### Write a program that makes the calculation for the vineyard owner. The program should ask the user to input the following:\n# \n# * The length of the row, in feet\n# * The amount of space used by an end-post assembly, in feet\n# * The amount of space between the vines, in feet\n# \n# #### Once the input data has been entered, the program should calculate and display the num-ber of grapevines that will fit in the row.\n\n# In[8]:\n\n\n# V number grapevines\n# R length of row\n# E is amount of space\n# S is space between vines\n\nR = float(input(\"Length of row, in feet? \"))\nE = float(input(\"Amount of space used by an end-post assembly, in feet? \"))\nS = float(input(\"Space between vines? \"))\nV = (R-2*E)/S\nprint(V)\n\n\n# ### Compound Interest\n# #### When a bank account pays compound interest, it pays interest not only on the principal amount that was deposited into the account, but also on the interest that has accumulated over time. Suppose you want to deposit some money into a savings account, and let the account earn compound interest for a certain number of years. The formula for calculating the balance of the account after a specified number of years is: \n# \n# ![](Equation14.png)\n# \n# #### The terms in the formula are:\n# ##### A is the amount of money in the account after the specified number of years. P is the principal amount that was originally deposited into the account. 
r is the annual interest rate. n is the number of times per year that the interest is compounded. t is the specified number of years.\n# #### Write a program that makes the calculation for you. The program should ask the user to input the following:\n# * The amount of principal originally deposited into the account\n# * The annual interest rate paid by the account\n# * The number of times per year that the interest is compounded (For example, if interest is compounded monthly, enter 12. If interest is compounded quarterly, enter 4.)\n# * The number of years the account will be left to earn interest\n# \n# #### Once the input data has been entered, the program should calculate and display the amount of money that will be in the account after the specified number of years.\n# \n# \n# ![](questionnote.png)\n# \n\n# In[11]:\n\n\norg_dep = float(input(\"The amount of principal originally deposited into the account? \"))\nan_int = float(input(\"The annual interest rate paid by the account? \"))\nitpy = int(input(\"The number of times per year that the interest is compounded? \"))\nyears = float(input(\"The number of years the account will be left to earn interest? \"))\n\n# A = P * (1 + r/n) ** (n*t), with the rate entered as a percentage\nmoney_total = org_dep*((1+an_int/100/itpy)**(itpy*years))\nprint(money_total)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"TA_4/Assignment1_2/Øving 1 - Del 2_karlmsu_attempt_2019-09-23-20-55-28_Øving PartII.py","file_name":"Øving 1 - Del 2_karlmsu_attempt_2019-09-23-20-55-28_Øving PartII.py","file_ext":"py","file_size_in_byte":8091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382410710","text":"import os\nimport argparse\nfrom Vector import Vector\nfrom array_utils import aperture\n\n\n\nclass Sketch:\n def __init__(self, text, k, d):\n freq = [0] * d\n for kgram in aperture(text, k):\n h = hash(kgram)\n freq[h%d] += 1\n self._sketch = Vector(freq).direction()\n\n def similar_to(self, other):\n return self._sketch * other._sketch\n\n def __str__(self):\n return str(self._sketch)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Calculate the Sketch of a text document')\n parser.add_argument('filename', help='name of the text file')\n parser.add_argument('k', type=int, help='k as in kgrams')\n parser.add_argument('d', type=int, help='dimension : how fine-grained is your reality?')\n args = parser.parse_args()\n filename = args.filename\n k = args.k\n d = args.d\n with open(filename) as f:\n text = f.read()\n sk = Sketch(text, k, d)\n print(sk)\n f1 = open('../corpus/prejudice.txt')\n s1 = Sketch(f1.read(), k, d)\n f2 = open('../corpus/constitution.txt')\n s2 = Sketch(f2.read(), k, d)\n print(s1.similar_to(s2))\n\n\n","sub_path":"Sketch.py","file_name":"Sketch.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488474802","text":"\"\"\"Tests for cement.ext.ext_alarm.\"\"\"\n\nimport time\nimport signal\nfrom cement.core.exc import CaughtSignal\nfrom cement.utils import test\n\n\nclass AlarmExtTestCase(test.CementExtTestCase):\n\n def setUp(self):\n super(AlarmExtTestCase, self).setUp()\n self.app = self.make_app('tests',\n extensions=['alarm'],\n argv=[]\n )\n\n @test.raises(CaughtSignal)\n def test_alarm_timeout(self):\n global app\n app = self.app\n with app as app:\n try:\n app.alarm.set(1, \"The Timer Works!\")\n time.sleep(3)\n except CaughtSignal as e:\n self.eq(e.signum, signal.SIGALRM)\n raise\n\n def test_alarm_no_timeout(self):\n with self.app as app:\n
app.alarm.set(3, \"The Timer Works!\")\n time.sleep(1)\n app.alarm.stop()\n","sub_path":"tests/ext/alarm_tests.py","file_name":"alarm_tests.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330571358","text":"#coding = utf-8\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport mpl_toolkits.mplot3d\nimport math\ndef griewank(d, x):\n factor_1 = np.zeros(x[0].shape)\n for i in range(d):\n factor_1 += x[i] ** 2 / 4000\n factor_2 = np.ones(x[0].shape)\n for i in range(d):\n factor_2 *= np.cos(x[i]/math.sqrt(i+1))\n return 1 + factor_1 - factor_2\n\nx = np.linspace(-10, 10, 1000)\ny = np.linspace(-10, 10, 1000)\nx, y = np.meshgrid(x, y)\nz = griewank(2, [x, y])\n\nfigure = plt.figure()\nax = figure.gca(projection=\"3d\")\nax.plot_surface(x, y, z, cmap=cm.YlGnBu_r)\nplt.title(r'$z=1 + \\frac{x^2 + y^2}{4000}-\\cos{x}\\cos{\\frac{y}{\\sqrt{2}}}$')\nax.set_xlabel('X axis')\nax.set_ylabel('Y axis')\nax.set_zlabel('Z axis')\n# ax.contour(x, y, z, zdir='z', offset=10, cmap=plt.get_cmap('rainbow'))\nplt.show()","sub_path":"Artificial-Intelligence/Griewank.py","file_name":"Griewank.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357989817","text":"# coding: utf-8\nfrom sqlalchemy import Column, INTEGER, String, TEXT\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\nmetadata = Base.metadata\n\n\nclass TDataConfigInfo(Base):\n __tablename__ = 't_dataconfig_info'\n\n id = Column(INTEGER, primary_key=True)\n vc_name = Column(String(50))\n vc_table_name = Column(String(50))\n vc_sql = Column(TEXT)\n vc_column = Column(TEXT)\n vc_index = Column(String(255))\n vc_task_ids = Column(String(255))\n","sub_path":"worker_fd/model/t_dataconfig_Info.py","file_name":"t_dataconfig_Info.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62755516","text":"\"\"\"Finite-horizon discrete environments with known transition dynamics. These\nare handy when you want to perform exact maxent policy optimisation.\"\"\"\n\nimport abc\n\nimport gym\nfrom gym import spaces\nimport numpy as np\n\n\nclass ModelBasedEnv(gym.Env, abc.ABC):\n \"\"\"ABC for tabular environments with known dynamics.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise common attributes of all model-based environments,\n including current state & number of actions taken so far (initial None,\n so that error can be thrown if reset() is not called), attributes for\n cached observation/action space, and random seed for rollouts.\"\"\"\n self.cur_state = None\n self.n_actions_taken = None\n # Constructing action & observation spaces requires self.n_actions and\n # self.obs_dim, which are set in subclasses. If we constructed\n # observation & action in this __init__ method, then subclasses would\n # have to call super().__init__() last to give it access to\n # obs_dim/n_actions. 
By constructing these lazily, we ensure that\n # subclasses can call super().__init__() at any point & still have it\n # succeed.\n self._action_space = None\n self._observation_space = None\n self.seed()\n\n @property\n def action_space(self):\n if self._action_space is None:\n self._action_space = spaces.Discrete(self.n_actions)\n return self._action_space\n\n @property\n def observation_space(self):\n if self._observation_space is None:\n self._observation_space = spaces.Box(low=float('-inf'),\n high=float('inf'),\n shape=(self.obs_dim, ))\n return self._observation_space\n\n def seed(self, seed=None):\n if seed is None:\n # Gym API wants list of seeds to be returned for some reason, so\n # generate a seed explicitly in this case\n seed = np.random.randint(0, 1 << 31)\n self.rand_state = np.random.RandomState(seed)\n return [seed]\n\n def reset(self):\n self.cur_state = self.rand_state.choice(self.n_states,\n p=self.initial_state_dist)\n self.n_actions_taken = 0\n # as in step(), we copy so that it can't be mutated in-place (updates\n # will be reflected in self.observation_matrix!)\n return self.observation_matrix[self.cur_state].copy()\n\n def step(self, action):\n assert self.cur_state is not None \\\n and self.n_actions_taken is not None, \\\n \"remember to call reset() before first step()\"\n old_state = self.cur_state\n out_dist = self.transition_matrix[old_state, action]\n choice_states = np.arange(self.n_states)\n next_state = int(\n self.rand_state.choice(choice_states, p=out_dist, size=()))\n self.cur_state = next_state\n self.n_actions_taken += 1\n done = self.n_actions_taken >= self.horizon\n reward = self.reward_matrix[old_state]\n assert np.isscalar(reward), reward\n # copy so that it can't be mutated in-place\n obs = self.observation_matrix[next_state].copy()\n assert obs.ndim == 1, obs.shape\n infos = {\"old_state\": old_state, \"new_state\": next_state}\n return obs, reward, done, infos\n\n @property\n def n_states(self):\n \"\"\"Number of states in this MDP (int).\"\"\"\n return self.transition_matrix.shape[0]\n\n @property\n def n_actions(self):\n \"\"\"Number of actions in this MDP (int).\"\"\"\n return self.transition_matrix.shape[1]\n\n @property\n def obs_dim(self):\n \"\"\"Size of observation vectors for this MDP.\"\"\"\n return self.observation_matrix.shape[-1]\n\n # ############################### #\n # METHODS THAT MUST BE OVERRIDDEN #\n # ############################### #\n\n @property\n @abc.abstractmethod\n def transition_matrix(self):\n \"\"\"3D transition matrix with dimensions corresponding to current state,\n current action, and next state (in that order). 
In other words, if `T`\n is our returned matrix, then `T[s,a,sprime]` is the chance of\n transitioning into state `sprime` after taking action `a` in state\n `s`.\"\"\"\n\n @property\n @abc.abstractmethod\n def observation_matrix(self):\n \"\"\"2D observation matrix with dimensions corresponding to current state\n (first dim) and elements of observation (second dim).\"\"\"\n\n @property\n @abc.abstractmethod\n def reward_matrix(self):\n \"\"\"1D reward matrix with an element corresponding to each state.\"\"\"\n\n @property\n @abc.abstractmethod\n def horizon(self):\n \"\"\"Number of actions that can be taken in an episode.\"\"\"\n\n @property\n @abc.abstractmethod\n def initial_state_dist(self):\n \"\"\"1D vector representing a distribution over initial states.\"\"\"\n return\n","sub_path":"src/imitation/model_env.py","file_name":"model_env.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474935994","text":"class Solution(object):\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n #either start at topright or bottomleft corner\n #the two end conditions work because values increase in opposite directions along rows and columns\n #ex. from 15, check a value smaller than 15 (target=5): traverse to the previous column to decrease, instead of going to the next row, which would increase the value and is therefore wrong\n #likewise choose either column or row traversal and reach the particular value\n #O(m+n), need to traverse either sides\n #O(1) in place\n \n \n \n #topright corner\n if matrix:\n i=0\n j=len(matrix[0])-1\n #while boundary conditions fulfilled\n while i<=len(matrix)-1 and j>=0:\n #if target is less than value, go to previous column\n if target<matrix[i][j]:\n j-=1\n elif target==matrix[i][j]:\n return True\n #if target is more than value, go to next row\n else:\n i+=1\n return False\n ","sub_path":"search2dmatrix2.py","file_name":"search2dmatrix2.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197421151","text":"from contextlib import contextmanager\nfrom itertools import product\nimport json\nfrom pathlib import Path\nimport shutil\nimport sys\nimport tempfile\n\nfrom ccds.__main__ import api_main\nimport pytest\n\n\nCCDS_ROOT = Path(__file__).parents[1].resolve()\n\n\ndefault_args = {\n 'project_name': 'my_test_project',\n 'repo_name': 'my-test-repo',\n 'module_name': 'project_module',\n 'author_name': 'DrivenData',\n 'description': 'A test project',\n 'open_source_license' : 'MIT',\n 'dataset_storage': {\"azure\": {\"container\": \"container-name\"}},\n}\n\ndef config_generator():\n cookiecutter_json = json.load((CCDS_ROOT / 'cookiecutter.json').open('r'))\n\n # python versions for the created environment\n py_version = [('python_version_number', v) for v in ['3.7']]\n\n configs = product(\n py_version,\n [('environment_manager', opt) for opt in cookiecutter_json['environment_manager']],\n [('dependency_file', opt) for opt in cookiecutter_json['dependency_file']],\n [('pydata_packages', opt) for opt in cookiecutter_json['pydata_packages']],\n )\n\n def _is_valid(config):\n config = dict(config)\n # Pipfile + pipenv only valid combo for either\n if (config['environment_manager'] == 'pipenv') ^ (config['dependency_file'] == 'Pipfile'):\n return False\n # conda is the only valid env manager for environment.yml\n if (config['dependency_file'] ==
'environment.yml') and (config['environment_manager'] != 'conda'):\n return False\n return True\n\n # remove invalid configs\n configs = [\n c for c in configs if _is_valid(c)\n ]\n\n for c in configs:\n config = dict(c)\n config.update(default_args)\n yield config\n\n\n@contextmanager\ndef bake_project(config):\n temp = Path(tempfile.mkdtemp(suffix='data-project')).resolve()\n\n api_main.cookiecutter(\n str(CCDS_ROOT),\n no_input=True,\n extra_context=config,\n output_dir=temp,\n overwrite_if_exists=True\n )\n\n yield temp / config['repo_name']\n\n # cleanup after\n shutil.rmtree(temp)","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"280395974","text":"class SparseArray:\n\n def __init__(self, values):\n self.length = len(values)\n self.data = {}\n for i, val in enumerate(values):\n if val:\n self.data[i] = val\n\n def __len__(self):\n return(self.length)\n\n def __getitem__(self, index):\n try:\n return(self.data[index])\n except KeyError:\n return(0)\n\n def __setitem__(self, index, value):\n if (index >= self.length): # valid indices run from 0 to length - 1\n raise(IndexError)\n if not value:\n try: \n del self.data[index]\n except KeyError:\n pass\n else:\n self.data[index] = value\n\n def __delitem__(self, index):\n if (index >= self.length): # bad index\n raise(IndexError)\n try:\n del self.data[index]\n except KeyError:\n pass # 0 already\n finally: \n # move all the values past the index up in the array\n # (iterate over a sorted copy of the keys: mutating the dict\n # while iterating over items() would raise a RuntimeError)\n for i in sorted(self.data):\n if (i > index):\n self.data[i-1] = self.data.pop(i)\n self.length -= 1\n\n def append(self, additional):\n try:\n for val in additional:\n if val:\n self.data[self.length] = val\n self.length += 1\n except TypeError:\n self.data[self.length] = additional\n self.length += 1\n\n","sub_path":"students/cheryl/session08/sparse_array.py","file_name":"sparse_array.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278429649","text":"from random import sample\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as pltc\n\npopulations = [1420, 1368, 329, 269, 212, 204]\ncolors = sample(list(pltc.cnames.values()), len(populations))\n\n# a fixed palette could be used instead of the random sample:\n# ['#FF0000', '#FF8C00', '#4169E1', '#48D1CC', '#FFFF00', '#006400']\ncountry = ['China', 'India', 'US', 'Indonesia', 'Brazil', 'Pakistan']\n\nspace_slice = [0.05, 0, 0, 0, 0, 0]\n\nplt.figure(figsize = (6,5))\nplt.pie(populations, labels = country, autopct = '%.2f%%', shadow = True, explode = space_slice, colors = colors)\nplt.legend(country, loc = (-0.25, 0.7), shadow = True)\nplt.show()\n\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"438231852","text":"import cv2\n\nfrom motion_detection.common.constants import BLUR_KERNEL_SIZE\n\ndef blur_and_convert_to_gray(frame, kernel=BLUR_KERNEL_SIZE, sigmaX=1):\n if len(frame.shape) == 3:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, kernel, sigmaX)\n return frame\n\n\ndef calc_difference(frame1, frame2, thresh=(25, 255), dilate_kernel=None, dilate_iterations=2):\n frame_diff = cv2.absdiff(frame1, frame2)\n # use the (low, high) values from the thresh argument instead of hard-coding them\n _, thresh = cv2.threshold(frame_diff, thresh[0], thresh[1], cv2.THRESH_BINARY)\n cv2.imshow(\"thresh\", thresh)\n # pass iterations by keyword: the third positional argument of cv2.dilate is dst\n thresh = cv2.dilate(thresh, dilate_kernel, iterations=dilate_iterations)\n return thresh
\n\n","sub_path":"motion_detection/utils/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"545538468","text":"import csv\nimport numpy as np\nimport pandas as pd\nimport graphviz\nimport matplotlib.pyplot as plt\nimport sklearn.tree as tree\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import svm\n\nfrom wine import NeuralNetworkClassifierUtil\nfrom wine import DecisionTreeClassifierUtil\nfrom wine import BoostingDTUtil\nfrom wine import SVMClassifier\nfrom wine import KNNutil\n\n#################################################\n#1. wine quality data set\ndata = pd.read_csv('winequality-data.csv')\nX = data.iloc[:, :11]\ny = data.iloc[:, 11]\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.25)\nfeatures = list(X_train.columns.values)\n#print(features)\n\n#1.A.Decision Tree classifier\nDecisionTreeClassifierUtil.wineDecisionTree(X,y,features)\n\n#1.B.Neural network classifier learning curve\nNeuralNetworkClassifierUtil.neuralNetwork(X,y)\n\n#1.C.Boosted DT classifier\nBoostingDTUtil.boosting(X,y)\n\n#1.D. SVM classifier\nSVMClassifier.svmClassifier(X,y)\n\n#1.E.KNN classifier\nKNNutil.knnClassifierMethod(X,y,features)\n\n#########################################\n# learning curve function from sklearn tutorial\n\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n title : string\n Title for the chart.\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. 
If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validators that can be used here.\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n \"\"\"\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Total Training examples\")\n plt.ylabel(\"Accuracy\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training accuracy\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Testing accuracy\")\n\n plt.legend(loc=\"best\")\n return plt\n\n\n###############################################\n# Titanic survival data set\ndata = pd.read_csv('titanic_train.csv')\nX = data.iloc[:, 2:]\ny = data.iloc[:, 1]\nfeatures = list(X.columns.values)\n\n# Decision Tree classifier\n# decision tree learning curve of tree depth 3\nclf = DecisionTreeClassifier(random_state=0, criterion='gini', max_depth=3)\nplot_learning_curve(clf, \"Decision Tree(depth=3)\", X, y, ylim=[0, 1])\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.25)\nclf = clf.fit(X_train, y_train)\ndot_data = tree.export_graphviz(clf, out_file=None,\n feature_names=features,\n class_names=list(map(str, set(y))),\n filled=True, rounded=True,\n special_characters=True)\ngraph = graphviz.Source(dot_data)\ngraph.render(\"titanic\")  # graphviz appends the format extension itself\n\n\n# Neural network classifier\nclf = MLPClassifier(hidden_layer_sizes=(5,), random_state=0, solver=\"lbfgs\")\nplot_learning_curve(clf, \"Neural Network classifier (one hidden layer of 5 units)\", X, y, ylim=[0, 1])\n\n\n# Boosted DT classifier\nclf = AdaBoostClassifier(n_estimators=100)\nplot_learning_curve(clf, \"Adaboost(n_estimators=100)\", X, y, ylim=[0, 1])\n\nclf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.001, max_depth=3, random_state=0, max_leaf_nodes=5)\nplot_learning_curve(clf, \"Gradient Boosting(n_estimators=100)\", X, y, ylim=[0, 1])\n\n\n# SVM classifier\nfrom sklearn.model_selection import GridSearchCV\n\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-4, 1e-3, 1e-2, 0.1, 1, 10, 100],\n 'C': [0.01, 0.1, 1, 10, 100, 1000]}]\nclf = GridSearchCV(svm.SVC(), tuned_parameters, cv=5, scoring='accuracy')\nclf.fit(X, y)\nprint(clf.best_params_)\n\nclf = svm.SVC(C=10, kernel=\"rbf\", gamma=0.01)\nplot_learning_curve(clf, \"SVM(RBF kernel & gamma=0.01)\", X, y, ylim=[0, 1])\n\nclf = svm.SVC(C=10, kernel=\"rbf\", gamma=0.001)\nplot_learning_curve(clf, \"SVM(RBF kernel & gamma=0.001)\", X, y, ylim=[0, 1])\n\nclf = svm.SVC(C=10, kernel=\"rbf\", gamma=0.0001)\nplot_learning_curve(clf, \"SVM(RBF kernel & gamma=0.0001)\", X, y, ylim=[0, 1])\n\nclf = svm.SVC(C=100, kernel=\"linear\", gamma=0.0001)\nplot_learning_curve(clf, \"SVM(linear kernel & gamma=0.0001)\", X, y, ylim=[0, 1])\n\n# KNN classifier\nclf = KNeighborsClassifier(1, weights=\"distance\",
p=2)\nplot_learning_curve(clf, \"KNN (K=1)\", X, y, ylim=[0, 1])\n\nclf = KNeighborsClassifier(5, weights=\"distance\", p=2)\nplot_learning_curve(clf, \"KNN (K=5)\", X, y, ylim=[0, 1])\n\nclf = KNeighborsClassifier(10, weights=\"distance\", p=2)\nplot_learning_curve(clf, \"KNN (K=10)\", X, y, ylim=[0, 1])\n\nclf = KNeighborsClassifier(10, weights=\"uniform\", p=2)\nplot_learning_curve(clf, \"KNN (K=10 & uniform weights)\", X, y, ylim=[0, 1])\n\n","sub_path":"Assign1.py","file_name":"Assign1.py","file_ext":"py","file_size_in_byte":7209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35107733","text":"from django.http.response import HttpResponse\r\nfrom django.shortcuts import redirect, render\r\nfrom rest_framework import viewsets\r\nfrom rest_framework.decorators import api_view\r\nfrom rest_framework.response import Response\r\nimport json\r\nfrom .models import *\r\nfrom django.views.decorators.csrf import csrf_exempt\r\n\r\n# Create your views here.\r\n\r\n@api_view()\r\ndef DishAPIView(request,rest):\r\n rest=rest.replace('_',' ')\r\n x=DishModel.objects.all()\r\n data=[]\r\n dishes=[]\r\n dishes_price=[]\r\n for i in x:\r\n if rest in i.rest_available:\r\n data.append({'restaurant': rest,'dish_name':i.dish_name,'dish_price':i.dish_price})\r\n return Response(data)\r\n\r\n@api_view(['POST'])\r\n@csrf_exempt\r\ndef SaveDetailsAPI(request):\r\n dishes=request.POST.getlist('dish')\r\n name=request.POST.get('name')\r\n rest=request.POST.get('restaurant')\r\n price=0\r\n for i in dishes:\r\n x=DishModel.objects.filter(dish_name=i)\r\n for j in x:\r\n price+=j.dish_price\r\n dishes1=','.join(dishes)\r\n y=PlaceOrderModel(user_order=name,rest_placed=rest,dish_placed=dishes1,total_price=price)\r\n y.save()\r\n data={'user':name,'restaurant':rest,'dishes':dishes,'total_price':price}\r\n return Response(data)\r\n\r\ndef home(request):\r\n if request.method=='POST':\r\n try:\r\n user_typ=request.POST['usr_typ']\r\n print(user_typ)\r\n if user_typ=='Customer':\r\n name_1=request.POST.get('email')\r\n password=request.POST.get('psw')\r\n try:\r\n x=UserModel.objects.get(username=name_1,password=password,user_type=user_typ)\r\n name=x.name\r\n y=RestaurantModel.objects.all()\r\n context={'name':name,'rest_list':y}\r\n return render(request,'temp_one/home2.html',context)\r\n except Exception as e:\r\n message_zip=zip(['warning'],['Login Error. 
Please sign up if you haven\\'t registered'])\r\n return render(request,'temp_one/home.html',{'message_zip':message_zip})\r\n else:\r\n name_1=request.POST.get('email1')\r\n password=request.POST.get('psw1')\r\n address=request.POST.get('address')\r\n print(name_1,password)\r\n try:\r\n a=UserModel.objects.get(username=name_1,password=password,user_type=user_typ)\r\n rest_name=a.name\r\n print(rest_name)\r\n x=PlaceOrderModel.objects.filter(rest_placed=rest_name)\r\n print(x)\r\n x1=[]\r\n for i in x:\r\n dishes=i.dish_placed\r\n try:\r\n dishes=dishes.replace('\\'','').replace('\"','')\r\n dishes=dishes.strip(\"][\").split(\",\")\r\n dishes=','.join(dishes)\r\n except:\r\n pass\r\n x1.append(dishes)\r\n try:\r\n print('inside if')\r\n return render(request,'temp_three/rest_acc.html',{'data':zip(x,x1),'rest_name':rest_name})\r\n except:\r\n return render(request,'temp_three/rest_acc.html',{'rest_name':rest_name})\r\n except:\r\n return render(request,'temp_one/home.html')\r\n except:\r\n return render(request,'temp_one/home.html')\r\n else:\r\n return render(request,'temp_one/home.html')\r\n##RESTAURANT ACCOUNT VIEW\r\ndef RestaurantAccView(request):\r\n rest=request.POST['restaurant']\r\n x=PlaceOrderModel.objects.filter(rest_placed=rest)\r\n if x.exists():\r\n return render(request,'temp_three/rest_acc.html',{'data':x})\r\ndef signup(request):\r\n if request.method==\"POST\":\r\n user_typ=request.POST.get('usr_typ')\r\n if user_typ=='Customer':\r\n name_1=request.POST['name']\r\n username_1=request.POST['email']\r\n password=request.POST['psw']\r\n else:\r\n name_1=request.POST['name1']\r\n username_1=request.POST['email1']\r\n password=request.POST['psw1']\r\n address=request.POST['address']\r\n try:\r\n if name_1=='' or username_1=='' or password=='' or address=='':\r\n return redirect('sign-up')\r\n except:\r\n if name_1=='' or username_1=='' or password=='':\r\n return redirect('sign-up')\r\n print(name_1)\r\n print(username_1)\r\n x=UserModel.objects.filter(username=username_1,user_type=user_typ)\r\n if x.exists():\r\n message_zip=zip(['warning'],['User already exists, please login'])\r\n return render(request,'temp_one/home.html',{'message_zip':message_zip})\r\n else:\r\n y=UserModel(name=name_1,username=username_1,password=password,user_type=user_typ)\r\n if user_typ=='Restaurant':\r\n z=RestaurantModel(rest_name=name_1,rest_address=address)\r\n z.save()\r\n y.save()\r\n message_zip=zip(['success'],['Your account has been registered. 
Please sign in now'])\r\n return render(request,'temp_one/home.html',{'message_zip':message_zip})\r\n else:\r\n return render(request,'temp_one/signup.html')\r\n\r\n\r\ndef my_orders(request,name1):\r\n # if request.method==\"POST\":\r\n x=PlaceOrderModel.objects.filter(user_order=name1)\r\n orders_list=[]\r\n restaurants_list=[]\r\n price_list=[]\r\n date_time_list=[]\r\n for i in x:\r\n orders=i.dish_placed\r\n orders=orders.replace('\\'','').replace('\"','')\r\n orders=orders.strip(\"][\").split(\",\")\r\n orders=','.join(orders)\r\n restaurants=i.rest_placed\r\n price=i.total_price\r\n price_list.append(price)\r\n date_time=i.date\r\n orders_list.append(orders)\r\n restaurants_list.append(restaurants)\r\n date_time_list.append(date_time)\r\n print(orders_list)\r\n context={'ordered_zip':zip(orders_list,restaurants_list,price_list,date_time_list),'name':name1}\r\n return render(request,'temp_one/orders.html',context)\r\n # else:\r\n # return render(request,'temp_one/home.html')\r\n\r\ndef RestaurantView(request):\r\n if request.method==\"POST\":\r\n rest=request.POST.get('restaurant')\r\n user=request.POST.get('name')\r\n print(user)\r\n x=DishModel.objects.all()\r\n dish_list=[]\r\n price_list=[]\r\n for i in x:\r\n if rest in i.rest_available:\r\n dish=i.dish_name\r\n price=i.dish_price\r\n dish_list.append(dish)\r\n price_list.append(price)\r\n y=RestaurantModel.objects.get(rest_name=rest)\r\n address=y.rest_address\r\n context={'dish_n_price_list':zip(dish_list,price_list),'name':user,'restaurant':rest,'address':address}\r\n return render(request,'temp_two/restaurant.html',context)\r\n else:\r\n return redirect('home')\r\n@csrf_exempt\r\ndef PlaceOrderView(request):\r\n if request.method=='POST':\r\n dishes=request.POST.getlist('dish')\r\n name=request.POST.get('name')\r\n rest=request.POST.get('restaurant')\r\n price=0\r\n print(dishes)\r\n for i in dishes:\r\n x=DishModel.objects.filter(dish_name=i)\r\n for j in x:\r\n price+=j.dish_price\r\n dishes1=','.join(dishes)\r\n y=PlaceOrderModel(user_order=name,rest_placed=rest,dish_placed=dishes1,total_price=price)\r\n y.save()\r\n return redirect('home')\r\n else:\r\n return redirect('home')\r\n\r\ndef AllOrders(request):\r\n print(PlaceOrderModel.objects.all())\r\n return render(request,'temp_two/allorders.html',{'details':'x'})\r\ndef AddItem(request,name1):\r\n name1=name1.replace('_',' ')\r\n if request.method=='POST':\r\n item_name=request.POST.get('item_name')\r\n item_price=request.POST.get('item_price')\r\n try:\r\n x=DishModel.objects.filter(dish_name=item_name)\r\n print(x)\r\n n=''\r\n if x.exists():\r\n for i in x:\r\n if name1 in i.rest_available:\r\n n+='1'\r\n break\r\n else:\r\n n=''\r\n val=x.rest_available\r\n x.update(rest_available=val+','+name1)\r\n if n=='':\r\n return redirect('add-items')\r\n else:\r\n message='This item has already been added'\r\n return render(request,'temp_three/add_items.html',{'rest_name':name1,'message':message})\r\n else:\r\n x=DishModel(dish_name=item_name,dish_price=item_price,rest_available=name1)\r\n x.save()\r\n message='The item has been added'\r\n return render(request,'temp_three/add_items.html',{'rest_name':name1,'message':message})\r\n except:\r\n print('o')\r\n x=DishModel(dish_name=item_name,dish_price=item_price,rest_available=name1)\r\n x.save()\r\n message='The item has been added'\r\n return render(request,'temp_three/add_items.html',{'rest_name':name1,'message':message})\r\n else:\r\n return 
render(request,'temp_three/add_items.html',{'rest_name':name1})","sub_path":"proj_zomato/app_one/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"182374972","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPrint the query log to standard out.\n\nUseful for optimizing database calls.\n\nInspired by the method at: \n\"\"\"\n\nfrom django.conf import settings\nfrom django.db import connection\n\n\nclass QueryLogMiddleware:\n\n def process_response(self, request, response):\n if settings.DEBUG:\n queries = {}\n for query in connection.queries:\n sql = query['sql']\n queries.setdefault(sql, 0)\n queries[sql] += 1\n # print the collected counts, as the module docstring promises\n for sql, count in queries.items():\n print('%s x %s' % (count, sql))\n return response\n\n\n","sub_path":"schools/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44274122","text":"#!/usr/bin/env python3\n# Copyright (c) 2018 Kevin Weiss, for HAW Hamburg \n#\n# This file is subject to the terms and conditions of the MIT License. See the\n# file LICENSE in the top level directory for more details.\n# SPDX-License-Identifier: MIT\n\"\"\"This module generates output files based on memory maps.\"\"\"\nfrom copy import deepcopy\nfrom logging import debug, info\n\n\ndef _insert_at_front(field_names, name):\n if name in field_names:\n field_names.insert(0, field_names.pop(field_names.index(name)))\n\n\ndef _find_unique_keys(records):\n info(\"Finding unique keys\")\n field_names = []\n for record in records:\n for field_name in record.keys():\n if field_name not in field_names:\n debug(\"Found %r\", field_name)\n field_names.append(field_name)\n field_names = sorted(field_names)\n # Ordered but starts with name, offset, total_size, description\n _insert_at_front(field_names, 'description')\n _insert_at_front(field_names, 'type')\n _insert_at_front(field_names, 'type_size')\n _insert_at_front(field_names, 'total_size')\n _insert_at_front(field_names, 'offset')\n _insert_at_front(field_names, 'name')\n\n return field_names\n\n\ndef _get_name(names):\n full_name = ''\n for name in names:\n if name.isdigit():\n full_name = '{}[{}].'.format(full_name[:-1], name)\n else:\n full_name += '{}.'.format(name)\n return full_name[:-1]\n\n\ndef parse_mem_map_to_csv(mem_map):\n \"\"\"Parses a memory map to a csv table string.\"\"\"\n info(\"Parsing memory map to csv string\")\n local_mem_map = deepcopy(mem_map)\n fields = _find_unique_keys(local_mem_map['records'])\n csv_str = ','.join(fields)\n for record in local_mem_map['records']:\n csv_str += '\\n'\n name = _get_name(record['name'])\n for field in fields:\n if field not in record:\n record[field] = ''\n if record[field] is None:\n record[field] = ''\n if field == 'name':\n csv_str += name\n else:\n csv_str += str(record[field]).replace(',', '\",\"')\n csv_str += ','\n csv_str = csv_str[:-1]\n return csv_str\n\n\ndef parse_mem_map_to_access_c(mem_maps):\n \"\"\"Parses access registers based on memory map to a .c string.\"\"\"\n a_str = \"#include \\\"app_access.h\\\"\\n\"\n for mem_map in deepcopy(mem_maps):\n a_str += \"\\nconst uint8_t %s_ACCESS[] = { \\n\" % mem_map['name'].upper()\n size = 0\n map_size = 0\n for record in mem_map['records']:\n debug(record)\n if 'bit_offset' not in record:\n size = record['type_size']\n if 'total_size' in record:\n size = record['total_size']\n for access_byte in range(size):\n if access_byte != 0:\n a_str
+= \", \"\n a_str += \"0x%02X\" % record[\"access\"]\n size += 1\n map_size += 1\n if record != mem_map['records'][-1]:\n a_str += \",\"\n a_str += \" /* {} */\\n\".format('_'.join(record[\"name\"]))\n a_str = a_str.rstrip(',')\n a_str += \"/* total size %d */\\n};\" % map_size\n return a_str\n","sub_path":"memory_map_manager/mm_output_parser.py","file_name":"mm_output_parser.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474087521","text":"from functools import partial\n\nfrom .buildfile import BuildFile\nfrom .workspace import Workspace, find_workspace\nfrom .target import Target\n\n\ndef python_library(build: BuildFile, workspace: Workspace, name='', files=None):\n files = files if files is not None else []\n\n target = Target(name, workspace, build)\n target.setup_build()\n target.link_files(target.builddir, files)\n\n\ndef run(build: BuildFile, workspace: Workspace):\n build_locals = {\n 'python_library': partial(python_library, build, workspace)\n }\n build.execute(build_locals)\n print(build.target)\n\n\ndef main():\n workspace = find_workspace()\n run(workspace.builds[0], workspace)\n","sub_path":"forgebuild/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222158979","text":"# coding:utf-8 \n'''\ncreated on 2018/2/28\n\n@author:Dxq\n'''\nimport numpy as np\nfrom aip import AipFace\nimport urllib.request\n\n\"\"\" 你的 APPID AK SK \"\"\"\nAPP_ID = '10365287'\nAPI_KEY = 'G7q4m36Yic1vpFCl5t46yH5K'\nSECRET_KEY = 'MneS2GDvPQ5QsGpVtSaHXGAlvwHu1XnC '\nclient = AipFace(APP_ID, API_KEY, SECRET_KEY)\n\n\n# 本地图片\ndef get_file_content(filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\n\n# 地址图片\ndef get_url_img(filePath):\n image_bytes = urllib.request.urlopen(filePath).read()\n return image_bytes\n\n\ndef landmark72_trans(points):\n num = len(points)\n data = np.zeros([num, 2])\n data[:, 0] = [p['x'] for p in points]\n data[:, 1] = [p['y'] for p in points]\n\n res = dict()\n res['chin'] = data[:13]\n res['left_eye'] = data[13:22]\n res['right_eye'] = data[30:39]\n res['left_brow'] = data[22:30]\n res['right_brow'] = data[39:47]\n res['nose'] = data[47:58]\n res['lip'] = data[58:72]\n\n return data, res\n\n\ndef get_baseInfo(full_path):\n options = {\n 'max_face_num': 1,\n # 'face_fields': \"age,beauty,expression,faceshape,gender,glasses,landmark,race,qualities\",\n 'face_fields': \"landmark,faceshape,gender,glasses,qualities\"\n }\n res = client.detect(get_file_content(full_path), options=options)\n assert res['result_num'] == 1\n result = res['result'][0]\n # assert result['face_probability'] > 0.8\n\n isPerson = result['qualities']['type']['human']\n isCartoon = result['qualities']['type']['cartoon']\n # assert isPerson > isCartoon\n\n landmark72_list, landmark72_dict = landmark72_trans(result['landmark72'])\n # gender = result['gender']\n # glasses = result['glasses']\n\n angle = result['rotation_angle']\n\n return landmark72_list, landmark72_dict, angle\n","sub_path":"eyelid_model/baidu.py","file_name":"baidu.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"294700445","text":"# -*- coding: utf-8 -*-\n# !@time: 2020/6/10 上午10:59\n# !@author: superMC @email: 18758266469@163.com\n# !@fileName: image_tool.py\nimport random\n\nimport cv2\n\nfrom 
self_utils.utils import colors\n\n\ndef crop_box(image, box):\n x1 = int(box[0])\n y1 = int(box[1])\n x2 = int(box[2])\n y2 = int(box[3])\n return image[y1:y2, x1:x2]\n\n\ndef change_coord(landmark_x, landmark_y, x0, y0):\n new_landmark_x = landmark_x - x0\n new_landmark_y = landmark_y - y0\n return new_landmark_x, new_landmark_y\n\n\n# box1 face box2 person\ndef person_face_cost(person_box, face_box):\n # print('iou box1:', box1)\n # print('iou box2:', box2)\n ix1 = max(person_box[0], face_box[0])\n ix2 = min(person_box[2], face_box[2])\n iy1 = max(person_box[1], face_box[1])\n iy2 = min(person_box[3], face_box[3])\n iw = max(0, (ix2 - ix1))\n ih = max(0, (iy2 - iy1))\n iarea = iw * ih\n area1 = (face_box[2] - face_box[0]) * (face_box[3] - face_box[1])\n return 1 - (iarea / area1)\n\n\ndef warp_affine(image, x1, y1, x2, y2, scale=1.0):\n eye_center = ((x1 + x2) / 2, (y1 + y2) / 2)\n\n dy = y2 - y1\n dx = x2 - x1\n # 计算旋转角度\n angle = cv2.fastAtan2(dy, dx)\n rot = cv2.getRotationMatrix2D(eye_center, angle, scale=scale)\n\n rot_img = cv2.warpAffine(image, rot, dsize=(image.shape[1], image.shape[0]))\n\n return rot_img\n\n\ndef plot_boxes(image, persons, fps=25):\n im_height, im_width, _ = image.shape\n scale = 0.6\n persons = sorted(persons, key=lambda x: x.id)\n for i in range(len(persons)):\n pBox = persons[i].pBox\n color = colors[persons[i].id]\n image = cv2.rectangle(image, (pBox[0], pBox[1]), (pBox[2], pBox[3]), color, 2)\n fBox = persons[i].fBox\n if fBox is not None:\n image = cv2.rectangle(image, (fBox[0], fBox[1]), (fBox[2], fBox[3]), color, 2)\n\n cv2.putText(image, str(persons[i].id) + \" \" + persons[i].name, (pBox[0] + 5, pBox[3] - 5),\n cv2.FONT_HERSHEY_SIMPLEX, scale, color)\n\n cv2.putText(image, str(persons[i].id) + \" \" + persons[i].name + \" \" + str(int(persons[i].fps_num / fps)),\n (5, im_height - 20 * (i + 1)), cv2.FONT_HERSHEY_SIMPLEX, scale, color)\n return image\n","sub_path":"self_utils/image_tool.py","file_name":"image_tool.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55144884","text":"# credit source: https://www.raspberrypi.org/forums/viewtopic.php?t=196010\n#########################################################\n#MQTT\nimport paho.mqtt.client as mqtt\nimport paho.mqtt.publish as publish\n\n\n\n#########################################################\n#MQTT\ndef connectionStatus(client, userdata, flags, rc):\n mqttClient.subscribe(\"connect\")\n\ndef messageDecoder(client, userdata, msg):\n message = msg.payload.decode(encoding='UTF-8')\n if message == \"get brightness\":\n mqttClient.publish(\"to iOS\", brightness)\n print(\"Brightness sent to iOS device!\")\n if message == \"get activity\":\n mqttClient.publish(\"to iOS\", label)\n print(\"Activity sent to iOS device!\")\n else:\n print(\"Unknown message!\")\n\n\nf = open(\"result.txt\", \"r\")\ntemp = f.readline().split(\" \")\nlabel = temp[0]\nbrightness = temp[1]\nprint(label)\nprint(brightness)\nf.close()\n\n\n#Instantiate Eclipse Paho as mqttClient\nmqttClient = mqtt.Client(\"RPI\")\n\n#Set calling function functions to mqttClient\nmqttClient.on_connect = connectionStatus\nmqttClient.on_message = messageDecoder\n\n\n#Connect client to server\nmqttClient.connect(\"192.168.1.3\")\n\n\n#Monitor client activity 
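A worked example for person_face_cost above: the cost is 1 - (intersection area / face area), so a face box fully inside the person box costs 0 and a disjoint one costs 1.

person = [0, 0, 10, 10]
inside_face = [2, 2, 4, 4]       # fully contained: intersection == face area
outside_face = [20, 20, 22, 22]  # no overlap: intersection == 0

assert person_face_cost(person, inside_face) == 0.0
assert person_face_cost(person, outside_face) == 1.0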
forever\nmqttClient.loop_forever()\n","sub_path":"cnn/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494568387","text":"from flask import Flask, redirect, url_for, request, abort\nimport json\nimport uuid\n\napp = Flask(__name__)\n\naliens = []\n\n\n@app.route('/api/alien', methods=['POST'])\ndef create_alien():\n req = json.loads(to_not_shit(request.data.decode(\"utf-8\")))\n if not all_exist(req['friends']):\n abort(400)\n id = str(uuid.uuid4())\n req['id'] = id\n aliens.append(req)\n\n return to_shit(json.dumps(req, sort_keys=True, indent=4))\n\n\n@app.route('/api/alien/name/', methods=['GET'])\ndef get_alien(id):\n for a in aliens:\n if a['id'] == id:\n return to_shit(json.dumps(a, sort_keys=True, indent=4))\n abort(400)\n\n\n@app.route('/api/alien', methods=['GET'])\ndef get_all_alien():\n return to_shit(json.dumps({'aliens': aliens}, sort_keys=True, indent=4))\n\n\n@app.route('/api/alien/name/', methods=['DELETE'])\ndef delete_alien(id):\n for a in aliens:\n if a['id'] == id:\n aliens.remove(a)\n return (\"\", 200)\n\n abort(400)\n\n\n@app.route('/api/alien/name/', methods=['PUT'])\ndef edit_alien(id):\n for a in aliens:\n if a['id'] == id:\n aliens.remove(a)\n na = json.loads(to_not_shit(request.data.decode(\"utf-8\")))\n na['id'] = id\n aliens.append(na)\n return (\"\", 200)\n\n abort(400)\n\n\ndef to_not_shit(json_fucke: str):\n return json_fucke.replace(\"^\", \"{\").replace(\"&\", \"}\").replace(\"/*\", \"[\").replace(\"*/\", \"]\").replace(\"'\", \"\\\"\")\n\n\ndef to_shit(json: str):\n return json.replace(\"{\", \"^\").replace(\"}\", \"&\").replace(\"[\", \"/*\").replace(\"]\", \"*/\").replace(\"\\\"\", \"'\")\n\n\ndef all_exist(friends):\n for f in friends:\n if not exist(f):\n return False\n\n return True\n\n\ndef exist(name):\n for a in aliens:\n if a['name'] == name:\n return True\n\n return False\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"Numero1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331812859","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 22/11/17\n\n@author: Maurizio Ferrari Dacrema\n\"\"\"\n\nfrom Recommender_import_list import *\nfrom Conferences.SIGIR.CMN_our_interface.CMN_RecommenderWrapper import CMN_RecommenderWrapper\n\n\nfrom ParameterTuning.run_parameter_search import runParameterSearch_Collaborative\n\nimport os, traceback, argparse\nfrom functools import partial\nimport numpy as np\n\n\nfrom ParameterTuning.SearchSingleCase import SearchSingleCase\nfrom ParameterTuning.SearchAbstractClass import SearchInputRecommenderArgs\n\nfrom Utils.ResultFolderLoader import ResultFolderLoader, generate_latex_hyperparameters\nfrom Utils.assertions_on_data_for_experiments import assert_implicit_data, assert_disjoint_matrices\nfrom Utils.plot_popularity import plot_popularity_bias, save_popularity_statistics\n\n\ndef read_data_split_and_search(dataset_name,\n flag_baselines_tune = False,\n flag_DL_article_default = False, flag_DL_tune = False,\n flag_print_results = False):\n\n from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader\n\n\n result_folder_path = \"result_experiments/{}/{}_{}/\".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)\n\n if dataset_name == \"citeulike\":\n dataset = CiteULikeReader(result_folder_path)\n\n 
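One bug worth flagging in the MQTT record above: messageDecoder chains if / if / else, so a "get brightness" request falls through the second test into the final else and also prints "Unknown message!". A sketch of the intended control flow (mqttClient, brightness and label come from that script):

def messageDecoder(client, userdata, msg):
    message = msg.payload.decode(encoding='UTF-8')
    if message == "get brightness":
        mqttClient.publish("to iOS", brightness)
        print("Brightness sent to iOS device!")
    elif message == "get activity":  # elif makes the branches exclusive
        mqttClient.publish("to iOS", label)
        print("Activity sent to iOS device!")
    else:
        print("Unknown message!")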
elif dataset_name == \"epinions\":\n dataset = EpinionsReader(result_folder_path)\n\n elif dataset_name == \"pinterest\":\n dataset = PinterestICCVReader(result_folder_path)\n\n\n URM_train = dataset.URM_DICT[\"URM_train\"].copy()\n URM_validation = dataset.URM_DICT[\"URM_validation\"].copy()\n URM_test = dataset.URM_DICT[\"URM_test\"].copy()\n URM_test_negative = dataset.URM_DICT[\"URM_test_negative\"].copy()\n\n\n # Ensure IMPLICIT data and DISJOINT sets\n assert_implicit_data([URM_train, URM_validation, URM_test, URM_test_negative])\n\n\n if dataset_name == \"citeulike\":\n assert_disjoint_matrices([URM_train, URM_validation, URM_test])\n assert_disjoint_matrices([URM_test, URM_test_negative])\n\n elif dataset_name == \"pinterest\":\n assert_disjoint_matrices([URM_train, URM_validation, URM_test])\n assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])\n\n else:\n assert_disjoint_matrices([URM_train, URM_validation, URM_test, URM_test_negative])\n\n\n # If directory does not exist, create\n if not os.path.exists(result_folder_path):\n os.makedirs(result_folder_path)\n\n\n collaborative_algorithm_list = [\n ItemKNNCFRecommender,\n \n ]\n\n metric_to_optimize = \"HIT_RATE\"\n n_cases = 50\n n_random_starts = 15\n\n\n\n\n\n algorithm_dataset_string = \"{}_{}_\".format(ALGORITHM_NAME, dataset_name)\n\n plot_popularity_bias([URM_train + URM_validation, URM_test],\n [\"Training data\", \"Test data\"],\n result_folder_path + algorithm_dataset_string + \"popularity_plot\")\n\n save_popularity_statistics([URM_train + URM_validation + URM_test, URM_train + URM_validation, URM_test],\n [\"Full data\", \"Training data\", \"Test data\"],\n result_folder_path + algorithm_dataset_string + \"popularity_statistics\")\n\n\n\n from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample\n\n evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[5])\n evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=[5, 10])\n\n\n\n runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,\n URM_train = URM_train,\n URM_train_last_test = URM_train + URM_validation,\n metric_to_optimize = metric_to_optimize,\n evaluator_validation_earlystopping = evaluator_validation,\n evaluator_validation = evaluator_validation,\n evaluator_test = evaluator_test,\n output_folder_path = result_folder_path,\n parallelizeKNN = False,\n allow_weighting = True,\n resume_from_saved = True,\n n_cases = n_cases,\n n_random_starts = n_random_starts)\n\n\n\n\n\n if flag_baselines_tune:\n\n for recommender_class in collaborative_algorithm_list:\n try:\n runParameterSearch_Collaborative_partial(recommender_class)\n except Exception as e:\n print(\"On recommender {} Exception {}\".format(recommender_class, str(e)))\n traceback.print_exc()\n\n\n ################################################################################################\n ######\n ###### DL ALGORITHM\n ######\n\n if flag_DL_article_default:\n\n try:\n\n CMN_article_hyperparameters = {\n \"epochs\": 30,\n \"epochs_gmf\": 30,\n \"hops\": 2,\n \"neg_samples\": 4,\n \"reg_l2_cmn\": 1e-1,\n \"reg_l2_gmf\": 1e-4,\n \"pretrain\": True,\n \"learning_rate\": 1e-3,\n \"verbose\": False,\n }\n\n if dataset_name == \"citeulike\":\n CMN_article_hyperparameters[\"batch_size\"] = 128\n CMN_article_hyperparameters[\"embed_size\"] = 50\n\n elif dataset_name == \"epinions\":\n CMN_article_hyperparameters[\"batch_size\"] = 128\n 
CMN_article_hyperparameters[\"embed_size\"] = 40\n\n elif dataset_name == \"pinterest\":\n CMN_article_hyperparameters[\"batch_size\"] = 256\n CMN_article_hyperparameters[\"embed_size\"] = 50\n\n\n\n CMN_earlystopping_hyperparameters = {\n \"validation_every_n\": 5,\n \"stop_on_validation\": True,\n \"evaluator_object\": evaluator_validation,\n \"lower_validations_allowed\": 5,\n \"validation_metric\": metric_to_optimize\n }\n\n\n parameterSearch = SearchSingleCase(CMN_RecommenderWrapper,\n evaluator_validation=evaluator_validation,\n evaluator_test=evaluator_test)\n\n recommender_input_args = SearchInputRecommenderArgs(\n CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],\n FIT_KEYWORD_ARGS = CMN_earlystopping_hyperparameters)\n\n recommender_input_args_last_test = recommender_input_args.copy()\n recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[0] = URM_train + URM_validation\n\n parameterSearch.search(recommender_input_args,\n recommender_input_args_last_test = recommender_input_args_last_test,\n fit_hyperparameters_values=CMN_article_hyperparameters,\n output_folder_path = result_folder_path,\n resume_from_saved = True,\n output_file_name_root = CMN_RecommenderWrapper.RECOMMENDER_NAME)\n\n\n\n except Exception as e:\n\n print(\"On recommender {} Exception {}\".format(CMN_RecommenderWrapper, str(e)))\n traceback.print_exc()\n\n\n\n ################################################################################################\n ######\n ###### PRINT RESULTS\n ######\n\n if flag_print_results:\n\n n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)\n file_name = \"{}..//{}_{}_\".format(result_folder_path, ALGORITHM_NAME, dataset_name)\n\n result_loader = ResultFolderLoader(result_folder_path,\n base_algorithm_list = None,\n other_algorithm_list = [CMN_RecommenderWrapper],\n KNN_similarity_list = KNN_similarity_to_report_list,\n ICM_names_list = None,\n UCM_names_list = None)\n\n\n result_loader.generate_latex_results(file_name + \"{}_latex_results.txt\".format(\"article_metrics\"),\n metrics_list = [\"HIT_RATE\", \"NDCG\"],\n cutoffs_list = [5, 10],\n table_title = None,\n highlight_best = True)\n\n result_loader.generate_latex_results(file_name + \"{}_latex_results.txt\".format(\"all_metrics\"),\n metrics_list = [\"PRECISION\", \"RECALL\", \"MAP\", \"MRR\", \"NDCG\", \"F1\", \"HIT_RATE\", \"ARHR\",\n \"NOVELTY\", \"DIVERSITY_MEAN_INTER_LIST\", \"DIVERSITY_HERFINDAHL\", \"COVERAGE_ITEM\", \"DIVERSITY_GINI\", \"SHANNON_ENTROPY\"],\n cutoffs_list = [10],\n table_title = None,\n highlight_best = True)\n\n result_loader.generate_latex_time_statistics(file_name + \"{}_latex_results.txt\".format(\"time\"),\n n_evaluation_users=n_test_users,\n table_title = None)\n\n\n\n\n\nif __name__ == '__main__':\n\n ALGORITHM_NAME = \"CMN\"\n CONFERENCE_NAME = \"SIGIR\"\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', '--baseline_tune', help=\"Baseline hyperparameter search\", type = bool, default = False)\n parser.add_argument('-a', '--DL_article_default', help=\"Train the DL model with article hyperparameters\", type = bool, default = False)\n parser.add_argument('-p', '--print_results', help=\"Print results\", type = bool, default = True)\n\n\n input_flags = parser.parse_args()\n print(input_flags)\n\n KNN_similarity_to_report_list = [\"cosine\"]\n\n\n dataset_list = [\"citeulike\"]\n\n for dataset_name in dataset_list:\n\n read_data_split_and_search(dataset_name,\n flag_baselines_tune=input_flags.baseline_tune,\n flag_DL_article_default= input_flags.DL_article_default,\n 
flag_print_results = input_flags.print_results,\n )\n\n\n\n\n if input_flags.print_results:\n generate_latex_hyperparameters(result_folder_path =\"result_experiments/{}/\".format(CONFERENCE_NAME),\n algorithm_name= ALGORITHM_NAME,\n experiment_subfolder_list = dataset_list,\n other_algorithm_list = [CMN_RecommenderWrapper],\n KNN_similarity_to_report_list = KNN_similarity_to_report_list,\n split_per_algorithm_type = True)\n","sub_path":"run_SIGIR_18_CMN.py","file_name":"run_SIGIR_18_CMN.py","file_ext":"py","file_size_in_byte":11454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633732886","text":"from sklearn import metrics\nimport numpy as np\nimport os\nfrom datetime import datetime\nimport tensorflow as tf\n\n\ndef calculate_auc(y_true, y_pred):\n return metrics.roc_auc_score(y_true, y_pred, average='macro')\n\n\ndef calculate_ap(y_true, y_pred):\n return metrics.average_precision_score(y_true, y_pred, average='macro')\n\n\ndef calculate_accuracy(y_true, y_pred):\n threshold_predict = (np.sign(y_pred - 0.5) + 1) / 2\n return np.sum(threshold_predict == y_true) / len(y_true)\n\n\ndef standard_scale(x, mean, std):\n return (x - mean) / std\n\n\ndef create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef describe_model(model):\n \"\"\"\n 描述keras模型的结构\n :param model:keras model\n :return:\n \"\"\"\n\n description = 'Model layers / shapes / parameters:\\n'\n total_params = 0\n\n for layer in model.layers:\n layer_params = layer.count_params()\n description += '- {}'.format(layer.name).ljust(20)\n description += '{}'.format(layer.input_shape).ljust(20)\n description += '{0:,}'.format(layer_params).rjust(12)\n description += '\\n'\n total_params += layer_params\n\n description += 'Total:'.ljust(30)\n description += '{0:,}'.format(total_params).rjust(22)\n\n print(description)\n\n\ndef compute_time_consumed(start_time):\n \"\"\"\n 计算训练总耗时\n :param start_time:\n :return:\n \"\"\"\n time_elapsed = datetime.now() - start_time\n seconds = time_elapsed.seconds\n hour = seconds // 3600\n minute = (seconds % 3600) // 60\n second = seconds % 3600 % 60\n print(\"本次训练共耗时 {0} 时 {1} 分 {2} 秒\".format(hour, minute, second))\n\n\ndef finish_instance():\n import os\n os.system('sh /data/stop.sh')\n\n\ndef mix_data(x, y, batch_size, alpha=1.0):\n \"\"\" mixup data augmentation\"\"\"\n lam = np.random.beta(alpha, alpha)\n x = x.numpy()\n y = y.numpy()\n\n index = np.random.permutation(batch_size)\n\n mixed_x = tf.convert_to_tensor(lam * x + (1 - lam) * x[index, ...])\n y_a, y_b = tf.convert_to_tensor(y), tf.convert_to_tensor(y[index, :])\n return mixed_x, y_a, y_b, lam\n\n\ndef mix_data_generator(x, y, batch_size, alpha=0.2):\n \"\"\" mixup data augmentation\"\"\"\n lam = np.random.beta(alpha, alpha, batch_size)\n x = x.numpy()\n y = y.numpy()\n epochs = int(x) // batch_size\n for _ in epochs:\n index = np.random.permutation(batch_size)\n\n mixed_x = tf.convert_to_tensor(lam * x + (1 - lam) * x[index, ...])\n mixed_y = tf.convert_to_tensor(lam * y + (1 - lam) * y[index, ...])\n yield mixed_x, mixed_y\n","sub_path":"src/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"88307023","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 24 13:55:48 2020\r\n\r\n@author: Grant\r\n\"\"\"\r\n\r\nimport math\r\nimport numpy\r\n\r\nX = [[1, 3, 5, 6], [2, 6, 8, 1], [3, 1, 6, 1]]\r\nY = [61, 59, 
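The command-line flags in the script above use type=bool, a classic argparse trap: argparse applies bool() to the raw string, and bool("False") is True, so -b False still enables the flag. A common workaround:

import argparse


def str2bool(value):
    # argparse passes the raw string; map common spellings explicitly.
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', 't', 'yes', '1'):
        return True
    if value.lower() in ('false', 'f', 'no', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


parser = argparse.ArgumentParser()
parser.add_argument('-b', '--baseline_tune', type=str2bool, default=False)
print(parser.parse_args(['-b', 'False']).baseline_tune)  # False, as intended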
40]\r\n\r\nn = 3\r\nn_parameters = 4\r\nL = 1\r\n\r\ndef gaussian_kernel(x_i, x_j, sigma):\r\n answer = math.exp(-(abs(x_i - x_j))**2 / (2 * sigma**2))\r\n \r\n return answer\r\n\r\nK = []\r\nfor i in range(n):\r\n temp_k = []\r\n for k in range(n):\r\n summation = 0\r\n for j in range(n_parameters):\r\n summation += gaussian_kernel(X[i][j], X[k][j], 1)\r\n temp_k.append(summation)\r\n \r\n K.append(temp_k)\r\n\r\ntemp_I = [n*L, 0, 0], [0, n*L, 0], [0, 0, n*L]\r\nI = numpy.array(temp_I)\r\n\r\nmatrix_x = numpy.array(X)\r\nmatrix_y = numpy.array(Y)\r\nmatrix_k = numpy.array(K)\r\n\r\nmatrix_k = numpy.linalg.inv(numpy.add(matrix_k, I)) \r\n\r\nalpha = numpy.dot(matrix_k, Y)\r\n\r\nprint(numpy.dot(alpha, matrix_x))","sub_path":"Kernalized Ridge Regression.py","file_name":"Kernalized Ridge Regression.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52182712","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom setuptools import setup, find_packages\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(HERE, 'README.md')) as f:\n README = f.read()\n\nsetup(\n name='scan-to-paperless',\n version='0.11.0',\n description='Tool to scan and process documents to palerless',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n author='Stéphane Brunner',\n author_email='stephane.brunner@gmail.com',\n url='https://hub.docker.com/r/sbrunner/scan-to-paperless/',\n packages=find_packages(exclude=['tests.*']),\n install_requires=['argcomplete', 'pyyaml', 'scikit-image'],\n entry_points={\n 'console_scripts': [\n 'scan = scan_to_paperless.scan:main',\n 'scan-process-status = scan_to_paperless.scan_process_status:main',\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501383000","text":"# https://www.twilio.com/docs/sms/quickstart/python#send-an-outbound-sms-with-python\n# Download the helper library from https://www.twilio.com/docs/python/install\nfrom twilio.rest import Client\n\n\n# Your Account Sid and Auth Token from twilio.com/console\n# DANGER! This is insecure. See http://twil.io/secure\naccount_sid = 'twilio_account_si'\nauth_token = 'twilio_auth_token'\nclient = Client(account_sid, auth_token)\n\nmessage = client.messages \\\n .create(\n body=\"Join Earth's mightiest heroes. 
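For comparison with the kernel ridge record above, which sums one Gaussian per feature dimension and never forms a prediction, here is the textbook formulation: one RBF value per pair of sample vectors, dual weights alpha = (K + n*lambda*I)^-1 y, and predictions via k(x_new, X) @ alpha.

import numpy as np


def rbf(a, b, sigma=1.0):
    # Gaussian kernel on whole vectors, not per-dimension scalars.
    return np.exp(-np.sum((a - b) ** 2) / (2.0 * sigma ** 2))


X = np.array([[1, 3, 5, 6], [2, 6, 8, 1], [3, 1, 6, 1]], dtype=float)
y = np.array([61.0, 59.0, 40.0])
n, lam = len(X), 1.0

K = np.array([[rbf(xi, xj) for xj in X] for xi in X])
alpha = np.linalg.solve(K + n * lam * np.eye(n), y)  # solves (K + n*lam*I) alpha = y

x_new = np.array([1.0, 3.0, 5.0, 6.0])
y_hat = np.array([rbf(x_new, xi) for xi in X]) @ alpha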
Like Kevin Bacon.\",\n from_='+00000000000',\n to='+00000000000'\n )\n\nprint(message.sid)\n\n","sub_path":"sms/send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247464726","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 2 23:25:32 2016\n\n@author: sriram\n\"\"\"\n\n\n\nfrom fancyimpute import KNN\nfrom fancyimpute import MICE\nimport pandas as pd\nimport numpy as np\n\nfeatureFile=\"/Users/sriram/Documents/Study/MachineLearning/Projects/Walmart/Data/csv/features.csv\"\ntrainFile=\"/Users/sriram/Documents/Study/MachineLearning/Projects/Walmart/Data/csv/train.csv\"\nfeatureDF=pd.read_csv(featureFile)\ntrainDF=pd.read_csv(trainFile)\nfeatureDF['TS']=pd.to_datetime(featureDF['Date'])\nfeatureDF['year']=featureDF['TS'].dt.year\nfeatureDF['month']=featureDF['TS'].dt.month\nfeatureDF['day']=featureDF['TS'].dt.day\nfeatureDF['week']=(featureDF['TS'].dt.week%4)\nfeatureDF.ix[featureDF.week == 0 ,'week']=4\nWeeklySales=trainDF.groupby(['Store','Date'])[['Store','Date','Weekly_Sales']].sum()\nWeeklySales=WeeklySales.reset_index(drop=True)\nfeatureDF['Weekly_Sales'] = WeeklySales['Weekly_Sales']\nfeatureDF['WeeklySalesMarkDown1Ratio']=featureDF['MarkDown1']/featureDF['Weekly_Sales']\n\ntrainData=featureDF[['Store','month','week','Weekly_Sales','MarkDown1']]\n\ntrainData=trainData.sort(['Store','month','week']).reset_index(drop=True)\nX_Incomplete=trainData.as_matrix()\nmice = MICE(n_imputations=100, impute_type='pmm')\nXY_completed = mice.complete(X_Incomplete)\n\nX_filled_knn = KNN(k=45).complete(X_Incomplete)\n\n\n\ndef findWeekcount(df):\n featureDF['TS']=pd.to_datetime(featureDF['Date'])\n featureDF['year']=featureDF['TS'].dt.year\n featureDF['month']=featureDF['TS'].dt.month\n featureDF['day']=featureDF['TS'].dt.day \n\nfeatureDF_clean=featureDF[['Store','Unemployment']]\nfeatureDF_clean=featureDF_clean.sort(['Store','Unemployment'])\nfeatureDF_clean_5=featureDF_clean[featureDF_clean['Store'].isin([5,6])]\nX_Incomplete=featureDF_clean.as_matrix()\nmice = MICE(n_imputations=100, impute_type='pmm')\nXY_completed = mice.complete(X_Incomplete)\npd.DataFrame(XY_completed).to_csv('/Users/sriram/Documents/Study/MachineLearning/Projects/Walmart/Data/UnEmployment.csv')\nfeatureDF_clean.groupby(['Store','year','month','day','Unemployment']).agg(applyKNN)\ndef applyMICE(df):\n X_Incomplete=df.as_matrix()\n mice = MICE(n_imputations=100, impute_type='pmm')\n XY_completed = mice.complete(X_Incomplete)\n return XY_completed\n\nStartIndex=0\nPrev_Store=0\nrow_index=0\nfor index, row in featureDF_clean_5.iterrows():\n Curr_Store=row['Store']\n if (Prev_Store != 0 ) and ( Prev_Store != Curr_Store):\n XY_completed_5=applyMICE(featureDF_clean_5[StartIndex:row_index]) \n StartIndex=row_index\n Prev_Store=Curr_Store\n row_index = row_index + 1\n\n \n\ndef applyKNN(featureDF_clean):\n X_Incomplete=featureDF_clean.as_matrix()\n X_filled_knn = KNN(k=20).complete(X_Incomplete)\n featureDF_Complete = pd.DataFrame(X_filled_knn)\n featureDF_clean['CorrectedUnemployment']=featureDF_Complete[4]\n return \n\nfeatureDF_clean.to_csv('/Users/sriram/Documents/Study/MachineLearning/Projects/Walmart/Data/SampleOut.csv')\n\n\nimport io\nimport numpy as np\nimport pandas as pd\n\ncontent = '''Trader Buy/Sell\n A 1\n A 0\n B 1\n B 1\n B 0\n C 1\n C 0\n C 0'''\n\ndf = pd.read_table(io.BytesIO(content), sep='\\s+')\ndef categorize(x):\n m = x.mean()\n return 1 if m > 
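The Twilio quickstart above hard-codes credentials next to a comment warning that this is insecure. The usual fix is to read them from the environment; the variable names below follow Twilio's documented convention but are still an assumption here:

import os

from twilio.rest import Client

# Export these in the shell rather than committing them to the repo.
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)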
0.5 else 0 if m < 0.5 else np.nan\nresult = featureDF_clean.groupby(['Store','Store','month','day','Unemployment']).agg([categorize])\nresult = result.rename(columns={'categorize' : 'Store'})\nprint(result)\n\ntest = np.array([[1, 2,4], [3, 4,0], [5, 6,9]])\ntest[:,[0,2]]\n\n\nfrom sklearn import linear_model\nfrom sklearn.linear_model import ARDRegression\n\nX = [[1, 2010], [1, 2011], [1, 2012], [1, 2013], [1, 2014], [1, 2015]]\n\nm = [[1, 2010, 1], [1, 2010, 2 ], [1, 2010, 3], [1, 2010, 4], [1, 2010, 5], [1, 2010, 6]]\n\nx = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8]]\n\nY = [0., 1., 2., 3., 4.5, 5.0, 5.5, 6.5]\nreg = linear_model.BayesianRidge()\nlinear = linear_model.LinearRegression()\nreg1 = linear_model.BayesianRidge()\n\nard=ARDRegression(compute_score=True)\nard.fit(x, Y)\nlinear.fit(x,Y)\n\nard.predict([[1, 8],[1, 9],[1, 10],[1, 11]])\nlinear.predict([[1, 8],[1, 9],[1, 10],[1, 11]])\n\n\nreg.fit(X, Y)\nreg1.fit(x, Y)\nreg1.predict ([[1, 2010, 7]])\nreg1.predict ([[1, 8],[1, 9],[1, 10],[1, 11]])\n\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283725682","text":"# coding=utf-8\nimport os;\nimport sys\nimport threading\nimport time\nfrom SimulatedDriver import *;\nfrom RealDriver import *;\n\nclass Sprinkler(threading.Thread):\n\n nbBallSpins = 0.0\n usedWater = 0\n spinsByLiter = 450\n neededWater = 0\n currentGpio = 0\n driver = None\n\n def __init__(self, neededWater, currentGpioNumber, driver):\n threading.Thread.__init__(self)\n self.neededWater = neededWater\n self.currentGpioNumber = currentGpioNumber\n self._stopevent = threading.Event( )\n self.driver = driver\n self.driver.raspberry.gpios[self.currentGpioNumber].setValue(1)\n\n def run(self):\n actualValue = 0\n activation = 0\n while (not self._stopevent.isSet()) and (self.usedWater < self.neededWater):\n ballValue = self.driver.getGPIO(3)\n if actualValue != ballValue:\n if actualValue == 0:\n activation = 1\n actualValue = ballValue\n #print activation\n if activation == 1:\n self.nbBallSpins = self.nbBallSpins + 1\n self.usedWater = self.nbBallSpins / self.spinsByLiter\n activation = 0\n if self.usedWater >= self.neededWater:\n print(\"Arrosage fini\")\n self.driver.raspberry.gpios[self.currentGpioNumber].setValue(0)\n self.stop()\n \n def stop(self):\n self._stopevent.set( )","sub_path":"cgi-bin/Sprinkler.py","file_name":"Sprinkler.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"642340394","text":"import tensorflow as tf\nfrom tensorflow.contrib.tensor_forest.python import tensor_forest\nfrom tensorflow.python.ops import resources\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nimport re\nimport matplotlib.pyplot as plt \nimport numpy as np\n# Ignore all GPUs, tf random forest does not benefit from it.\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\nwith open('/home/daanvir/gg/project/SRM_project/x.csv') as f:\n shape = tuple(int(num) for num in re.findall(r'\\d+', f.readline()))\nX = np.loadtxt('/home/daanvir/gg/project/SRM_project/x.csv').reshape(1024,48)\n\nY = np.genfromtxt(\n r'/home/daanvir/gg/project/SRM_project/y.csv', delimiter=',')\nX, Y = shuffle(X, Y, random_state=1)\ntrain_x, test_x, train_y, test_y = train_test_split(\n X, Y, test_size=0.2, random_state=415)\n\nx=[]\ny=[]\n# Import MNIST 
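The MICE and KNN classes with a .complete() method used in the record above come from an old fancyimpute release; later stacks expose the same ideas through scikit-learn. A minimal sketch, assuming scikit-learn >= 0.22:

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer, KNNImputer

X = np.array([[1.0, 2.0], [3.0, np.nan], [5.0, 6.0]])

# Chained-equations imputation, the IterativeImputer analogue of MICE.
X_mice_like = IterativeImputer(max_iter=10, random_state=0).fit_transform(X)

# Nearest-neighbour imputation, the analogue of fancyimpute's KNN.
X_knn_like = KNNImputer(n_neighbors=2).fit_transform(X)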
data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=False)\n\n# Parameters\nnum_steps = 50 # Total steps to train\nbatch_size = 1024 # The number of samples per batch\nnum_classes = 8 # The 10 digits\nnum_features = 48# Each image is 28x28 pixels\nnum_trees = 10\nmax_nodes = 1000\n\n# Input and Target data\nX = tf.placeholder(tf.float32, shape=[None, num_features])\n# For random forest, labels must be integers (the class id)\nY = tf.placeholder(tf.int32, shape=[None])\n\n# Random Forest Parameters\nhparams = tensor_forest.ForestHParams(num_classes=num_classes,\n num_features=num_features,\n num_trees=num_trees,\n max_nodes=max_nodes).fill()\n\n# Build the Random Forest\nforest_graph = tensor_forest.RandomForestGraphs(hparams)\n# Get training graph and loss\ntrain_op = forest_graph.training_graph(X, Y)\nloss_op = forest_graph.training_loss(X, Y)\n\n# Measure the accuracy\ninfer_op, _, _ = forest_graph.inference_graph(X)\ncorrect_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))\naccuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# Initialize the variables (i.e. assign their default value) and forest resources\ninit_vars = tf.group(tf.global_variables_initializer(),\n resources.initialize_resources(resources.shared_resources()))\n\nsaver=tf.train.Saver()\n\n# Start TensorFlow session\nsess = tf.Session()\n\n# Run the initializer\nsess.run(init_vars)\n\n# Training\nfor i in range(1, num_steps + 1):\n # Prepare Data\n # Get the next batch of MNIST data (only images are needed, not labels)\n \n _, l = sess.run([train_op, loss_op], feed_dict={X: train_x, Y: train_y})\n if i % 2 == 0 or i == 1:\n x.append(i)\n acc = sess.run(accuracy_op, feed_dict={X: train_x, Y: train_y})\n y.append(acc)\n print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))\nplt.plot(x,y)\nplt.xlabel('epochs')\nplt.ylabel('accuracy')\nplt.show()\nsave_path=saver.save(sess,\"/home/daanvir/gg/project/SRM_project/motion_ml/results/model.ckpt\")\n# Test Model\n\nprint(\"Test Accuracy:\", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))","sub_path":"models/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472813570","text":"import random\nimport piskvorky\n\ndef tah_pocitace (herni_pole):\n\twhile True:\n\t\tpozice = random.randrange (0,19)\n\t\tprint(\"počítač si vybírá pozici\", pozice)\n\t\tif herni_pole[pozice] != \"-\":\n\t\t\tprint (\"Políčko číslo \", pozice, \"je obsazené, zkus jiné\")\n\t\telse:\n\t\t\treturn piskvorky.tah(herni_pole, \"o\", pozice)\n","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562921150","text":"import cv2\nimport utils\n\n#Import the stuff we need\n#pip install influxdb \nfrom influxdb import InfluxDBClient\nfrom datetime import datetime\nimport pytz\n\n#Setup database\nclient = InfluxDBClient('localhost', 8086, 'admin', 'admin', 'electric_meter6')\nclient.create_database('electric_meter6') #This line not used as only one database needed\nprint(client.get_list_database())\n# client.switch_database('electric_2nd_meter') #This line not used as only one database needed\n\n#Setup Payload\njson_payload = []\n\ninitial_kwh = 16000\ntotal_bill = 3\n\ndata = {\n \"measurement\": \"electric_bill\",\n \"tags\": {\n 
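tensorflow.contrib.tensor_forest only exists in TensorFlow 1.x; the same experiment is usually far simpler with scikit-learn. A sketch with the shapes used in the record above (1024 samples, 48 features, 8 classes, 10 trees), on random stand-in data:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.rand(1024, 48)
y = np.random.randint(0, 8, size=1024)

clf = RandomForestClassifier(n_estimators=10, random_state=0)
clf.fit(X[:800], y[:800])
print('test accuracy:', clf.score(X[800:], y[800:]))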
\"unit\": 'RM'\n },\n \"time\": datetime.now(pytz.timezone('Asia/Singapore')),\n \"fields\": {\n 'value': float(total_bill),\n }\n}\njson_payload.append(data)\n\n#Send our payload\nclient.write_points(json_payload)\n\n# define region of interest (exclude unit)\n(x_min,y_min) = (120,210)\n(x_max,y_max) = (440,280)\n\n# define region of interest of unit\n(unit_x_min,unit_y_min) = (434,220)\n(unit_x_max,unit_y_max) = (540,270)\n\ni = 0\n\ncap= cv2.VideoCapture(\"output2.mp4\")\n# in case of live streaming use cap= cv2.VideoCapture(0)\n\n# Check if camera opened successfully\nif (cap.isOpened()== False):\n print(\"Error opening video stream or file\")\n\n\nwhile (cap.isOpened()):\n ret,frame= cap.read()\n \n i = i+1\n \n if (i % 100 == 0):\n \n # crop image, only process roi\n roi = frame[y_min:y_max,x_min:x_max]\n unit_roi = frame[unit_y_min:unit_y_max,unit_x_min:unit_x_max]\n\n unit_roi= utils.preprocessing_unit(unit_roi)\n unit_text,unit_roi,unit_detection = utils.recognise_text(unit_roi,'eng')\n \n try:\n \n if unit_text == 'kvarh' or unit_text == 'kVarh':\n \n roi = utils.preprocessing(roi)\n text,roi,detection = utils.recognise_text(roi,'ssd_alphanum_plus')\n text = int(text)\n \n if text > 88888888: #ignore if reading more than possible maximum reading(88888888)\n i = i-1\n continue\n \n if text > 10000:\n text = text[4:7]\n \n print(\"meter reading = \",text,unit_text)\n \n data = {\n \"measurement\": \"electric_consumption\",\n \"tags\": {\n \"unit\": 'kVarh'\n },\n \"time\": datetime.now(pytz.timezone('Asia/Singapore')),\n \"fields\": {\n 'value': float(text),\n }\n }\n json_payload.append(data)\n \n #Send our payload\n client.write_points(json_payload)\n \n \n if unit_text == 'kWh' or unit_text == 'kwh':\n \n roi = utils.preprocessing(roi)\n text,roi,detection = utils.recognise_text(roi,'ssd_alphanum_plus')\n text = int(text)\n \n if text > 88888888: #ignore if reading more than possible maximum reading(88888888)\n i = i-1\n continue\n \n if text > 100000: \n text = text[3:7]\n \n print(\"meter reading = \",text,unit_text)\n \n data = {\n \"measurement\": \"electric_consumption\",\n \"tags\": {\n \"unit\": 'kWh'\n },\n \"time\": datetime.now(pytz.timezone('Asia/Singapore')),\n \"fields\": {\n 'value': float(text),\n }\n }\n json_payload.append(data)\n \n #Send our payload\n client.write_points(json_payload)\n \n diff_kwh = text - initial_kwh\n total_bill = utils.calculate_electric_bill(diff_kwh)\n \n data = {\n \"measurement\": \"electric_bill\",\n \"tags\": {\n \"unit\": 'RM'\n },\n \"time\": datetime.now(pytz.timezone('Asia/Singapore')),\n \"fields\": {\n 'value': float(total_bill),\n }\n }\n json_payload.append(data)\n \n #Send our payload\n client.write_points(json_payload)\n \n \n except:\n i = i-1\n continue\n \n if ret == True:\n cv2.imshow('frame', frame)\n cv2.imshow('roi', roi)\n cv2.imshow('unit_roi', unit_roi)\n# \n# # function calling\n# img_h_resize = utils.hconcat_resize([frame,roi,unit_roi])\n# \n# # show the Output image\n# cv2.imshow('hconcat_resize.jpg', img_h_resize)\n \n \n if i > 10000:\n i = 0\n\n if cv2.waitKey(1) & 0xFF == 27: #Press Esc to quit program\n break\n \n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"208072576","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom pointnetvlad_cls import *\r\nfrom loading_pointclouds import *\r\n\r\n\r\n#Global Variable 
\r\nBATCH_NUM_QUERIES = 2\r\nPOSITIVES_PER_QUERY = 2\r\nNEGATIVES_PER_QUERY = 18\r\nEPOCH = 100\r\nNUM_POINTS = 4096\r\nGPU_INDEX = 1\r\nTRAIN_FILE = 'generating_queries/training_queries_baseline.pickle'\r\nTEST_FILE = 'generating_queries/test_queries_baseline.pickle'\r\nLOG_DIR = \"log/\"\r\nTRAINING_QUERIES= get_queries_dict(TRAIN_FILE)\r\nTEST_QUERIES= get_queries_dict(TEST_FILE)\r\n\r\n#batch norm parameter\r\nDECAY_STEP = 20000\r\nBN_INIT_DECAY = 0.5\r\nBN_DECAY_DECAY_RATE = 0.5\r\nBN_DECAY_DECAY_STEP = float(DECAY_STEP)\r\nBN_DECAY_CLIP = 0.99\r\n\r\n#loss parameter\r\nMARGIN1 = 0.5\r\nMARGIN2 = 0.2\r\n\r\n#learning_rate_parameter\r\nBASE_LEARNING_RATE = 0.00005\r\nis_training = True\r\nOPTIMIZER = \"adam\"\r\n\r\ndef get_bn_decay(batch):\r\n\tbn_momentum = tf.train.exponential_decay(BN_INIT_DECAY,batch*BATCH_NUM_QUERIES,BN_DECAY_DECAY_STEP,BN_DECAY_DECAY_RATE,staircase=True)\r\n\tbn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)\r\n\treturn bn_decay\r\n\t\r\n#learning rate halfed every 5 epoch\r\ndef get_learning_rate(epoch):\r\n\tlearning_rate = BASE_LEARNING_RATE*((0.9)**(epoch//5))\r\n\tlearning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!\r\n\treturn learning_rate\r\n\r\ndef init_network():\r\n\twith tf.device('/gpu:'+str(GPU_INDEX)):\r\n\t\tprint(\"In Graph\")\r\n\t\tquery= placeholder_inputs(BATCH_NUM_QUERIES, 1, NUM_POINTS)\r\n\t\tpositives= placeholder_inputs(BATCH_NUM_QUERIES, POSITIVES_PER_QUERY, NUM_POINTS)\r\n\t\tnegatives= placeholder_inputs(BATCH_NUM_QUERIES, NEGATIVES_PER_QUERY, NUM_POINTS)\r\n\t\tother_negatives= placeholder_inputs(BATCH_NUM_QUERIES,1, NUM_POINTS)\r\n\r\n\t\tis_training_pl = tf.placeholder(tf.bool, shape=())\r\n\t\tprint(is_training_pl)\r\n\t\t\r\n\t\tbatch = tf.Variable(0)\r\n\t\tepoch_num = tf.placeholder(tf.float32, shape=())\r\n\t\tbn_decay = get_bn_decay(batch)\r\n\t\ttf.summary.scalar('bn_decay', bn_decay)\r\n\r\n\t\twith tf.variable_scope(\"query_triplets\") as scope:\r\n\t\t\tvecs= tf.concat([query, positives, negatives, other_negatives],1)\r\n\t\t\tprint(vecs)\r\n\t\t\tout_vecs= forward(vecs, is_training_pl, bn_decay=bn_decay)\r\n\t\t\tprint(out_vecs)\r\n\t\t\tq_vec, pos_vecs, neg_vecs, other_neg_vec= tf.split(out_vecs, [1,POSITIVES_PER_QUERY,NEGATIVES_PER_QUERY,1],1)\r\n\t\t\tprint(q_vec)\r\n\t\t\tprint(pos_vecs)\r\n\t\t\tprint(neg_vecs)\r\n\t\t\tprint(other_neg_vec)\r\n\r\n\t\t#loss = lazy_triplet_loss(q_vec, pos_vecs, neg_vecs, MARGIN1)\r\n\t\t#loss = softmargin_loss(q_vec, pos_vecs, neg_vecs)\r\n\t\t#loss = quadruplet_loss(q_vec, pos_vecs, neg_vecs, other_neg_vec, MARGIN1, MARGIN2)\r\n\t\tloss = lazy_quadruplet_loss(q_vec, pos_vecs, neg_vecs, other_neg_vec, MARGIN1, MARGIN2)\r\n\t\ttf.summary.scalar('loss', loss)\r\n\r\n\t\t# Get training operator\r\n\t\tlearning_rate = get_learning_rate(epoch_num)\r\n\t\ttf.summary.scalar('learning_rate', learning_rate)\r\n\t\tif OPTIMIZER == 'momentum':\r\n\t\t\toptimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)\r\n\t\telif OPTIMIZER == 'adam':\r\n\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate)\r\n\r\n\t\tupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n\t\twith tf.control_dependencies(update_ops):\r\n\t\t\ttrain_op = optimizer.minimize(loss, global_step=batch)\r\n\t\t\r\n\t\t# Add ops to save and restore all the variables.\r\n\t\tsaver = tf.train.Saver()\r\n\r\n\t# Create a session\r\n\tgpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)\r\n\tconfig = 
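Note that get_learning_rate above does not match its comment: 0.9 ** (epoch // 5) decays the rate by 10% every 5 epochs, not by half. If halving were the intent, the base would be 0.5:

def get_learning_rate_halved(epoch, base=0.00005, floor=0.00001):
    # Halve the rate every 5 epochs, clipped at a minimum value.
    return max(base * (0.5 ** (epoch // 5)), floor)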
tf.ConfigProto(gpu_options=gpu_options)\r\n\tconfig.gpu_options.allow_growth = True\r\n\tconfig.allow_soft_placement = True\r\n\tconfig.log_device_placement = False\r\n\t\r\n\tsess = tf.Session(config=config)\r\n\r\n\t# Add summary writers\r\n\tmerged = tf.summary.merge_all()\r\n\ttrain_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),sess.graph)\r\n\ttest_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))\r\n\r\n\t# Initialize a new model\r\n\tinit = tf.global_variables_initializer()\r\n\tsess.run(init)\r\n\tprint(\"Initialized\")\r\n\t\r\n\t#prepare the fid_dict\r\n\tops = {'query': query,\r\n\t\t'positives': positives,\r\n\t\t'negatives': negatives,\r\n\t\t'other_negatives': other_negatives,\r\n\t\t'is_training_pl': is_training_pl,\r\n\t\t'loss': loss,\r\n\t\t'train_op': train_op,\r\n\t\t'merged': merged,\r\n\t\t'step': batch,\r\n\t\t'epoch_num': epoch_num,\r\n\t\t'q_vec':q_vec,\r\n\t\t'pos_vecs': pos_vecs,\r\n\t\t'neg_vecs': neg_vecs,\r\n\t\t'other_neg_vec': other_neg_vec,\r\n\t\t'sess': sess,\r\n\t\t'train_writer': train_writer,\r\n\t\t'test_writer': test_writer}\r\n\t\r\n\treturn ops\r\n\t\r\n\t\r\ndef select_batch(train_file_idxs,batch_ind):\r\n\tbatch_keys= train_file_idxs[batch_ind*BATCH_NUM_QUERIES:(batch_ind+1)*BATCH_NUM_QUERIES]\r\n\tq_tuples=[]\r\n\t\r\n\tprint(\"batch %d\"%(batch_ind))\r\n\tfor j in range(BATCH_NUM_QUERIES):\r\n\t\tif(len(TRAINING_QUERIES[batch_keys[j]][\"positives\"]) List[str]:\n \"\"\"\n Convert a list of time strings (like \"5:45 PM\") to different users'\n timezones and reply with the conversions.\n\n :param msg: Discord message that triggered the conversion\n :param time_strs: a list of strings that may be valid times\n :returns: a list of strings that could not be converted\n \"\"\"\n\n user_data: UserData = self.bot.get_cog('UserData')\n if user_data is None:\n # User data cog couldn't be retrieved, so consider all conversions\n # failed\n return time_strs\n\n try:\n db = await user_data.get_database()\n except DatabaseUnavailable:\n return time_strs\n\n try:\n localized_times, failed = await convert_time_to_user_timezones(\n db, msg.author.id, msg.guild, time_strs\n )\n except UserTimezoneUnset:\n cmd_prefix = self.bot.command_prefix(self.bot, msg)[-1]\n await Embeds.error(\n msg.channel,\n f\"You haven't set your timezone yet. 
Type \"\n f\"`{cmd_prefix}help timezone set` for more info.\"\n )\n return time_strs\n\n if localized_times:\n output = []\n for tz_name, times in localized_times:\n times = ' | '.join(f'`{time.strftime(time_format)}`'\n for time in times)\n output.append(f'**{tz_name}**: {times}')\n await msg.channel.send('\\n'.join(output))\n\n return failed\n\n async def convert_imperial_metric(\n self, channel: discord.TextChannel,\n quantity_strs: List[str]) -> List[str]:\n \"\"\"\n Convert a list of quantity strings (like \"5 km\") between imperial and\n metric and reply with the conversions.\n\n :param channel: Discord channel to send conversions message to\n :param quantity_strs: a list of strings that may be valid quantities\n :returns: a list of strings that could not be converted\n \"\"\"\n\n conversions = []\n failed = []\n for qstr in quantity_strs:\n q = imperial_metric(qstr)\n if q is not None:\n conversions.append(f'`{q[0]:.2f~P}` = `{q[1]:.2f~P}`')\n else:\n failed.append(qstr)\n\n if conversions:\n await channel.send('\\n'.join(conversions))\n\n return failed\n","sub_path":"sandpiper/conversion/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548765495","text":"import logging\nimport os\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\napp = Flask(__name__)\n\nif 'MOROCCO_DATABASE_URI' not in os.environ:\n raise EnvironmentError('Missing environment MOROCCO_DATABASE_URI')\n\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ['MOROCCO_DATABASE_URI']\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n\ndef load_config_from_db():\n import requests\n from .models import DbProjectSetting\n from .auth import AzureADPublicKeysManager\n\n if not app.debug:\n app.config['PREFERRED_URL_SCHEME'] = 'https'\n\n if app.debug:\n logging.basicConfig(level=logging.DEBUG)\n\n for setting in DbProjectSetting.query.all():\n app.config[setting.name] = setting.value\n\n if not app.secret_key:\n app.secret_key = os.environ.get('MOROCCO_SECRET_KEY', 'session secret key for local testing')\n\n app.config['is_local_server'] = os.environ.get('MOROCCO_LOCAL_SERVER') == 'True'\n\n config_url = 'https://login.microsoftonline.com/{}/.well-known/openid-configuration' \\\n .format(app.config['MOROCCO_AUTH_TENANT'])\n response = requests.get(config_url)\n if response.status_code != 200:\n raise EnvironmentError('Fail to request Azure AD OpenID configuration from {}.'.format(config_url))\n\n result = response.json()\n app.config['auth_authorization_endpoint'] = result['authorization_endpoint']\n app.config['auth_token_endpoint'] = result['token_endpoint']\n app.config['auth_jwks_uri'] = result['jwks_uri']\n app.config['auth_signout_uri'] = result['end_session_endpoint']\n app.config['auth_public_key_manager'] = AzureADPublicKeysManager(app.config['auth_jwks_uri'],\n app.config['MOROCCO_AUTH_CLIENT_ID'])\n","sub_path":"app/morocco/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59636391","text":"import torch\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\nimport argparse\nimport os\n\nfrom preprocess import preprocess_cmu\nfrom models import general_FFTNet\nfrom dataset import CMU_Dataset\nfrom datetime import 
datetime\nfrom hparams import hparams\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--preprocess', action='store_true')\nparser.add_argument('--wav_dir', type=str, default='/host/data_dsk1/dataset/CMU_ARCTIC_Databases/cmu_us_slt_arctic/wav')\nparser.add_argument('data_dir', type=str)\nparser.add_argument('--feature_type', type=str, default='melspectrogram')\nparser.add_argument('--feature_dim', type=int, default=25, help='number of mcc coefficients')\nparser.add_argument('--mcep_alpha', type=float, default=0.42, help='''all-pass filter constant.\n 16khz: 0.42,\n 10khz: 0.35,\n 8khz: 0.31.''')\nparser.add_argument('--window_length', type=float, default=0.025)\nparser.add_argument('--window_step', type=float, default=0.01)\nparser.add_argument('--minimum_f0', type=float, default=71)\nparser.add_argument('--maximum_f0', type=float, default=800)\nparser.add_argument('--q_channels', type=int, default=256, help='quantization channels')\nparser.add_argument('--interp_method', type=str, default='linear')\nparser.add_argument('--fft_channels', type=int, default=128, help='fftnet layer channels')\nparser.add_argument('--seq_M', type=int, default=5000, help='training sequence length')\nparser.add_argument('--radixs', nargs='+', type=int, default=[2] * 11)\nparser.add_argument('--batch_size', type=int, default=5)\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate')\nparser.add_argument('--steps', type=int, default=100000, help='iteration number')\nparser.add_argument('--injected_noise', action='store_true')\nparser.add_argument('--model_file', type=str, default='slt_fftnet')\nparser.add_argument('checkpoint_dir', type=str, help='Directory to save checkpoints.')\nparser.add_argument('--checkpoint_step', type=int, default=5000)\nparser.add_argument('--transpose', action='store_true')\nparser.add_argument('--predict_dist', type=int, default=1)\n\n\ndef main():\n args = parser.parse_args()\n if args.preprocess:\n print('==> Preprocessing data ...')\n preprocess_cmu(args.wav_dir, args.data_dir, q_channels=args.q_channels, winlen=args.window_length,\n winstep=args.window_step, n_mcep=args.feature_dim, mcep_alpha=args.mcep_alpha,\n minf0=args.minimum_f0, maxf0=args.maximum_f0, type=args.feature_type)\n\n print('==> Loading Dataset..')\n training_dataset = CMU_Dataset(args.data_dir, args.seq_M, 256, hopsize=hparams.frame_shift,\n interp_method=args.interp_method, injected_noise=args.injected_noise,\n predict_dist=args.predict_dist)\n training_loader = DataLoader(training_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)\n\n print('==> Building model..')\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n net = general_FFTNet(radixs=args.radixs, fft_channels=args.fft_channels, classes=args.q_channels,\n aux_channels=hparams.num_mels, transpose=args.transpose,\n predict_dist=args.predict_dist).to(device)\n\n if torch.cuda.device_count() > 1:\n net = torch.nn.DataParallel(net)\n if device == 'cuda':\n cudnn.benchmark = True\n\n print(sum(p.numel() for p in net.parameters() if p.requires_grad), \"of parameters.\")\n\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)\n\n os.makedirs(args.checkpoint_dir, exist_ok=True)\n print(\"Start Training.\")\n a = datetime.now().replace(microsecond=0)\n global_step = 0\n while global_step < args.steps:\n for batch_idx, (inputs, targets, features) in enumerate(training_loader):\n inputs, targets, features = inputs.cuda(), targets.cuda(), features.cuda()\n\n 
optimizer.zero_grad()\n\n logits = net(inputs, features)\n loss = criterion(logits.unsqueeze(-1), targets.unsqueeze(-1))\n loss.backward()\n optimizer.step()\n\n print(global_step, \"{:.4f}\".format(loss.item()))\n global_step += 1\n if global_step > args.steps:\n break\n\n if global_step % args.checkpoint_step == 0:\n model = net.module if isinstance(net, torch.nn.DataParallel) else net\n torch.save(model, os.path.join(args.checkpoint_dir, args.model_file + \"_{}.pth\".format(global_step)))\n print(\"Checkpoint saved.\")\n\n print(\"Training time cost:\", datetime.now().replace(microsecond=0) - a)\n\n net = net.module if isinstance(net, torch.nn.DataParallel) else net\n\n torch.save(net, args.model_file + \".pth\")\n print(\"Model saved to\", args.model_file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326349783","text":"from django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^prerequisites/$', views.prerequisites, name='prerequisites'),\n url(r'^commitment/$', views.commitment, name='commitment'),\n url(r'^form/thank_you/$', views.form_thank_you, name='form_thank_you'),\n url(r'^form/(?P.+)/$', views.organize_form_wizard, name='form_step')\n]\n","sub_path":"organize/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644430642","text":"import time\n\nimport numpy as np\nfrom PIL import Image\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QApplication, QFileDialog, QErrorMessage, QMessageBox\nfrom PyQt5.QtWidgets import QMainWindow\n\nfrom gui.general import *\nimport os\n\nclass App(QMainWindow):\n \"\"\"\n Standard application interface\n\n Parameters\n ----------\n image : ndarray\n The image to open\n \"\"\"\n image: str\n title: str\n total_epochs: int\n epoch: int\n\n def __init__(self, image='./files/test_images/lena.png'):\n super().__init__()\n self.image = image\n self.title = os.path.basename(self.image)\n\n self.epoch = 0\n self.total_epochs = 0\n self.timer = QTimer(self)\n\n def get_available_windows(self, INFILE):\n \"\"\"\n Get all the windows\n\n Parameters\n ----------\n INFILE : str\n Makes sure we don't reload the file recursively\n \"\"\"\n global WINDOW_MANAGER\n return WINDOW_MANAGER.filter(INFILE)\n\n # https://stackoverflow.com/questions/20243637/pyqt4-center-window-on-active-screen\n def center(self):\n \"\"\"\n Center the window\n \"\"\"\n frame_geometry = self.frameGeometry()\n screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())\n center = QApplication.desktop().screenGeometry(screen).center()\n frame_geometry.moveCenter(center)\n self.move(frame_geometry.topLeft())\n\n def epochs_change(self):\n \"\"\"\n Updates the epoch label\n \"\"\"\n epochs: int = self.epoch_slider.value()\n self.epoch_label.setText(\"Epochs ({}) (Total {})\".format(epochs, self.total_epochs))\n\n def alpha_chnage(self):\n \"\"\"\n update the method alpha value based on slider\n \"\"\"\n value: int = self.alpha_slider.value()\n self.method.set_alpha(value / 100)\n self.alpha_label.setText(\"Alpha ({})\".format(value / 100))\n\n def boundary_change(self):\n \"\"\"\n Update the boundary based on the 
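The script above pickles the whole module with torch.save(model, path), which ties the checkpoint to the exact class definition. The more portable PyTorch idiom is to save the state_dict and rebuild the network at load time; a sketch reusing the script's net:

import torch

# Unwrap DataParallel first, as the script already does for full saves.
model = net.module if isinstance(net, torch.nn.DataParallel) else net
torch.save(model.state_dict(), 'fftnet_state.pth')

# Later: instantiate the same architecture, then load the weights.
model.load_state_dict(torch.load('fftnet_state.pth'))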
boundary group\n \"\"\"\n if not self.boundary_group is None:\n self.method.set_boundary(self.boundary_group.checkedButton().text())\n\n def method_change(self):\n \"\"\"\n Update the method based on the boundary group\n \"\"\"\n if not self.method_group is None:\n self.method.set_mode(self.method_group.checkedButton().text())\n\n def mode_change(self, _):\n \"\"\"\n Changes the view from the combobox\n \"\"\"\n view = self.WINDOWS[self.mode.currentText()]\n view.init_UI()\n view.show()\n self.hide()\n\n @pyqtSlot()\n def update_image_label(self, data=None):\n \"\"\"\n Update image label\n\n Parameters\n ----------\n data : ndarray\n The new image data\n \"\"\"\n if data is None:\n self.label.setPixmap(pil2pixmap(Image.fromarray((255 * self.method.data).astype(np.uint8))))\n else:\n self.label.setPixmap(pil2pixmap(Image.fromarray((255 * data).astype(np.uint8))))\n\n @pyqtSlot()\n def update_image(self):\n \"\"\"\n Wrapper to nicely update the image when preform a iteration from the backend\n \"\"\"\n if not hasattr(self, 'epoch_slider'):\n QApplication.processEvents()\n self.method.fit(epochs=1)\n self.update_image_label()\n self.reset_button.setEnabled(True)\n self.timer.stop()\n QApplication.restoreOverrideCursor()\n else:\n if self.epoch < self.epoch_slider.value():\n self.method.fit(epochs=1)\n self.update_image_label()\n self.epoch += 1\n self.total_epochs += 1\n self.epochs_change()\n self.reset_button.setEnabled(True)\n QApplication.processEvents()\n self.setWindowTitle(\"Calculating...\")\n else:\n QApplication.restoreOverrideCursor()\n self.setWindowTitle(self.title)\n self.reset_button.setEnabled(True)\n self.action_button.setEnabled(True)\n self.timer.stop()\n\n def show_file_dialog(self):\n \"\"\"\n Shows a file dialog\n \"\"\"\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self, \"QFileDialog.getOpenFileName()\", \"\",\n \"JPEG (*.jpeg);;jpg (*.jpg);;png (*.png)\",\n options=options)\n if file_name:\n movment_x, movment_y = self.method.change_photo(file_name)\n self.label.setPixmap(pil2pixmap(Image.fromarray((255 * self.method.data).astype(np.uint8))))\n self.label.move(movment_x, self.label.pos().y())\n else:\n print(\"No file selected\")\n\n def show_file_dialog_hdr(self):\n \"\"\"\n Shows a file dialog\n \"\"\"\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileNames(self, \"QFileDialog.getOpenFileNames()\", \"\",\n \"JPEG (*.jpeg);;jpg (*.jpg);;png (*.png)\",\n options=options)\n if file_name:\n if len(file_name) == 1:\n warning = QMessageBox()\n warning.setIcon(QMessageBox.Critical)\n warning.setText(\"Error\")\n warning.setInformativeText('Use the shift key to select more photos (one photo = bad results)')\n warning.setWindowTitle(\"Error\")\n warning.exec_()\n \n movment_x, movment_y = self.method.change_photo(file_name[0])\n self.method.update_images(file_name)\n self.label.setPixmap(pil2pixmap(Image.fromarray((255 * self.method.data).astype(np.uint8))))\n self.label.move(movment_x, self.label.pos().y())\n else:\n print(\"No file selected\")\n\n def show_extra(self):\n \"\"\"\n Shows the extra features\n \"\"\"\n self.setGeometry(0, 0, self.pixmap.width() + self.PADDING, self.height)\n self.center()\n\n def screenshot(self):\n \"\"\"\n Takes a screenshot of the current QWindow\n \"\"\"\n screen = self.grab()\n screen.save(\"{}.png\".format(time.time()), 'png')\n\n @pyqtSlot()\n def reset_image_extra(self):\n \"\"\"\n Resets 
the image\n \"\"\"\n self.total_epochs = 0\n if hasattr(self, 'epoch_label'):\n self.epoch_label.setText(\"Epochs\")\n self.reset_button.setEnabled(False)\n self.method.reset()\n self.label.setPixmap(pil2pixmap(self.pixmap_converter(self.method.data)))\n\n def prepare(self):\n \"\"\"\n Some functions needs some function to be run before method\n\n This method will be overridden\n \"\"\"\n pass\n\n @pyqtSlot()\n def run_method(self, lock_run=False):\n \"\"\"\n Runs one of the backends methods\n \"\"\"\n self.prepare()\n if lock_run:\n QTimer.singleShot(100, lambda: self.action_button.setEnabled(False))\n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents()\n self.epoch = 0\n self.timer.timeout.connect(self.update_image)\n self.timer.start(100)\n\n def undo(self):\n \"\"\"\n Some functions needs some function to be run when the method is reset\n\n This method will be overridden\n \"\"\"\n pass\n\n def reset_image(self):\n \"\"\"\n Resets the image\n \"\"\"\n if hasattr(self, 'epoch_label'):\n self.epoch_label.setText(\"Epochs\")\n self.total_epochs = 0\n self.undo()\n self.reset_button.setEnabled(False)\n QTimer.singleShot(100, lambda: self.action_button.setEnabled(True))\n self.method.reset()\n self.label.setPixmap(pil2pixmap(Image.fromarray((255 * self.method.data).astype(np.uint8))))\n","sub_path":"src/gui/app_data.py","file_name":"app_data.py","file_ext":"py","file_size_in_byte":8175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429827129","text":"import numpy as np\n\n\nclass RandomVariable(object):\n \"\"\"\n base class for random variables\n \"\"\"\n\n def ml(self, X, **kwargs):\n \"\"\"\n maximum likelihood estimation of the parameter(s)\n of the distribution given data\n\n Parameters\n ----------\n X : (sample_size, ndim) np.ndarray\n observed data\n \"\"\"\n assert isinstance(X, np.ndarray)\n if hasattr(self, \"_ml\"):\n self._ml(X, **kwargs)\n else:\n raise NotImplementedError\n\n def map(self, X, **kwargs):\n \"\"\"\n maximum a posteriori estimation of the parameter(s)\n of the distribution given data\n\n Parameters\n ----------\n X : (sample_size, ndim) np.ndarray\n observed data\n \"\"\"\n self._check_input(X)\n if hasattr(self, \"_map\"):\n self._map(X, **kwargs)\n else:\n raise NotImplementedError\n\n def bayes(self, X, **kwargs):\n \"\"\"\n bayesian estimation of the parameter(s)\n of the distribution given data\n\n Parameters\n ----------\n X : (sample_size, ndim) np.ndarray\n observed data\n \"\"\"\n self._check_input(X)\n if hasattr(self, \"_bayes\"):\n self._bayes(X, **kwargs)\n else:\n raise NotImplementedError\n\n def pdf(self, X):\n \"\"\"\n compute probability density function\n p(X|parameter)\n\n Parameters\n ----------\n X : (sample_size, ndim) np.ndarray\n input of the function\n\n Returns\n -------\n p : (sample_size,) np.ndarray\n value of probability density function for each input\n \"\"\"\n self._check_input(X)\n if hasattr(self, \"_pdf\"):\n return self._pdf(X)\n else:\n raise NotImplementedError\n\n def draw(self, sample_size=1):\n \"\"\"\n draw samples from the distribution\n\n Parameters\n ----------\n sample_size : int\n sample size\n\n Returns\n -------\n sample : (sample_size, ndim) np.ndarray\n generated samples from the distribution\n \"\"\"\n assert isinstance(sample_size, int)\n if hasattr(self, \"_draw\"):\n return self._draw(sample_size)\n else:\n raise NotImplementedError\n\n def _check_input(self, X):\n assert isinstance(X, np.ndarray)\n assert X.shape[1:] == 
self.shape\n","sub_path":"prml/random/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90982339","text":"import subprocess\nimport pyttsx3\nfrom gtts import gTTS\n\ndef convert_pdf_to_raw_text(pdf: str) -> str:\n \"\"\"Converts a pdf into text with pdf2txt.py program\"\"\"\n process = subprocess.Popen(['pdf2txt.py', pdf], stdout=subprocess.PIPE)\n raw_text = process.communicate()[0]\n\n return raw_text.decode(\"utf-8\")\n\ndef sanitize_text(text: str) -> str:\n \"\"\"Sanitizes and removes all useless characters from the text, returns sanitized text string\"\"\"\n alphanum_text = \"\"\n for letter in list(text):\n if letter.isalnum() or letter == \" \" or letter in \"!?.,:;\\'\\\"&()$%#@~\":\n alphanum_text += letter\n\n return alphanum_text\n\ndef speak(text: str, rate: int, speaker: str):\n engine = pyttsx3.init()\n # good voice id is com.apple.speech.synthesis.voice.daniel or com.apple.speech.synthesis.voice.samantha\n engine.setProperty('voice', \"com.apple.speech.synthesis.voice.\" + speaker)\n engine.setProperty('rate', engine.getProperty('rate') + rate)\n engine.say(text)\n engine.runAndWait()\n engine.stop()\n\n\ndef text_to_mp3(text: str, file_output: str):\n tts = gTTS(text, lang='en')\n tts.save(file_output+ '.mp3')\n\ndef run(file: str):\n text = convert_pdf_to_raw_text(file+\".pdf\")\n text = sanitize_text(text)\n text_to_mp3(text, file)\n\n\nrun('nsp')\nrun\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471539062","text":"import cv2 as cv\r\nimport numpy as np\r\nimport os\r\nimport pickle\r\n\r\ndef extract_features(path = 'data'):\r\n data = [[[0, 255, 0]], [1]]\r\n for folder in os.listdir(path):\r\n for file in os.listdir(os.path.join(path, folder)):\r\n if file.endswith('.jpg'):\r\n pathFile = os.path.join(path, folder, file)\r\n image = cv.imread(pathFile, 1)\r\n features = image.reshape(-1, 3)\r\n label = file.split('-')[1][0]\r\n data[0] = np.append(data[0], features, axis=0)\r\n data[1] += [label for _ in range(features.shape[0])]\r\n\r\n coefficients = np.array([[1] for _ in range(len(data[0]))])\r\n data[0] = np.append(data[0], coefficients, axis=1)\r\n return data\r\n\r\ndef save_data(data):\r\n with open('data.pickle', 'wb') as f:\r\n pickle.dump(data, f)\r\nif __name__ == '__main__':\r\n data = extract_features()\r\n save_data(data)","sub_path":"extract_features.py","file_name":"extract_features.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76711939","text":"def welcome(name):\n print(\"Hello {} and welcome to the World Of Games(WOG).\".format(name))\n print(\"Here you can find many cool games to play\")\n\n\ndef load_game():\n print(\"Please choose a game to play:\\n1.Memory Game - a sequence\" +\n \" of numbers will appear for 1 second and you have to guess\" +\n \" it back\\n2.Guess Game - guess a number and see if you\" +\n \" chose like the computer\\n3.Currency Roulette - \" +\n \"try and guess the value of a random amount of USD in ILS\")\n res = input()\n while True:\n try:\n int(res)\n turn_to_number = int(res)\n\n if 1 <= turn_to_number <= 3:\n print(\"good pick\")\n break\n else:\n res = input(\"value needs to be between 1 and 3:\")\n continue\n except ValueError:\n res = 
input(\"not a number, try again: \")\n\n difficulty = input(\"please select a difficulty level from 1 to 5: \")\n while True:\n try:\n int(difficulty)\n turn_to_number = int(difficulty)\n\n if 1 <= turn_to_number <= 5:\n print(\"good pick\")\n break\n else:\n difficulty = input(\"value needs to be between 1 and 5:\")\n continue\n except ValueError:\n difficulty = input(\"not a number, try again:\")\n","sub_path":"WorldOfGames/Live.py","file_name":"Live.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572335722","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Csv_Data',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('gender', models.CharField(max_length=15, verbose_name=b'Gender', choices=[(b'F', b'Female'), (b'M', b'Male')])),\n ('Handedness', models.CharField(max_length=15, verbose_name=b'Handedness', choices=[(b'RH', b'Right-handed'), (b'LH', b'Right-handed')])),\n ],\n options={\n 'verbose_name': 'Csv Data',\n 'verbose_name_plural': 'Csv Datas',\n },\n ),\n ]\n","sub_path":"apps/home/migrations/0002_csv_data.py","file_name":"0002_csv_data.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328976634","text":"import random, operator\n\nprint\n'==========================================='\n\n\ndef randomCalc(i, j):\n ops = {'+': operator.add,\n '-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv}\n num = [1, 2, 3,4]\n num1, num2 = num[i], num[j]\n op = (list(ops.keys()))[i]\n answer = round(ops.get(op)(num1, num2), 3)\n print('What is {} {} {}?\\n'.format(num1, op, num2))\n return answer\n\n\ndef askQuestion(i):\n answer = randomCalc(i, i + 1)\n guess = float(input())\n return guess == answer, answer\n\n\ndef quiz(numOfQues):\n print('\\nWelcome. This is a ' + str(numOfQues) + ' question math quiz.')\n print('Your answer should be correct to three decimal places.\\n')\n score = 0\n for i in range(numOfQues):\n correct, ans = askQuestion(i)\n if correct:\n score += 1\n print('Correct!\\n')\n else:\n print('Incorrect! The correct answer is ' + str(ans) + '\\n')\n return ('Your score was {}/' + str(numOfQues)).format(score)\n\n\n# Driver Code\nprint(quiz(3))","sub_path":"Basic_Programs/Programming_challenge.py","file_name":"Programming_challenge.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"618230412","text":"'''\nYou are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. 
Add the two numbers and return it as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\nExample\n\nInput: (2 -> 4 -> 3) + (5 -> 6 -> 4)\nOutput: 7 -> 0 -> 8\nExplanation: 342 + 465 = 807.\n'''\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\ndef addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n \n def create_linked_list(number):\n\n head = None\n current = None\n for number in list(str(number))[::-1]:\n\n if not head:\n head = ListNode(number)\n current = head\n else:\n new_node = ListNode(number)\n current.next = new_node\n current = new_node\n return head\n\n def list_to_number(head):\n\n current = head\n num_list = []\n\n while current:\n num_list.append(current.val)\n current = current.next\n\n return sum([int(num)*(10**off) for off,num in enumerate(num_list)])\n \n int1 = list_to_number(l1)\n int2 = list_to_number(l2)\n int3 = int1+int2\n \n return create_linked_list(int3)","sub_path":"2_add_two_numbers.py","file_name":"2_add_two_numbers.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503585398","text":"#!/usr/bin/env python\nimport numpy as np\nimport pandas as pd\nimport rospy, os, time, pickle, math, copy, random\nfrom geometry_msgs.msg import Point, Pose, Twist\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import Bool, Float64, Int16, String\n\n\nclass Episode():\n def __init__(self, df, controller_type, confidence, sigma, result, failure_mode, case_number, case_name, dense_reward):\n self.episode_df = df\n self.confidence = confidence\n self.sigma = sigma\n self.result = result\n self.failure_mode = failure_mode\n self.case_number = case_number\n self.case_name = case_name\n self.controller_type = controller_type\n self.dense_reward = dense_reward\n\nclass Experience(object):\n def __init__(self, window_size, prior_alpha, prior_beta, length_scale):\n self.window_size = window_size\n self.prior_alpha = prior_alpha\n self.prior_beta = prior_beta\n self.length_scale = length_scale\n self.col_names = ['state', 'action', 'dense_reward','terminal', 'next_state', 'binary_reward', 'binary_return']\n self.replay_buffer = pd.DataFrame(columns = self.col_names)\n self.episode_list = []\n self.replay_buffer_episodes = []\n\n def new_episode(self, episode_confidence, sigma, case_name, case_number):\n self.episode_df = pd.DataFrame(columns = self.col_names)\n self.episode_confidence = episode_confidence\n self.episode_confidence_sigma = sigma\n self.episode_case_number = case_number\n self.episode_case_name = case_name\n\n def add_step(self, state, action, reward, terminal, new_state):\n self.episode_df.loc[len(self.episode_df)] = [state, action, reward, terminal, new_state, -1.0, -1.0]\n\n def end_episode(self, result, controller_type, dense_reward):\n self.store_episode_result(result['success'])\n episode = Episode(df = self.episode_df, controller_type = controller_type, confidence = self.episode_confidence,\n sigma = self.episode_confidence_sigma, result = result['success'], failure_mode = result['failure_mode'],\n case_number = self.episode_case_number, case_name = self.episode_case_name, dense_reward=dense_reward)\n self.episode_list.append(episode)\n self.add_to_replay_buffer(episode)\n return episode\n\n def 
add_demo_episode(self):\n self.add_to_replay_buffer('demo_episode')\n\n def add_saved_episode(self, episode):\n self.episode_list.append(episode)\n self.add_to_replay_buffer(episode)\n\n def add_to_replay_buffer(self, episode):\n\n # insert new episode into the window\n self.replay_buffer_episodes.insert(0, episode)\n if self.window_size != float('inf'):\n self.replay_buffer_episodes = self.replay_buffer_episodes[0:self.window_size]\n #print('There are %i demo episodes in the window' %(self.replay_buffer_episodes.count('demo_episode')))\n\n # construct the replay buffer for learner from the window of experience\n self.replay_buffer = pd.DataFrame(columns = self.col_names)\n for episode in self.replay_buffer_episodes:\n if episode != 'demo_episode':\n df_reverse = episode.episode_df.reindex(index=episode.episode_df.index[::-1])\n self.replay_buffer = pd.concat([self.replay_buffer, df_reverse], ignore_index=True)\n\n\n def store_episode_result(self, result):\n if result:\n episode_return = 1.0\n else:\n episode_return = 0.0\n self.episode_df['binary_return'] = episode_return\n self.episode_df.at[(len(self.episode_df)-1), 'binary_reward'] = episode_return\n\n def get_state_value(self, state):\n alpha = copy.copy(self.prior_alpha)\n beta = copy.copy(self.prior_beta)\n length_scale = self.length_scale\n\n for index, row in self.replay_buffer.iterrows():\n old_state = np.array(row['state'])\n state_delta = old_state - state\n\n # calculate the difference between the two states\n weight = np.exp(-1.0*(np.linalg.norm(state_delta/length_scale))**2)\n if weight < 1e-7:\n weight = 1e-7\n\n if np.isnan(round(row[\"binary_return\"])):\n print(row[\"binary_return\"])\n print(self.replay_buffer)\n\n # increment alpha for success and beta for failure\n if int(round(row[\"binary_return\"])) == 1:\n alpha = alpha + weight\n else:\n beta = beta + weight\n\n value = alpha/(alpha + beta)\n variance = alpha*beta/((alpha+beta)**2*(alpha+beta+1.0))\n sigma = np.sqrt(variance)\n return value, sigma, alpha, beta\n","sub_path":"src/learn_to_manipulate/experience.py","file_name":"experience.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415613814","text":"import numpy as np\nimport io\nimport torch\nfrom deep_sort.deep.original_model import Net\n\ninput_size = [64, 128]\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\noutput_onnx = \"pretrained/original_ckpt.onnx\"\n\nmodel = Net(reid=True)\nstate_dict = torch.load(\"pretrained/original_ckpt.t7\", map_location=lambda storage, loc: storage)['net_dict']\nmodel.load_state_dict(state_dict)\nmodel.eval()\nmodel = model.to(device)\n\nprint(\"==> Exporting model to ONNX format at '{}'\".format(output_onnx))\ninput_names = ['input_1']\noutput_names = ['output_1']\n\nonnx_bytes = io.BytesIO()\nzero_input = torch.zeros([1, 3] + input_size)\nzero_input = zero_input.to(device)\ndynamic_axes = {input_names[0]: {0:'batch'}}\nfor _, name in enumerate(output_names):\n dynamic_axes[name] = dynamic_axes[input_names[0]]\nextra_args = {'opset_version': 10, 'verbose': False,\n 'input_names': input_names, 'output_names': output_names,\n 'dynamic_axes': dynamic_axes}\ntorch.onnx.export(model, zero_input, onnx_bytes, **extra_args)\nwith open(output_onnx, 'wb') as out:\n 
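    # A note on the buffer-then-write pattern used here: exporting into the
    # in-memory BytesIO first means the .onnx file on disk is only created
    # once torch.onnx.export has completed without raising. As an optional
    # sanity check after the write (a sketch, assuming the `onnx` package is
    # available; it is not imported by the original script):
    #
    #     import onnx
    #     onnx.checker.check_model(onnx.load(output_onnx))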
out.write(onnx_bytes.getvalue())","sub_path":"convert_onnx.py","file_name":"convert_onnx.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459270176","text":"#\n# Practicing Calendars\n#\n\nimport calendar\n\nc = calendar.TextCalendar(calendar.SUNDAY)\nst = c.formatmonth(2020, 3, 0, 0)\nprint(st)\n\nhc = calendar.HTMLCalendar(calendar.MONDAY)\nst = hc.formatmonth(2020, 3)\nprint(st)\n\nfor name in calendar.month_name:\n print(name)\n\nprint(\"Team meeting will be on\")\nfor m in range(1, 13):\n cal = calendar.monthcalendar(2020, m)\n weekone = cal[0]\n weektwo = cal[1]\n\n if weekone[calendar.FRIDAY] != 0:\n meetday = weekone[calendar.FRIDAY]\n else:\n meetday = weektwo[calendar.FRIDAY]\n\n print(\"%10s %2d\" % (calendar.month_name[m], meetday))\n","sub_path":"chapter3/calendars.py","file_name":"calendars.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139917331","text":"import sys\nimport os \n\nrttm_path=sys.argv[1]\nreco2numspk_path=sys.argv[2]\nsegments_path=sys.argv[3]\n\n# open multi-reco rttm file, read: reco, start, end, spk \n# make dict: reco2spk_dict and seg_dict\nwith open(rttm_path) as rfile:\n seg_dict = {}\n reco2spk_dict = {}\n for line in rfile.readlines():\n items = line.strip().split()\n reco = items[1]\n start = float(items[3])\n end = start + float(items[4])\n spk = items[7]\n if reco not in seg_dict.keys():\n seg_dict[reco] = []\n reco2spk_dict[reco] = []\n seg_dict[reco].append([start, end])\n reco2spk_dict[reco].append(spk)\n\nreco_list = sorted(seg_dict.keys())\n\n# write into file: reco2numspk\nwith open(reco2numspk_path, 'w') as wfile:\n for reco in reco_list: \n wfile.write('{0} {1}\\n'.format(reco, len(set(reco2spk_dict[reco]))))\n\n# since some segments can be overlapped, we need to merge them first\nseg_merged_dict = {}\nfor reco in reco_list:\n for start, end in seg_dict[reco]:\n if reco not in seg_merged_dict.keys():\n seg_merged_dict[reco] = [[start, end]]\n elif(start <= seg_merged_dict[reco][-1][1] < end):\n seg_merged_dict[reco][-1][1] = end\n elif(start > seg_merged_dict[reco][-1][1]):\n seg_merged_dict[reco].append([start, end])\n\n# write into file: segments\nwith open(segments_path, 'w') as wfile:\n for reco in reco_list:\n for i, [start, end] in enumerate(seg_merged_dict[reco]):\n wfile.write('{1}_{0:03d} {1} {2:.2f} {3:.2f}\\n'.format(i, reco, start, end))\n\n\n","sub_path":"v2/local/extract_from_rttm.py","file_name":"extract_from_rttm.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27380965","text":"import os\nfrom View.barraProcesso import copyTreeGUI\nfrom tkinter import *\nfrom tkinter.ttk import Combobox, Separator\nfrom Model.arquivoconfig import ArquivoConfig\nimport Model.versoes as versoes\nfrom Model.mssql import Mssql\nfrom Model.oracle import Oracle\nfrom Model.spcfg import Spcfg\nfrom Model.operacoesConsole import OperacoesConsole\n\nclass UtilidadesSAJProcuradoria(object):\n def __init__(self, master):\n\n self.vRbComoUsoPgeNet = IntVar()\n self.vRbAbreSistema = IntVar()\n self.vRbAbreSistema.set(1)\n self.vBusca = StringVar()\n self.spcfg = Spcfg()\n self.mssql = Mssql()\n self.vServerConfigurado = StringVar()\n self.vCbAliasConfigurada = StringVar()\n\n self.ctPesquisa = Frame(master, bg='#CCCCCC')\n 
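        # ctPesquisa is the search container: it holds the "Pesquisar versão"
        # label plus the entry field whose bind() call refreshes the version
        # list. The angle-bracket event names (presumably <Return> here, and
        # <F2>..<F7> for the function-key bindings further down, to judge by
        # the eventoF2..eventoF7 handlers) were stripped out of the bind("")
        # strings when this source was dumped; the empty strings are an
        # artifact, not the original code.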
self.ctPesquisa.pack(anchor='center', padx=5, pady=5)\n\n self.lbBusca = Label(self.ctPesquisa, bg='#CCCCCC')\n self.lbBusca[\"text\"] = \"Pesquisar versão\"\n self.lbBusca.pack()\n self.tfTextoBusca = Entry(self.ctPesquisa, textvariable=self.vBusca)\n self.tfTextoBusca.pack()\n self.tfTextoBusca.bind(\"\", lambda x: self.versoesDisponiveisParaCopia())\n\n self.ctVersoes = Frame(master, bg='#CCCCCC')\n self.ctVersoes.pack(anchor='center', padx=5, pady=5)\n\n self.listaVersoes = Listbox(self.ctVersoes)\n self.listaVersoes.pack(anchor='center', ipady=30, ipadx=173)\n self.listaVersoes.bind(\"\", lambda x: self.copyPasta())\n self.listaVersoes.bind(\"\", lambda x: self.copyPasta())\n\n self.scrumbarLista = Scrollbar(self.listaVersoes)\n self.scrumbarLista.pack(fill=Y, anchor='ne', expand=1)\n self.listaVersoes.config(yscrollcommand=self.scrumbarLista.set)\n self.scrumbarLista.config(command=self.listaVersoes.yview)\n\n self.ctBases = Frame(master, bg='#CCCCCC')\n self.ctBases.pack(fill=X)\n\n self.lbTipoDeBanco = Label(self.ctBases, text='Tipo de banco: ', bg='#CCCCCC')\n self.lbTipoDeBanco.pack(side=LEFT)\n\n self.lbTipoDeBancoAtual = Label(self.ctBases, bg='#CCCCCC')\n self.lbTipoDeBancoAtual.pack(side=LEFT)\n\n self.lbAlias = Label(self.ctBases, text=' Alias:', bg='#CCCCCC')\n self.lbAlias.pack(side=LEFT)\n\n self.lbAliasConfigurada = Label(self.ctBases, bg='#CCCCCC')\n self.lbAliasConfigurada.pack(side=LEFT)\n\n self.ctAlterarBases = Frame(master, bg='#CCCCCC')\n self.ctAlterarBases.pack(fill=X)\n\n self.lbServer = Label(self.ctAlterarBases, text=\"Server SQL:\", bg='#CCCCCC')\n self.lbServer.pack(side=LEFT)\n self.cbServerConfigurado = Combobox(self.ctAlterarBases, textvariable=self.vServerConfigurado)\n self.cbServerConfigurado.pack(side=LEFT)\n self.cbServerConfigurado.bind(\"<>\", lambda e: self.chargeAliasBanco('SQLSERVER'))\n self.cbServerConfigurado.bind(\"\", lambda e: self.chargeAliasBanco('SQLSERVER'))\n\n self.lbBase = Label(self.ctAlterarBases, text=\"Bases:\", bg='#CCCCCC')\n self.lbBase.pack(side=LEFT)\n\n self.cbBases = Combobox(self.ctAlterarBases, textvariable=self.vCbAliasConfigurada)\n self.cbBases.pack(side=LEFT)\n self.cbBases.bind(\"<>\", lambda e: self.setAlias(self.vCbAliasConfigurada.get()))\n self.separador = Separator( orient=HORIZONTAL)\n self.separador.pack(fill=X, padx=5, pady=10)\n\n self.lbTextoExplicativoUsoDoConsole = Label(text='( F2 ) Abrir pge.net - ( F3 ) - Abrir adm - ( F4 ) Limpar cache - ( F5 ) - Fechar sistema\\n'\n '( F6 ) Base oracle - ( F7 ) - Base SQL', bg='#CCCCCC')\n self.lbTextoExplicativoUsoDoConsole.pack()\n\n self.aposCarregarTela()\n self.validaCampos()\n\n self.ctBases.bind(\"\", lambda a: self.eventoF2())\n self.ctBases.bind(\"\", lambda a: self.eventoF3())\n self.ctBases.bind(\"\", lambda a: self.eventoF4())\n self.ctBases.bind(\"\", lambda a: self.eventoF5())\n self.ctBases.bind(\"\", lambda a: self.eventoF6())\n self.ctBases.bind(\"\", lambda a: self.eventoF7())\n\n self.listaVersoes.bind(\"\", lambda a: self.eventoF2())\n self.listaVersoes.bind(\"\", lambda a: self.eventoF3())\n self.listaVersoes.bind(\"\", lambda a: self.eventoF4())\n self.listaVersoes.bind(\"\", lambda a: self.eventoF5())\n self.listaVersoes.bind(\"\", lambda a: self.eventoF6())\n self.listaVersoes.bind(\"\", lambda a: self.eventoF7())\n\n def aposCarregarTela(self):\n cont=0\n self.config = ArquivoConfig()\n dir = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n for i in [nome for subpastas in [[os.path.join(i[0], j) for j in i[2]] for i in 
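                # The nested comprehension continuing around this comment
                # flattens os.walk(dir) into a single list of full file
                # paths: the inner list joins each dirpath i[0] with its
                # filenames i[2], and the outer loop flattens those
                # per-directory lists so the loop body can look for
                # Config.ini anywhere under dir.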
os.walk(dir)] for nome in subpastas]:\n if 'Config.ini' in i:\n cont = cont+1\n else:\n cont = cont+0\n if cont == 0:\n self.config.createArquivoConfig(dir)\n if cont == 1:\n dirServidor = self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirservidor')\n if dirServidor == '':\n self.config.setOptionValue(dir, 'diretoriosDestinos', 'dirservidor',\n self.config.getCaminhosDiretorios('servidor').replace('\\\\SPJSERVIDOR.exe',''))\n self.config.setOptionValue(dir, 'diretoriosDestinos', 'dirclientepgenet',\n self.config.getCaminhosDiretorios('cliente').replace('\\\\SPJCLIENTEAPP.exe', ''))\n self.config.setOptionValue(dir, 'diretoriosDestinos', 'dirclienteadm',\n self.config.getCaminhosDiretorios('adm').replace('\\\\SPJADMCLIENTEAPP.exe', ''))\n else:\n pass\n\n def validaCampos(self):\n try:\n self.config = ArquivoConfig()\n dirConfig = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n localServer = self.config.getSectionOptionValue(dirConfig,'diretoriosDestinos','dirservidor')\n tipoBanco = self.spcfg.getSectionOptionValue(localServer, 'Database', 'tipobanco')\n aliasConfigurada = self.spcfg.getSectionOptionValue(localServer, 'Database', 'alias')\n serverConfigurado = self.spcfg.getSectionOptionValue(localServer, 'Database', 'server')\n\n self.lbTipoDeBancoAtual['text'] = tipoBanco\n self.lbAliasConfigurada['text'] = aliasConfigurada\n self.cbServerConfigurado['value'] = serverConfigurado\n\n if tipoBanco == 'ORACLE':\n self.cbBases.delete(0, END)\n self.cbServerConfigurado.delete(0, END)\n self.cbServerConfigurado.config(state=DISABLED)\n self.cbBases['value'] = self.chargeAliasBanco(tipoBanco)\n\n elif tipoBanco == 'SQLSERVER':\n self.cbBases.delete(0, END)\n self.cbServerConfigurado.config(state=ACTIVE)\n self.cbServerConfigurado['value'] = serverConfigurado\n else:\n pass\n except:\n pass\n\n def chargeAliasBanco(self, tipoBanco):\n self.oracle = Oracle()\n self.config = ArquivoConfig()\n dirConfig = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n localServer = self.config.getSectionOptionValue(dirConfig, 'diretoriosDestinos', 'dirservidor')\n serverConfigurado = self.spcfg.getSectionOptionValue(localServer, 'Database', 'server')\n\n if tipoBanco == 'ORACLE':\n self.cbServerConfigurado.delete(0, END)\n return self.oracle.getAliasCatalogadas()\n\n elif tipoBanco == 'SQLSERVER':\n self.cbBases.config(state=ACTIVE)\n self.cbBases['value'] = str(self.mssql.getBancosDaInstanciaSql(serverConfigurado)).replace('[(', '').replace(',)]', '').replace('(','').replace(',),','').replace(\"'\",\"\")\n\n def configuracaoAuxiliar(self, configuracao):\n self.config = ArquivoConfig()\n dirConfig = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n localServer = self.config.getSectionOptionValue(dirConfig, 'diretoriosDestinos', 'dirservidor')\n\n if configuracao == 'dirConfig':\n return self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n elif configuracao == 'localServer':\n return self.config.getSectionOptionValue(dirConfig, 'diretoriosDestinos', 'dirservidor')\n elif configuracao == 'serverConfigurado':\n return self.spcfg.getSectionOptionValue(localServer, 'Database', 'server')\n else:\n pass\n\n return\n\n def getDiretorioAplicativoUtilidadesSajProcuradorias(self):\n diretorio = os.path.join(sys.path[0], sys.argv[0])\n return diretorio.replace('/', '\\\\').replace('\\\\UtilidadesSAJProcuradorias.exe', '').replace('\\\\UtilidadesSAJProcuradorias.py', '')\n\n def eventoF2(self):\n self.operacao = OperacoesConsole()\n dir = 
self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n self.operacao.abroSistema(self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirservidor'), self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirclientepgenet'), self.config.getSectionOptionValue(dir, 'executaveis', 'clientepgenet'))\n\n def eventoF3(self):\n self.operacao = OperacoesConsole()\n dir = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n self.operacao.abroSistema(self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirservidor'), self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirclienteadm'), self.config.getSectionOptionValue(dir, 'executaveis', 'clienteadm'))\n\n def eventoF4(self):\n self.operacao = OperacoesConsole()\n dir = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n self.operacao.limpoCache(self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirservidor'))\n\n def eventoF5(self):\n self.operacao = OperacoesConsole()\n result = self.operacao.fechoSistema()\n self.monitoramento(result, 'ok')\n\n def eventoF6(self):\n dir = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n self.spcfg.setValue(self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirservidor'), 'Database', 'tipobanco','ORACLE')\n self.validaCampos()\n\n def eventoF7(self):\n dir = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n self.spcfg.setValue(self.config.getSectionOptionValue(dir, 'diretoriosDestinos', 'dirservidor'), 'Database', 'tipobanco', 'SQLSERVER')\n self.validaCampos()\n\n def versoesDisponiveisParaCopia(self):\n dir = self.getDiretorioAplicativoUtilidadesSajProcuradorias()\n self.versoes = versoes.Versoes()\n self.listaVersoes.delete(0, END)\n for i in self.versoes.getListaVersoesFeaturesDisponivel(dir):\n if self.vBusca.get() in i:\n self.listaVersoes.insert(END, 'Features - '+i)\n\n for i in self.versoes.getListaVersoesInternaDisponivel(dir):\n if self.vBusca.get() in i:\n self.listaVersoes.insert(END, 'Interna - ' + i)\n\n def setAlias(self, alias):\n self.spcfg.setValue(self.configuracaoAuxiliar('localServer'), 'Database', 'alias', alias)\n self.validaCampos()\n\n def copyPasta(self):\n\n copyTreeGUI(self.versoes.getSrc(self.getDiretorioAplicativoUtilidadesSajProcuradorias(), self.listaVersoes.get(ACTIVE)),\n ['C:\\\\SAJ\\PGE.net\\\\ser', 'C:\\\\SAJ\\PGE.net', 'C:\\\\SAJ\\PGE.net'],\n self.versoes.getVersaoTratada(self.getDiretorioAplicativoUtilidadesSajProcuradorias(), self.listaVersoes.get(ACTIVE)))\n\nroot = Tk()\nroot.config(bg='#CCCCCC')\nroot.geometry('493x330+0+0')\nroot.title(\"Atualiza versões\")\n\nUtilidadesSAJProcuradoria(root)\nroot.mainloop()\n","sub_path":"View/UtilidadesSAJProcuradorias.py","file_name":"UtilidadesSAJProcuradorias.py","file_ext":"py","file_size_in_byte":11895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"584842369","text":"from django.conf.urls import url\nfrom . 
import views\nfrom multiurl import multiurl\n\napp_name = 'investimento'\nurlpatterns = [\n multiurl(\n url(r'^', views.investimento, name='investimento'),\n url(r'^', views.resgatar, name='resgate'),\n )\n]\n","sub_path":"apps/operacoes/investimento/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"131458857","text":"# Imports the required modules for the game\ntry:\n import simplegui\nexcept ImportError:\n import SimpleGUICS2Pygame.simpleguics2pygame as simplegui\nimport random\nimport math\n\nCANVAS_WIDTH = 920\nCANVAS_HEIGHT = 700\nWIDTH = CANVAS_WIDTH\nHEIGHT = CANVAS_HEIGHT\nBATTLE_ON = False\nVERTICAL_DIVIDERS = 10\nHORIZONTAL_DIVIDERS = 10\nPLAYER_ALIVE = True\nmap = 0\nscore = 0\nbeenHealed = False\nIMG_ROT = 0\n\n\n# A class for creating and manipulating vectors\nclass Vector:\n # Initialiser\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n # Returns a string representation of the vector\n def __str__(self):\n return \"(\" + str(self.x) + \",\" + str(self.y) + \")\"\n\n # Tests the equality of this vector and another\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n # Tests the inequality of this vector and another\n def __ne__(self, other):\n return not self.__eq__(other)\n\n # Returns a tuple with the point corresponding to the vector\n def getP(self):\n return (self.x, self.y)\n\n # Returns a copy of the vector\n def copy(self):\n return Vector(self.x, self.y)\n\n # Adds another vector to this vector\n def add(self, other):\n self.x += other.x\n self.y += other.y\n return self\n\n # Creates a copy of the current vector and adds another\n def __add__(self, other):\n return self.copy().add(other);\n\n # Negates the vector (makes it point in the opposite direction)\n def negate(self):\n return self.multiply(-1)\n\n # Creates a copy of the current vector and negates it\n def __neg__(self):\n return self.copy().negate()\n\n # Subtracts another vector from this vector\n def subtract(self, other):\n return self.add(-other)\n\n # Creates a copy of the current vector and subtracts another\n def __sub__(self, other):\n return self.copy().subtract(other)\n\n # Multiplies the vector by a scalar\n def multiply(self, k):\n self.x *= k\n self.y *= k\n return self\n\n # Creates a copy of the current vector and multiplies it by another\n def __mul__(self, k):\n return self.copy().multiply(k)\n\n # Copy of the above ^^^^\n def __rmul__(self, k):\n return self.copy().multiply(k)\n\n # Divides the vector by a scalar\n def divide(self, k):\n return self.multiply(1 / k)\n\n # Creates a copy of the current vector and divides it by another\n def __truediv__(self, k):\n return self.copy().divide(k)\n\n # Normalizes the vector\n def normalize(self):\n return self.divide(self.length())\n\n # Returns a normalized version of the vector\n def getNormalized(self):\n return self.copy().normalize()\n\n # Returns the dot product of this vector with another one\n def dot(self, other):\n return self.x * other.x + self.y * other.y\n\n # Returns the length of the vector\n def length(self):\n return math.sqrt(self.x ** 2 + self.y ** 2)\n\n # Returns the squared length of the vector\n def lengthSquared(self):\n return self.x ** 2 + self.y ** 2\n\n # Reflect this vector on a normal\n def reflect(self, normal):\n n = normal.copy()\n n.multiply(2 * self.dot(normal))\n self.subtract(n)\n return self\n\n\n# Manages the keyboard input\nclass Keyboard:\n # 
Initialises the variables to determine whether a key is pressed\n def __init__(self):\n self.right = False\n self.left = False\n self.up = False\n self.down = False\n self.run = False\n\n # If a key is down, set its corresponding variable to True\n def keyDown(self, key):\n if key == simplegui.KEY_MAP['right']:\n self.right = True\n if key == simplegui.KEY_MAP['left']:\n self.left = True\n if key == simplegui.KEY_MAP['up']:\n self.up = True\n if key == simplegui.KEY_MAP['down']:\n self.down = True\n if key == simplegui.KEY_MAP['space']:\n self.run = True\n\n # If a key is up, set its corresponding variable to False\n def keyUp(self, key):\n if key == simplegui.KEY_MAP['right']:\n self.right = False\n if key == simplegui.KEY_MAP['left']:\n self.left = False\n if key == simplegui.KEY_MAP['up']:\n self.up = False\n if key == simplegui.KEY_MAP['down']:\n self.down = False\n if key == simplegui.KEY_MAP['space']:\n self.run = False\n\n\n# Creates each map and collisions within the map, and adds any relevant NPCs\nclass Map:\n # Initialises the maps and collisions\n def __init__(self, ImageWidthHeight):\n global map\n global BATTLE_ON\n\n self.mushroom = \"https://orig00.deviantart.net/d700/f/2008/315/4/e/red_mushroom_by_slashingoverlord.png\"\n self.M0LevelUp = Pokeballs(self.mushroom, (256, 256), (590, 460))\n self.M1LevelUp = Pokeballs(self.mushroom, (256, 256), (675, 575))\n self.M2LevelUp = Pokeballs(self.mushroom, (256, 256), (780, 570))\n self.M4LevelUp = Pokeballs(self.mushroom, (256, 256), (725, 290))\n self.M5LevelUp = Pokeballs(self.mushroom, (256, 256), (250, 200))\n self.M6LevelUp = Pokeballs(self.mushroom, (256, 256), (460, 500))\n\n self.M0LevelUpHB = collision(580, 600, 430, 4700)\n self.M0LevelUp1 = False\n self.M1LevelUpHB = collision(665, 685, 565, 585)\n self.M1LevelUp1 = False\n self.M2LevelUpHB = collision(770, 790, 560, 580)\n self.M2LevelUp1 = False\n self.M4LevelUpHB = collision(715, 735, 280, 300)\n self.M4LevelUp1 = False\n self.M5LevelUpHB = collision(240, 260, 190, 210)\n self.M5LevelUp1 = False\n self.M6LevelUpHB = collision(450, 480, 490, 510)\n self.M6LevelUp1 = False\n self.M3LevelUp1 = False\n\n # Finaly Boss\n self.enemyRed = [\n Enemy(\"OnionKnight\", random.randint(140, 160), random.randint(130, 150), random.randint(140, 160),\n random.randint(130, 150), (2, 0)),\n Enemy(\"WaterBowser\", random.randint(140, 160), random.randint(140, 160), random.randint(140, 160),\n random.randint(150, 170), (8, 0)),\n Enemy(\"MyMixtape\", random.randint(120, 140), random.randint(140, 160), random.randint(150, 170),\n random.randint(140, 160), (5, 0))]\n\n # Level Bosses\n self.enemyBoss4 = [Enemy(\"MeTOO\", random.randint(125, 145), random.randint(115, 135), random.randint(125, 145),\n random.randint(115, 135), (24, 5)),\n Enemy(\"Ditto\", random.randint(125, 145), random.randint(125, 145), random.randint(125, 145),\n random.randint(135, 155), (0, 6)),\n Enemy(\"Diabetes\", random.randint(105, 125), random.randint(125, 145),\n random.randint(135, 155),\n random.randint(125, 145), (23, 5))]\n\n self.enemyBoss3 = [\n Enemy(\"PuppyDaddySenpai\", random.randint(115, 135), random.randint(105, 125), random.randint(115, 135),\n random.randint(105, 125), (23, 9)),\n Enemy(\"RainbowBirb\", random.randint(115, 135), random.randint(115, 135), random.randint(115, 135),\n random.randint(125, 145), (24, 9)),\n Enemy(\"ArmouredCore\", random.randint(95, 115), random.randint(115, 135), random.randint(125, 145),\n random.randint(115, 135), (22, 9))]\n\n self.enemyBoss2 = [\n 
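            # Each Enemy below takes a name, four base stats drawn from
            # random bands (the bands step up by roughly ten points per boss
            # tier, keeping pace with the player's mushroom level-ups), and
            # an (x, y) frame index into the shared sprite sheet, matching
            # the Pokemon.frameIndex* tuples used when choosing a starter.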
Enemy(\"Lipstick\", random.randint(105, 125), random.randint(95, 115), random.randint(105, 125),\n random.randint(95, 115), (8, 15)),\n Enemy(\"BluFsh\", random.randint(105, 125), random.randint(105, 125), random.randint(105, 125),\n random.randint(115, 135), (6, 15)),\n Enemy(\"Landlord\", random.randint(85, 105), random.randint(105, 125), random.randint(115, 135),\n random.randint(105, 125), (7, 15))]\n\n self.enemyBoss1 = [Enemy(\"WetDoggo\", random.randint(95, 115), random.randint(85, 105), random.randint(95, 115),\n random.randint(85, 105), (8, 5)),\n Enemy(\"SpikyDoggo\", random.randint(95, 115), random.randint(95, 115),\n random.randint(95, 115),\n random.randint(105, 125), (9, 5)),\n Enemy(\"BurntDogo\", random.randint(75, 95), random.randint(95, 115), random.randint(105, 125),\n random.randint(95, 115), (10, 5))]\n\n # Level Trainers\n self.enemyTrainer5 = [Enemy(\"EdgeLord\", 120, 120, 120, 120, (8, 14)),\n Enemy(\"CatchTheseHands\", 130, 105, 120, 125, (11, 8)),\n Enemy(\"LiquidSnek\", 130, 100, 180, 100, (4, 5)),\n Enemy(\"JoMama\", 180, 110, 110, 100, (17, 5)),\n Enemy(\"MomsSpaghetti\", 125, 120, 120, 125, (10, 15))]\n\n self.enemyTrainer4 = [Enemy(\"PunchingBag\", 200, 50, 175, 25, (1, 8)),\n Enemy(\"American\", 110, 100, 140, 80, (13, 11)),\n Enemy(\"HeavyMetal\", 110, 110, 110, 110, (5, 12)),\n Enemy(\"RuleThirtyFour\", 90, 120, 90, 120, (6, 11)),\n Enemy(\"Thorny\", 100, 120, 120, 90, (8, 1))]\n\n self.enemyTrainer3 = [Enemy(\"Smerfing\", 95, 85, 85, 120, (4, 2)),\n Enemy(\"MetaBroke\", 100, 100, 100, 100, (0, 15)),\n Enemy(\"NotLow\", 90, 110, 110, 180, (20, 10)),\n Enemy(\"Dumbo\", 150, 65, 100, 75, (6, 9)), Enemy(\"TeenMom\", 180, 55, 70, 55, (16, 9))]\n\n self.enemyTrainer2 = [Enemy(\"ShuckMe\", 125, 50, 150, 50, (12, 8)),\n Enemy(\"CrimsonChin\", 100, 70, 70, 70, (7, 8)),\n Enemy(\"KimK\", 70, 20, 100, 100, (23, 4)),\n Enemy(\"Shrooms\", 85, 85, 85, 85, (10, 11)),\n Enemy(\"BrainyBacon\", 100, 80, 70, 80, (0, 13))]\n\n self.enemyTrainer1 = [Enemy(\"Bork\", 50, 70, 50, 70, (8, 2)), Enemy(\"DayCare\", 80, 30, 100, 30, (14, 4)),\n Enemy(\"Thicc\", 70, 60, 55, 55, (14, 14)),\n Enemy(\"HepMe\", 80, 75, 60, 45, (23, 7)), Enemy(\"MrSteelYoBirb\", 50, 70, 65, 60, (1, 9))]\n\n # All starters balanced to 310 base total stats\n self.playerMon = [Player1(\"OnionKnight\", 75, 75, 90, 70, (2, 0)), Player1(\"MyMixtape\", 70, 90, 75, 75, (5, 0)),\n Player1(\"WaterBowser\", 75, 80, 80, 75, (8, 0)),\n Player1(\"StingingPetals\", 80, 70, 80, 80, (3, 6)),\n Player1(\"CharGrilled\", 75, 80, 75, 80, (6, 6)),\n Player1(\"Waterbator\", 80, 75, 80, 75, (9, 6)),\n Player1(\"FeafyIsHere\", 70, 85, 70, 85, (3, 10)),\n Player1(\"BlazingChicken\", 80, 85, 70, 75, (6, 10)),\n Player1(\"MySwamp\", 85, 85, 80, 60, (9, 10))]\n\n self.a = Player1(\"Choose Mon\", 1, 1, 1, 1, (6, 5))\n self.b = Enemy(\"Bork\", 1, 1, 1, 1, (0, 0))\n self.a.enemy = self.b\n self.b.player = self.a\n\n # map = 3 #Change this variable to the map that you want to go to.\n self.begin = True\n self.begin2 = False\n\n self.url = simplegui.load_image(\"https://image.ibb.co/jdF3sc/Capture_d_cran_2018_02_28_12_35_14.png\")\n self.ImageWidthHeight = ImageWidthHeight\n self.ImageCenter = (ImageWidthHeight[0] / 2, ImageWidthHeight[1] / 2)\n self.CenterDest = (WIDTH / 2, HEIGHT / 2)\n self.DimDest = (WIDTH, HEIGHT)\n\n self.M0tree1 = collision(540, 590, 280, 380)\n self.M0tree2 = collision(435, 490, 165, 270)\n self.M0tree3 = collision(380, 430, 220, 325)\n self.M0tree4 = collision(595, 648, 165, 270)\n\n self.M1tree1 = collision(700, 
920, 0, 150)\n self.M1tree2 = collision(375, 430, 220, 920)\n self.M1tree3 = collision(485, 600, 220, 920)\n self.M1tree4 = collision(0, 105, 0, 475)\n self.M1tree5 = collision(325, 380, 0, 150)\n\n self.M2tree1 = collision(100, 160, 475, 490)\n self.M2tree2 = collision(160, 215, 530, 640)\n self.M2tree3 = collision(270, 330, 260, 370)\n self.M2tree4 = collision(160, 225, 0, 45)\n\n self.M3tree1 = collision(160, 216, 370, 470)\n self.M3tree2 = collision(220, 275, 420, 520)\n self.M3tree3 = collision(325, 380, 370, 470)\n self.M3tree4 = collision(485, 540, 420, 520)\n self.M3tree5 = collision(590, 650, 370, 470)\n self.M3tree6 = collision(700, 750, 370, 470)\n\n self.M0border1 = collision(0, 646, 0, 155)\n self.M0border2 = collision(0, 150, 0, HEIGHT)\n self.M0border3 = collision(0, WIDTH, 540, HEIGHT)\n self.M0border4 = collision(760, WIDTH, 0, HEIGHT)\n self.M0border5 = collision(700, WIDTH, 0, 100)\n\n self.M1border1 = collision(756, WIDTH, 0, HEIGHT)\n self.M1border2 = collision(700, 920, 300, 520)\n self.M1border3 = collision(0, 920, 0, 90)\n self.M1border4 = collision(0, 220, 0, 200)\n self.M1border5 = collision(0, 160, 0, 360)\n self.M1border6 = collision(220, 600, 355, 700)\n self.M1border7 = collision(275, 600, 300, 355)\n self.M1border8 = collision(333, 600, 255, 300)\n self.M1border9 = collision(0, 240, 537, 700)\n\n self.M2border1 = collision(810, 920, 0, 470)\n self.M2border2 = collision(810, 920, 560, 700)\n self.M2border3 = collision(870, 920, 535, 700)\n self.M2border4 = collision(0, 55, 420, 700)\n self.M2border5 = collision(0, 55, 0, 370)\n self.M2border6 = collision(0, 920, 0, 20)\n self.M2border7 = collision(0, 920, 690, 700)\n self.M2border8 = collision(380, 430, 310, 415)\n\n self.M3border1 = collision(855, 920, 0, 360)\n self.M3border2 = collision(855, 920, 415, 700)\n self.M3border3 = collision(0, 50, 0, 300)\n self.M3border4 = collision(0, 50, 365, 700)\n self.M3border5 = collision(0, 920, 0, 45)\n self.M3border6 = collision(0, 920, 630, 700)\n\n self.Miborder1 = collision(815, 920, 0, 700)\n self.Miborder2 = collision(0, 115, 0, 700)\n self.Miborder3 = collision(0, 920, 0, 260)\n self.Miborder4 = collision(0, 920, 415, 700)\n\n self.M4border1 = collision(865, 920, 0, 290)\n self.M4border2 = collision(865, 920, 360, 700)\n self.M4border3 = collision(0, 650, 0, 200)\n self.M4border4 = collision(0, 920, 0, 140)\n self.M4border5 = collision(0, 545, 0, 310)\n self.M4border6 = collision(0, 490, 0, 365)\n self.M4border7 = collision(0, 100, 0, 700)\n self.M4border8 = collision(0, 170, 530, 700)\n self.M4border9 = collision(0, 920, 595, 700)\n self.M4border10 = collision(800, 920, 525, 700)\n\n self.M5border1 = collision(0, 230, 0, 210)\n self.M5border2 = collision(0, 285, 0, 150)\n self.M5border3 = collision(0, 920, 0, 100)\n self.M5border4 = collision(865, 920, 0, 700)\n self.M5border5 = collision(0, 65, 0, 700)\n self.M5border6 = collision(0, 220, 575, 700)\n self.M5border7 = collision(270, 920, 580, 700)\n self.M5border8 = collision(325, 920, 530, 700)\n self.M5border10 = collision(535, 920, 475, 700)\n self.M5border11 = collision(480, 810, 155, 420)\n self.M5border12 = collision(115, 260, 260, 410)\n self.M5border13 = collision(115, 210, 260, 520)\n\n self.M6border1 = collision(650, 920, 0, 700)\n self.M6border2 = collision(0, 275, 0, 920)\n self.M6border3 = collision(0, 920, 0, 150)\n self.M6border4 = collision(0, 385, 200, 365)\n self.M6border5 = collision(540, 920, 200, 365)\n self.M6border6 = collision(0, 380, 580, 700)\n self.M6border7 = collision(430, 920, 580, 
700)\n\n self.Ball = \"http://pixelartmaker.com/art/797ff81281c7a32.png\"\n self.M0Ball1 = Pokeballs(self.Ball, (1700, 1700), (300, 245))\n self.M0Ball2 = Pokeballs(self.Ball, (1700, 1700), (250, 245))\n self.M0Ball3 = Pokeballs(self.Ball, (1700, 1700), (350, 245))\n\n self.M0BallBorder1 = collision(225, 275, 216, 250)\n self.M0BallBorder2 = collision(275, 330, 216, 250)\n self.M0BallBorder3 = collision(330, 375, 216, 250)\n\n self.Nurse = \"https://orig00.deviantart.net/c2c9/f/2013/094/a/8/joy_by_innermobius-d60e19z.png\"\n self.M0Nurse1 = NurseJoy(self.Nurse, (272 / 4, 288 / 4), (730, 135))\n self.M1Nurse1 = NurseJoy(self.Nurse, (272 / 4, 288 / 4), (240, 130))\n self.M2Nurse1 = NurseJoy(self.Nurse, (272 / 4, 288 / 4), (500, 90))\n self.MiNurse1 = NurseJoy(self.Nurse, (272 / 4, 288 / 4), (730, 290))\n self.M4Nurse1 = NurseJoy(self.Nurse, (272 / 4, 288 / 4), (430, 500))\n self.M5Nurse1 = NurseJoy(self.Nurse, (272 / 4, 288 / 4), (80, 300))\n\n self.Trainer0 = \"https://orig00.deviantart.net/c9d6/f/2011/137/2/2/kymotonian_wally_complete_by_rafael_animal-d3gk331.png\"\n self.M0Trainer = Trainers(self.Trainer0, (64, 64), (675, 70))\n self.M0TrainerBorder = collision(645, 705, 20, 100)\n self.fight = False\n\n self.Trainer1 = \"https://i.imgur.com/FzG6j3L.png\"\n self.M1Trainer = Trainers(self.Trainer1, (64, 64), (65, 500))\n self.M1TrainerBorder = collision(0, 75, 470, 540)\n self.fight1 = False\n\n self.Trainer2 = \"https://i.imgur.com/sQ2Dd0e.png\"\n self.M2Trainer = Trainers(self.Trainer2, (64, 64), (565, 500))\n self.M2TrainerBorder = collision(555, 580, 470, 530)\n self.fight2 = False\n\n self.Trainer21 = \"https://i.imgur.com/b4Mtzsv.png\"\n self.M21Trainer = Trainers(self.Trainer21, (64, 64), (630, 145))\n self.M21TrainerBorder = collision(580, 680, 120, 160)\n self.fight21 = False\n\n self.Trainer3 = \"https://i.imgur.com/FqzfZzk.png\"\n self.M3Trainer = Trainers(self.Trainer3, (64, 64), (280, 610))\n self.M3TrainerBorder = collision(265, 295, 580, 700)\n self.fight3 = False\n\n self.Trainer31 = \"https://i.imgur.com/dky9sEx.png\"\n self.M31Trainer = Trainers(self.Trainer31, (64, 64), (785, 215))\n self.M31TrainerBorder = collision(765, 810, 200, 230)\n self.fight31 = False\n\n self.Trainer4 = \"https://i.imgur.com/NOBDCGR.png\"\n self.M4Trainer = Trainers(self.Trainer4, (64, 64), (675, 240))\n self.M4TrainerBorder = collision(650, 700, 200, 260)\n self.fight4 = False\n\n self.Trainer41 = \"https://i.imgur.com/NOBDCGR.png\"\n self.M41Trainer = Trainers(self.Trainer41, (64, 64), (241, 385))\n self.M41TrainerBorder = collision(220, 255, 370, 400)\n self.fight41 = False\n\n self.Trainer5 = \"https://cdn.discordapp.com/attachments/416163882189717544/421340540722872321/brenddownvec.png\"\n self.M5Trainer = Trainers(self.Trainer5, (64, 64), (405, 105))\n self.M5TrainerBorder = collision(385, 430, 0, 125)\n self.fight5 = False\n\n self.Trainer51 = \"https://i.imgur.com/HXZ5hUp.png\"\n self.M51Trainer = Trainers(self.Trainer51, (64, 64), (570, 410))\n self.M51TrainerBorder = collision(550, 600, 320, 425)\n self.fight51 = False\n\n self.Trainer6 = \"https://cdn.discordapp.com/attachments/416163882189717544/421339793394237450/redvec.png\"\n self.M6Trainer = Trainers(self.Trainer6, (64, 64), (460, 175))\n self.M6TrainerBorder = collision(410, 515, 0, 200)\n self.fight6 = False\n\n self.M0NurseBorder = collision(700, 755, 100, 164)\n self.M1NurseBorder = collision(220, 265, 90, 150)\n self.M2NurseBorder = collision(475, 525, 50, 110)\n self.MiNurseBorder = collision(700, 750, 260, 300)\n 
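        # Hitbox convention, inferred from how these rectangles line up with
        # the sprite positions above (the collision class itself is defined
        # elsewhere in the file): collision(x_min, x_max, y_min, y_max) is an
        # axis-aligned box, and isInside() reports whether the player is in
        # it. Wall boxes call it every update purely for its blocking side
        # effect; trigger boxes branch on its return value.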
self.M4NurseBorder = collision(400, 460, 470, 530)\n self.M5NurseBorder = collision(0, 120, 270, 330)\n\n self.M0nextMap = collision(640, 700, -10, 40)\n self.M1nextMap = collision(0, 30, 350, 540)\n self.M2nextMap = collision(0, 30, 350, 540)\n self.M3nextMap = collision(0, 20, 295, 370)\n self.M4nextMap = collision(220, 270, 360, 367)\n self.M5nextMap = collision(375, 435, 100, 102)\n\n self.M1previousMap = collision(550, 800, 690, 700)\n self.M2previousMap = collision(890, 920, 450, 550)\n self.M3previousMap = collision(885, 920, 300, 420)\n self.MipreviousMap = collision(650, 700, 415, 475)\n self.M4previousMap = collision(895, 920, 250, 370)\n self.M5previousMap = collision(215, 275, 650, 700)\n self.M6previousMap = collision(375, 435, 640, 700)\n\n self.M2river1 = collision(665, 850, 0, 80)\n self.M2river2 = collision(600, 740, 55, 250)\n self.M2river3 = collision(230, 315, 0, 90)\n self.M2river4 = collision(230, 420, 50, 90)\n self.M2river5 = collision(230, 420, 140, 185)\n self.M2river6 = collision(275, 425, 185, 300)\n self.M2river7 = collision(340, 635, 200, 350)\n self.M2river8 = collision(550, 700, 240, 300)\n self.M2river9 = collision(450, 580, 340, 390)\n self.M2river10 = collision(500, 580, 350, 470)\n self.M2river11 = collision(500, 645, 430, 470)\n self.M2river12 = collision(500, 645, 525, 700)\n self.M2river13 = collision(440, 700, 585, 920)\n self.M2river14 = collision(390, 700, 645, 920)\n\n self.M3house1 = collision(485, 760, 100, 300)\n self.M3house2 = collision(160, 435, 100, 300)\n\n self.M3door1 = collision(325, 385, 300, 305)\n self.M3door2 = collision(645, 700, 300, 305)\n\n self.Mifurniture1 = collision(700, 920, 300, 375)\n self.Mifurniture2 = collision(0, 655, 0, 300)\n self.Mifurniture3 = collision(0, 165, 0, 700)\n self.Mifurniture4 = collision(215, 270, 0, 360)\n self.Mifurniture5 = collision(325, 380, 0, 360)\n\n self.M0NPC1 = NPCs((470, 345),\n \"https://orig00.deviantart.net/9f10/f/2011/137/a/1/kymotonian_birch_complete_by_rafael_animal-d3gk2yc.png\",\n (256, 256), 0, 0)\n self.M0MPCBorder1 = collision(445, 495, 310, 365)\n\n self.M1NPC1 = NPCs((731, 200),\n \"https://orig00.deviantart.net/a375/f/2011/137/a/3/kymotonian_bugcatcher_complete_by_rafael_animal-d3gk7fw.png\",\n (256, 256), 0, 1)\n self.M1MPCBorder1 = collision(700, 920, 150, 220)\n self.fight11 = False\n self.M1NPC2 = NPCs((460, 250),\n \"https://orig00.deviantart.net/2416/f/2011/137/e/b/kyle_rival_mother_complete__by_rafael_animal-d3gkcca.png\",\n (256, 256), 0, 3)\n self.M1MPCBorder2 = collision(430, 485, 225, 255)\n self.fight12 = False\n self.M1NPC3 = NPCs((135, 400),\n \"https://orig00.deviantart.net/a375/f/2011/137/a/3/kymotonian_bugcatcher_complete_by_rafael_animal-d3gk7fw.png\",\n (256, 256), 0, 2)\n self.M1MPCBorder3 = collision(100, 160, 360, 400)\n self.fight13 = False\n\n self.M2NPC1 = NPCs((720, 340),\n \"https://orig00.deviantart.net/1b0f/f/2011/137/d/a/kymotonian_roxanne_complete_by_rafael_animal-d3gkbqi.png\",\n (256, 256), 0, 0)\n self.M2MPCBorder1 = collision(700, 740, 320, 360)\n self.fight24 = False\n self.M2NPC2 = NPCs((250, 610),\n \"https://orig00.deviantart.net/6676/f/2011/137/5/f/kymotonian_norman_complete__by_rafael_animal-d3gk3tf.png\",\n (256, 256), 0, 3)\n self.M2MPCBorder2 = collision(230, 270, 580, 625)\n self.fight22 = False\n self.M2NPC3 = NPCs((340, 160),\n \"https://orig00.deviantart.net/1ce4/f/2011/137/1/b/kyle_hoenn_swimmer_2_complete_by_rafael_animal-d3gk589.png\",\n (254, 254), 0, 3)\n self.M2MPCBorder3 = collision(300, 330, 139, 160)\n self.fight23 = 
False\n\n self.M3NPC1 = NPCs((750, 520),\n \"https://orig00.deviantart.net/30aa/f/2011/137/f/a/kymotonian_shop__s_boy_complete_by_rafael_animal-d3gk754.png\",\n (256, 256), 0, 3)\n self.M3MPCBorder1 = collision(720, 775, 475, 535)\n self.M3NPC2 = NPCs((455, 50),\n \"https://orig00.deviantart.net/7bcb/f/2012/304/5/9/young_boy_ow___bw_style_by_putillabarata-d5jjgkf.png\",\n (256, 256), 0, 0)\n self.M3MPCBorder2 = collision(425, 475, 0, 80)\n self.M3NPC3 = NPCs((250, 310),\n \"https://orig00.deviantart.net/ef9e/f/2011/137/4/9/pkmn_rse___normal_boy_1_by_rafael_animal-d3gk6mu.png\",\n (256, 256), 0, 0)\n self.M3MPCBorder3 = collision(230, 275, 300, 335)\n\n self.M4NPC2 = NPCs((725, 515),\n \"https://orig00.deviantart.net/a784/f/2011/137/4/2/kymotonian_magma_grunt_compl__by_rafael_animal-d3gk4k9.png\",\n (256, 256), 0, 3)\n self.M4MPCBorder2 = collision(700, 740, 485, 535)\n self.fight42 = False\n\n self.M5NPC2 = NPCs((225, 450),\n \"https://orig00.deviantart.net/a784/f/2011/137/4/2/kymotonian_magma_grunt_compl__by_rafael_animal-d3gk4k9.png\",\n (256, 256), 0, 2)\n self.M5PCBorder2 = collision(210, 250, 410, 460)\n self.fight52 = False\n self.M5NC3 = NPCs((845, 120),\n \"https://orig00.deviantart.net/ee71/f/2012/105/9/c/bw2_girl_overworld__rpgxp__by_rafael_animal-d4w9gsm.png\",\n (256, 256), 0, 1)\n self.M5MCBorder3 = collision(810, 920, 0, 145)\n self.fight53 = False\n\n self.MiNPC2 = NPCs((415, 320),\n \"https://orig00.deviantart.net/ee71/f/2012/105/9/c/bw2_girl_overworld__rpgxp__by_rafael_animal-d4w9gsm.png\",\n (256, 256), 0, 0)\n self.MiPCBorder2 = collision(380, 425, 300, 350)\n\n # Draws items relating to the map on the canvas\n def draw(self, canvas):\n\n if (map == 0):\n canvas.draw_image(\n simplegui.load_image(\"https://image.ibb.co/jdF3sc/Capture_d_cran_2018_02_28_12_35_14.png\"),\n self.ImageCenter, self.ImageWidthHeight, self.CenterDest, self.DimDest)\n self.M0Ball1.draw(canvas)\n self.M0Ball2.draw(canvas)\n self.M0Ball3.draw(canvas)\n self.M0Nurse1.draw(canvas)\n if (self.fight is False):\n self.M0Trainer.draw(canvas)\n if (self.M0LevelUp1 is False):\n self.M0LevelUp.draw(canvas)\n self.M0NPC1.draw(canvas)\n\n if (map == 1):\n self.url = simplegui.load_image(\"https://image.ibb.co/kLwidS/Capture_d_e_cran_2018_03_02_a_22_19_59.png\")\n self.ImageWidthHeight = (919, 700)\n canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, self.CenterDest, self.DimDest)\n self.M1Nurse1.draw(canvas)\n if (self.fight1 is False):\n self.M1Trainer.draw(canvas)\n if (self.fight11 is False):\n self.M1NPC1.draw(canvas)\n if (self.fight12 is False):\n self.M1NPC2.draw(canvas)\n if (self.fight13 is False):\n self.M1NPC3.draw(canvas)\n if (self.M1LevelUp1 is False):\n self.M1LevelUp.draw(canvas)\n\n if (map == 2):\n self.url = simplegui.load_image(\"https://image.ibb.co/gUcoiS/Capture_d_cran_2018_03_06_14_41_51.png\")\n self.ImageWidthHeight = (919, 700)\n canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, self.CenterDest, self.DimDest)\n self.M2Nurse1.draw(canvas)\n if (self.fight2 is False):\n self.M2Trainer.draw(canvas)\n if (self.fight21 is False):\n self.M21Trainer.draw(canvas)\n\n if (self.fight24 is False):\n self.M2NPC1.draw(canvas)\n if (self.fight22 is False):\n self.M2NPC2.draw(canvas)\n if (self.fight23 is False):\n self.M2NPC3.draw(canvas)\n if (self.M2LevelUp1 is False):\n self.M2LevelUp.draw(canvas)\n\n if (map == 3):\n self.url = simplegui.load_image(\"https://image.ibb.co/gx5o2n/Pokemon_Ville.png\")\n self.ImageWidthHeight = (819, 628)\n 
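            # Every map branch in draw() repeats the same recipe: load the
            # background, draw it scaled to the full canvas, then overlay
            # nurses, pick-ups and opponents, skipping any trainer or NPC
            # whose fight flag has flipped to True so that beaten opponents
            # vanish from the overworld.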
canvas.draw_image(simplegui.load_image(\"https://image.ibb.co/gx5o2n/Pokemon_Ville.png\"), (819 / 2, 628 / 2),\n (819, 628), self.CenterDest, self.DimDest)\n if (self.fight3 is False):\n self.M3Trainer.draw(canvas)\n if (self.fight31 is False):\n self.M31Trainer.draw(canvas)\n self.M3NPC1.draw(canvas)\n self.M3NPC2.draw(canvas)\n self.M3NPC3.draw(canvas)\n # if (self.M3LevelUp1 is False):\n # self.M3LevelUp.draw(canvas)\n\n if (map == -1):\n self.url = simplegui.load_image(\"https://image.ibb.co/dLoYJS/sd.png\")\n self.ImageWidthHeight = (819, 628)\n canvas.draw_image(simplegui.load_image(\"https://image.ibb.co/dLoYJS/sd.png\"), (815 / 2, 625 / 2),\n (815, 625), self.CenterDest, self.DimDest)\n self.MiNurse1.draw(canvas)\n self.MiNPC2.draw(canvas)\n\n if (map == 4):\n self.url = simplegui.load_image(\"https://image.ibb.co/fZGm7n/Snow.png\")\n self.ImageWidthHeight = (1020, 780)\n canvas.draw_image(simplegui.load_image(\"https://image.ibb.co/fZGm7n/Snow.png\"), (1020 / 2, 780 / 2),\n (1020, 780), self.CenterDest, self.DimDest)\n if (self.fight4 is False):\n self.M4Trainer.draw(canvas)\n if (self.fight41 is False):\n self.M41Trainer.draw(canvas)\n if (self.fight42 is False):\n self.M4NPC2.draw(canvas)\n if (self.M4LevelUp1 is False):\n self.M4LevelUp.draw(canvas)\n self.M4Nurse1.draw(canvas)\n\n if (map == 5):\n self.url = simplegui.load_image(\"https://image.ibb.co/cZicDS/Cave.png\")\n self.ImageWidthHeight = (1020, 775)\n canvas.draw_image(simplegui.load_image(\"https://image.ibb.co/cZicDS/Cave.png\"), (1020 / 2, 775 / 2),\n (1020, 775), self.CenterDest, self.DimDest)\n if (self.fight5 is False):\n self.M5Trainer.draw(canvas)\n if (self.fight51 is False):\n self.M51Trainer.draw(canvas)\n if (self.fight52 is False):\n self.M5NPC2.draw(canvas)\n if (self.fight53 is False):\n self.M5NC3.draw(canvas)\n if (self.M5LevelUp1 is False):\n self.M5LevelUp.draw(canvas)\n self.M5Nurse1.draw(canvas)\n\n if (map == 6):\n self.url = simplegui.load_image(\"https://image.ibb.co/iESqHn/Hell.png\")\n canvas.draw_image(simplegui.load_image(\"https://image.ibb.co/iESqHn/Hell.png\"), (1020 / 2, 775 / 2),\n (1020, 775), self.CenterDest, self.DimDest)\n if (self.fight6 is False):\n self.M6Trainer.draw(canvas)\n if (self.M6LevelUp1 is False):\n self.M6LevelUp.draw(canvas)\n\n # Updates the map whenever the method is called\n def update(self, canvas):\n global map\n global BATTLE_ON\n global score\n if (map == 0):\n\n if (self.M0LevelUp1 is False):\n if (self.M0LevelUpHB.isInside()):\n self.a.level_up()\n score = score - 2\n self.M0LevelUp1 = True\n\n self.M0tree1.isInside()\n self.M0tree2.isInside()\n self.M0tree3.isInside()\n self.M0tree4.isInside()\n\n self.M0border1.isInside()\n self.M0border2.isInside()\n self.M0border3.isInside()\n self.M0border4.isInside()\n self.M0border5.isInside()\n\n if (self.M0BallBorder1.isInside()):\n Pokemon.draw1(canvas)\n if Pokemon.frameIndex1 == (2, 0):\n self.a = self.playerMon[0]\n elif Pokemon.frameIndex1 == (5, 0):\n self.a = self.playerMon[1]\n elif Pokemon.frameIndex1 == (8, 0):\n self.a = self.playerMon[2]\n elif Pokemon.frameIndex1 == (3, 6):\n self.a = self.playerMon[3]\n elif Pokemon.frameIndex1 == (6, 6):\n self.a = self.playerMon[4]\n elif Pokemon.frameIndex1 == (9, 6):\n self.a = self.playerMon[5]\n elif Pokemon.frameIndex1 == (3, 10):\n self.a = self.playerMon[6]\n elif Pokemon.frameIndex1 == (6, 10):\n self.a = self.playerMon[7]\n elif Pokemon.frameIndex1 == (9, 10):\n self.a = self.playerMon[8]\n # print(Pokemon.frameIndex2)\n\n if 
(self.M0BallBorder2.isInside()):\n Pokemon.draw2(canvas)\n if Pokemon.frameIndex2 == (2, 0):\n self.a = self.playerMon[0]\n elif Pokemon.frameIndex2 == (5, 0):\n self.a = self.playerMon[1]\n elif Pokemon.frameIndex2 == (8, 0):\n self.a = self.playerMon[2]\n elif Pokemon.frameIndex2 == (3, 6):\n self.a = self.playerMon[3]\n elif Pokemon.frameIndex2 == (6, 6):\n self.a = self.playerMon[4]\n elif Pokemon.frameIndex2 == (9, 6):\n self.a = self.playerMon[5]\n elif Pokemon.frameIndex2 == (3, 10):\n self.a = self.playerMon[6]\n elif Pokemon.frameIndex2 == (6, 10):\n self.a = self.playerMon[7]\n elif Pokemon.frameIndex2 == (9, 10):\n self.a = self.playerMon[8]\n # print(Pokemon.frameIndex2)\n\n if (self.M0BallBorder3.isInside()):\n Pokemon.draw3(canvas)\n if Pokemon.frameIndex3 == (2, 0):\n self.a = self.playerMon[0]\n elif Pokemon.frameIndex3 == (5, 0):\n self.a = self.playerMon[1]\n elif Pokemon.frameIndex3 == (8, 0):\n self.a = self.playerMon[2]\n elif Pokemon.frameIndex3 == (3, 6):\n self.a = self.playerMon[3]\n elif Pokemon.frameIndex3 == (6, 6):\n self.a = self.playerMon[4]\n elif Pokemon.frameIndex3 == (9, 6):\n self.a = self.playerMon[5]\n elif Pokemon.frameIndex3 == (3, 10):\n self.a = self.playerMon[6]\n elif Pokemon.frameIndex3 == (6, 10):\n self.a = self.playerMon[7]\n elif Pokemon.frameIndex3 == (9, 10):\n self.a = self.playerMon[8]\n # print(Pokemon.frameIndex3)\n\n if (self.M0NurseBorder.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/ikAvYS/undertale_box_1.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n self.a.health_reset()\n\n if (self.M0MPCBorder1.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/dPzkk7/undertale_box_3.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n\n if (self.fight is False):\n if (self.M0TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer1)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight = True\n score = score + 10\n # print (score)\n transition.PokeBall.spin = True\n\n if (self.M0nextMap.isInside()):\n self.begin = True\n map = 1\n\n if (map == 1):\n\n if (self.M1LevelUp1 is False):\n if (self.M1LevelUpHB.isInside()):\n self.a.level_up()\n score = score - 2\n # print (self.a.attackStat)\n self.M1LevelUp1 = True\n\n if (self.begin is True):\n Player.pos.x = 675\n Player.pos.y = 670\n\n self.begin = False\n\n self.M1border1.isInside()\n self.M1border2.isInside()\n self.M1border3.isInside()\n self.M1border4.isInside()\n self.M1border5.isInside()\n self.M1border6.isInside()\n self.M1border7.isInside()\n self.M1border8.isInside()\n self.M1border9.isInside()\n\n self.M1tree1.isInside()\n self.M1tree2.isInside()\n self.M1tree3.isInside()\n self.M1tree4.isInside()\n self.M1tree5.isInside()\n\n self.M1previousMap.isInside()\n\n if (self.fight1 is False): # Map1 Boss\n if (self.M1TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyBoss1)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight1 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.fight11 is False): # Map1 Npc\n if 
(self.M1MPCBorder1.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer2)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight11 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.fight12 is False): # Map1 Npc\n if (self.M1MPCBorder2.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer2)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight12 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.fight13 is False): # Map1 Npc\n if (self.M1MPCBorder3.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer2)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight13 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.M1NurseBorder.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/ikAvYS/undertale_box_1.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n self.a.health_reset()\n\n if (self.M1nextMap.isInside()):\n self.begin = True\n map = 2\n\n if (map == 2):\n\n if (self.M2LevelUp1 is False):\n if (self.M2LevelUpHB.isInside()):\n self.a.level_up()\n score = score - 2\n # print (self.a.attackStat)\n self.M2LevelUp1 = True\n\n if (self.begin is True):\n Player.pos.x = 900\n Player.pos.y = 500\n\n self.begin = False\n\n self.M2border1.isInside()\n self.M2border2.isInside()\n self.M2border3.isInside()\n self.M2border4.isInside()\n self.M2border5.isInside()\n self.M2border6.isInside()\n self.M2border7.isInside()\n self.M2border8.isInside()\n\n self.M2tree1.isInside()\n self.M2tree2.isInside()\n self.M2tree3.isInside()\n self.M2tree4.isInside()\n\n self.M2river1.isInside()\n self.M2river2.isInside()\n self.M2river3.isInside()\n self.M2river4.isInside()\n self.M2river5.isInside()\n self.M2river6.isInside()\n self.M2river7.isInside()\n self.M2river8.isInside()\n self.M2river9.isInside()\n self.M2river10.isInside()\n self.M2river11.isInside()\n self.M2river12.isInside()\n self.M2river13.isInside()\n self.M2river14.isInside()\n\n self.M2previousMap.isInside()\n\n if (self.fight2 is False): # Map2 Boss\n if (self.M2TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyBoss2)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight2 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.fight21 is False): # Map2 Npc\n if (self.M21TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer3)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight21 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n 
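# NOTE: every encounter below repeats the same battle set-up. A minimal\n            # sketch of a helper that could factor this out (illustrative only --\n            # no start_battle method exists in this file):\n            #\n            #   def start_battle(self, pool, points=10):\n            #       global BATTLE_ON, score\n            #       self.b = random.choice(pool)\n            #       self.a.enemy = self.b\n            #       self.b.player = self.a\n            #       for mon in (self.a, self.b):\n            #           mon.attack_reset()\n            #           mon.defence_reset()\n            #       self.b.health_reset()\n            #       BATTLE_ON = True\n            #       score = score + points\n            #       transition.PokeBall.spin = True\n\n            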
if (self.fight24 is False): # Map2 Npc\n if (self.M2MPCBorder1.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer3)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight24 = True\n score = score + 10\n transition.PokeBall.spin = True\n if (self.fight23 is False):\n if (self.M2MPCBorder3.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer3)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight23 = True\n score = score + 10\n transition.PokeBall.spin = True\n if (self.fight22 is False): # Map2 Npc\n if (self.M2MPCBorder2.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer3)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight22 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.M2NurseBorder.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/ikAvYS/undertale_box_1.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n self.a.health_reset()\n\n if (self.M2nextMap.isInside()):\n self.begin = True\n map = 3\n\n if (map == 3):\n\n if (self.begin2 is True):\n Player.pos.x = 355\n Player.pos.y = 340\n\n self.begin2 = False\n\n if (self.begin is True):\n Player.pos.x = 880\n Player.pos.y = 395\n\n self.begin = False\n\n self.M3border1.isInside()\n self.M3border2.isInside()\n self.M3border3.isInside()\n self.M3border4.isInside()\n self.M3border5.isInside()\n self.M3border6.isInside()\n\n self.M3house1.isInside()\n self.M3house2.isInside()\n\n self.M3tree1.isInside()\n self.M3tree2.isInside()\n self.M3tree3.isInside()\n self.M3tree4.isInside()\n self.M3tree5.isInside()\n self.M3tree6.isInside()\n\n self.M3previousMap.isInside()\n\n if (self.fight3 is False): # Map3 Npc\n if (self.M3TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyBoss3)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight3 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.fight31 is False):\n if (self.M31TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer4)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight31 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.M3door1.isInside()):\n self.begin = True\n map = -1\n\n if (self.M3door2.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/iB5usn/undertale_box_1.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n\n if (self.M3nextMap.isInside()):\n self.begin = True\n map = 4\n\n if (self.M3MPCBorder1.isInside()):\n 
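# draw_image args are (image, centre_source, size_source, centre_dest,\n                # size_dest): the 578x152 dialogue box is blitted at full size,\n                # centred near the bottom of the 920x700 canvas.\n                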
canvas.draw_image(simplegui.load_image('https://image.ibb.co/fObc2n/undertale_box_7.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n if (self.M3LevelUp1 is False):\n self.a.level_up()\n score = score - 2\n # print (self.a.attackStat)\n self.M3LevelUp1 = True\n\n if (self.M3MPCBorder2.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/fUgMTS/undertale_box_6.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n if (self.M3MPCBorder3.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/b01S2n/undertale_box_5.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n\n if (map == -1):\n if (self.begin is True):\n Player.pos.x = 675\n Player.pos.y = 412\n\n self.begin = False\n\n if (self.MipreviousMap.isInside()):\n self.begin2 = True\n map = 3\n\n self.Miborder1.isInside()\n self.Miborder2.isInside()\n self.Miborder3.isInside()\n self.Miborder4.isInside()\n\n self.Mifurniture1.isInside()\n self.Mifurniture2.isInside()\n self.Mifurniture3.isInside()\n self.Mifurniture4.isInside()\n self.Mifurniture5.isInside()\n\n if (self.MiNurseBorder.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/ikAvYS/undertale_box_1.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n self.a.health_reset()\n\n if (self.MiPCBorder2.isInside()):\n canvas.draw_image(simplegui.load_image('https://image.ibb.co/hs9BdS/undertale_box_4.png'),\n (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n\n if (map == 4):\n\n if (self.M4LevelUp1 is False):\n if (self.M4LevelUpHB.isInside()):\n self.a.level_up()\n score = score - 2\n self.M4LevelUp1 = True\n\n if (self.begin is True):\n Player.pos.x = 890\n Player.pos.y = 340\n\n self.begin = False\n\n self.M4border1.isInside()\n self.M4border2.isInside()\n self.M4border3.isInside()\n self.M4border4.isInside()\n self.M4border5.isInside()\n self.M4border6.isInside()\n self.M4border7.isInside()\n self.M4border8.isInside()\n self.M4border9.isInside()\n self.M4border10.isInside()\n\n if (self.fight4 is False): # Map4 Npc\n if (self.M4TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer5)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight4 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.fight41 is False): # Map4 Boss\n if (self.M41TrainerBorder.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyBoss4)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight41 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.fight42 is False): # Map4 Npc\n if (self.M4MPCBorder2.isInside()):\n transition.update(canvas)\n if not transition.PokeBall.spin:\n self.b = random.choice(self.enemyTrainer5)\n self.a.enemy = self.b\n self.b.player = self.a\n self.a.attack_reset()\n self.a.defence_reset()\n self.b.attack_reset()\n self.b.defence_reset()\n self.b.health_reset()\n BATTLE_ON = True\n self.fight42 = True\n score = score + 10\n transition.PokeBall.spin = True\n\n if (self.M4nextMap.isInside()):\n self.begin = True\n map = 5\n\n 
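# Map changes flip self.begin and update the global map index; the\n            # destination branch then teleports the player exactly once (the\n            # map == 5 branch below spawns the player at (245, 625)).\n\n            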
self.M4previousMap.isInside()\n\n            if (self.M4NurseBorder.isInside()):\n                canvas.draw_image(simplegui.load_image('https://image.ibb.co/ikAvYS/undertale_box_1.png'),\n                                  (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n                self.a.health_reset()\n\n        if (map == 5):\n\n            if (self.M5LevelUp1 is False):\n                if (self.M5LevelUpHB.isInside()):\n                    self.a.level_up()\n                    score = score - 2\n                    self.M5LevelUp1 = True\n\n            if (self.begin is True):\n                Player.pos.x = 245\n                Player.pos.y = 625\n\n                self.begin = False\n\n            self.M5border1.isInside()\n            self.M5border2.isInside()\n            self.M5border3.isInside()\n            self.M5border4.isInside()\n            self.M5border5.isInside()\n            self.M5border6.isInside()\n            self.M5border7.isInside()\n            self.M5border8.isInside()\n            self.M5border10.isInside()\n            self.M5border11.isInside()\n            self.M5border12.isInside()\n            self.M5border13.isInside()\n\n            if (self.fight5 is False): # Map5 Boss\n                if (self.M5TrainerBorder.isInside()):\n                    transition.update(canvas)\n                    if not transition.PokeBall.spin:\n                        self.b = random.choice(self.enemyBoss4)\n                        self.a.enemy = self.b\n                        self.b.player = self.a\n                        self.a.attack_reset()\n                        self.a.defence_reset()\n                        self.b.attack_reset()\n                        self.b.defence_reset()\n                        self.b.health_reset()\n                        BATTLE_ON = True\n                        self.fight5 = True\n                        score = score + 10\n                        transition.PokeBall.spin = True\n\n            if (self.fight51 is False): # Map5 Npc\n                if (self.M51TrainerBorder.isInside()):\n                    transition.update(canvas)\n                    if not transition.PokeBall.spin:\n                        self.b = random.choice(self.enemyBoss4)\n                        self.a.enemy = self.b\n                        self.b.player = self.a\n                        self.a.attack_reset()\n                        self.a.defence_reset()\n                        self.b.attack_reset()\n                        self.b.defence_reset()\n                        self.b.health_reset()\n                        BATTLE_ON = True\n                        self.fight51 = True\n                        score = score + 10\n                        transition.PokeBall.spin = True\n\n            if (self.M5nextMap.isInside()):\n                self.begin = True\n                map = 6\n\n            self.M5previousMap.isInside()\n\n            if (self.M5NurseBorder.isInside()):\n                canvas.draw_image(simplegui.load_image('https://image.ibb.co/ikAvYS/undertale_box_1.png'),\n                                  (578 / 2, 152 / 2), (578, 152), (920 / 2, 700 - 150), (578, 152))\n                self.a.health_reset()\n\n            if (self.fight53 is False): # Map5 Npc\n                if (self.M5MCBorder3.isInside()):\n                    transition.update(canvas)\n                    if not transition.PokeBall.spin:\n                        self.b = random.choice(self.enemyBoss4)\n                        self.a.enemy = self.b\n                        self.b.player = self.a\n                        self.a.attack_reset()\n                        self.a.defence_reset()\n                        self.b.attack_reset()\n                        self.b.defence_reset()\n                        self.b.health_reset()\n                        BATTLE_ON = True\n                        self.fight53 = True\n                        score = score + 10\n                        transition.PokeBall.spin = True\n\n            if (self.fight52 is False): # Map5 Npc\n                if (self.M5PCBorder2.isInside()):\n                    transition.update(canvas)\n                    if not transition.PokeBall.spin:\n                        self.b = random.choice(self.enemyBoss4)\n                        self.a.enemy = self.b\n                        self.b.player = self.a\n                        self.a.attack_reset()\n                        self.a.defence_reset()\n                        self.b.attack_reset()\n                        self.b.defence_reset()\n                        self.b.health_reset()\n                        BATTLE_ON = True\n                        self.fight52 = True\n                        score = score + 10\n                        transition.PokeBall.spin = True\n\n        if (map == 6):\n\n            if (self.M6LevelUp1 is False):\n                if (self.M6LevelUpHB.isInside()):\n                    self.a.level_up()\n                    score = score - 2\n                    self.M6LevelUp1 = True\n\n            if (self.begin is True):\n                Player.pos.x = 400\n                Player.pos.y = 625\n\n                self.begin = False\n\n            self.M6border1.isInside()\n            self.M6border2.isInside()\n            self.M6border3.isInside()\n            self.M6border4.isInside()\n            self.M6border5.isInside()\n            self.M6border6.isInside()\n            self.M6border7.isInside()\n\n            
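# Beating RED ends the game: once fight6 is True, touching the trainer\n            # border again falls through to the else branch below, which shows\n            # the win screen with the final score and resets the map.\n            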
if (self.fight6 is False): # RED\n                if (self.M6TrainerBorder.isInside()):\n                    transition.update(canvas)\n                    if not transition.PokeBall.spin:\n                        self.b = random.choice(self.enemyRed)\n                        self.a.enemy = self.b\n                        self.b.player = self.a\n                        self.a.attack_reset()\n                        self.a.defence_reset()\n                        self.b.attack_reset()\n                        self.b.defence_reset()\n                        self.b.health_reset()\n                        BATTLE_ON = True\n                        self.fight6 = True\n                        score = score + 50\n                        # print(score)\n                        transition.PokeBall.spin = True\n            else:\n                menu.playGame = False\n                menu.message = \"You Win!\"\n                menu.messageTwo = \"Score: \" + str(score)\n                menu.messageThree = \"\"\n                menu.messageFour = \"\"\n                self.a.reset_level()\n                Map.reset()\n\n    # Resets the map to play again\n    def reset(self):\n        global score\n        global map\n        self.fight = False\n        self.fight1 = False\n        self.fight11 = False\n        self.fight12 = False\n        self.fight13 = False\n        self.fight2 = False\n        self.fight21 = False\n        self.fight22 = False\n        self.fight23 = False\n        self.fight24 = False\n        self.fight3 = False\n        self.fight31 = False\n        self.fight4 = False\n        self.fight41 = False\n        self.fight42 = False\n        self.fight5 = False\n        self.fight51 = False\n        self.fight52 = False\n        self.fight53 = False\n        self.fight6 = False\n        self.M0LevelUp1 = False\n        self.M1LevelUp1 = False\n        self.M2LevelUp1 = False\n        self.M3LevelUp1 = False\n        self.M4LevelUp1 = False\n        self.M5LevelUp1 = False\n        self.M6LevelUp1 = False\n        self.begin = False\n        score = 0\n        map = 0\n        Player.pos.x = 300\n        Player.pos.y = 340\n\n\n# Creates the transition between the battle(s) and the map(s)\nclass PokeBall:\n    # Initialises the spinning Pokeball\n    def __init__(self, pos):\n        self.pos = pos\n        self.radius = 100\n        self.spin = True\n        self.image = simplegui.load_image('https://imgur.com/GF3Dg8s.png')\n\n    # Draws the Pokeball on the canvas\n    def draw(self, canvas):\n        global IMG_ROT\n        IMG_ROT += 0.05\n        IMG_DIAMETER = self.radius * 2\n        IMG_DIM = 1280\n        IMG_WH = (IMG_DIM, IMG_DIM)\n        CEN_DEST = self.pos.getP()\n        DIM_DEST = (IMG_DIAMETER, IMG_DIAMETER)\n        canvas.draw_image(self.image, (IMG_DIM / 2, IMG_DIM / 2), (IMG_WH), (CEN_DEST), (DIM_DEST), IMG_ROT)\n\n    # Updates the Pokeball's radius\n    def update(self):\n        self.radius += 5\n\n    # Resets the Pokeball\n    def reset(self):\n        global IMG_ROT\n        self.radius = 100\n        IMG_ROT = 0\n        self.spin = False\n\n\n# The fireball(s) that collide when the frame is started\nclass FireBall:\n    # Initialises the fireball with the specified sprite\n    def __init__(self, pos, vel):\n        self.pos = pos\n        self.vel = vel\n        self.radius = 60\n        self.image = simplegui.load_image('https://imgur.com/TZyNYNu.png')\n        self.dimension = 512\n        self.imageSize = (self.dimension, self.dimension)\n        self.columns = 8\n        self.rows = 8\n\n        self.frameWidth = self.imageSize[0] / self.columns\n        self.frameHeight = self.imageSize[1] / self.rows\n        self.frameCentreX = self.frameWidth / 2\n        self.frameCentreY = self.frameHeight / 2\n        self.frameIndex = [0, 0]\n        self.frameDimension = (self.frameWidth, self.frameHeight)\n        self.DimDest = (128, 128)\n\n    # The fireball from the left to the centre\n    def LeftToRight(self):\n        self.frameIndex[0] = (self.frameIndex[0] + 1) % self.columns\n        self.frameIndex[1] = 4\n\n    # The fireball from the right to the centre\n    def RightToLeft(self):\n        self.frameIndex[0] = (self.frameIndex[0] + 1) % self.columns\n        self.frameIndex[1] = 0\n\n    # Draws the fireball on canvas\n    def draw(self, canvas):\n        self.imageCentre = (self.frameWidth * self.frameIndex[0] + self.frameCentreX,\n                            self.frameHeight * self.frameIndex[1] + self.frameCentreY)\n        canvas.draw_image(self.image, self.imageCentre, self.frameDimension, self.pos.getP(), 
self.DimDest)\n\n # Bounces the fireballs off of the centre point\n def bounce(self, normal):\n self.vel.reflect(normal)\n\n # Updates the velocity of the fireballs\n def update(self):\n self.pos.add(self.vel)\n\n\n# The fire blast that ensues the colliding fireballs\nclass FireBlast:\n # Initialises the fire blast with the provided sprite\n def __init__(self, pos):\n self.pos = pos\n self.radius = 32\n self.image = simplegui.load_image('http://moziru.com/images/drawn-explosion-sprite-1.png')\n self.width = self.image.get_width()\n self.height = self.image.get_height()\n self.imageSize = (self.width, self.height)\n self.columns = 8\n self.rows = 6\n\n self.frameWidth = self.imageSize[0] / self.columns\n self.frameHeight = self.imageSize[1] / self.rows\n self.frameCentreX = self.frameWidth / 2\n self.frameCentreY = self.frameHeight / 2\n self.frameIndex = [0, 0]\n self.frameDimension = (self.frameWidth, self.frameHeight)\n self.DimDest = (2688, 2688)\n\n # Draws the fire blast on canvas\n def draw(self, canvas):\n self.frameIndex[0] = (self.frameIndex[0] + 1) % self.columns\n if self.frameIndex[0] == 0:\n self.frameIndex[1] = (self.frameIndex[1] + 1) % self.rows\n self.imageCentre = (self.frameWidth * self.frameIndex[0] + self.frameCentreX,\n self.frameHeight * self.frameIndex[1] + self.frameCentreY)\n canvas.draw_image(self.image, self.imageCentre, self.frameDimension, self.pos.getP(), self.DimDest)\n\n # Determines whether the fire blast is complete or not\n def complete(self):\n complete = self.frameIndex[0] == 7 and self.frameIndex[1] == 5\n return complete\n\n\n# Rotates a vector but should be in a class\ndef rotateAnti(v):\n return Vector(-v.y, v.x)\n\n\n# Creates a line for the fireballs to bounce off of\nclass Line:\n # Initialises the line\n def __init__(self, point1, point2):\n self.pA = point1\n self.pB = point2\n self.thickness = 3\n self.unit = (self.pB - self.pA).normalize()\n self.normal = rotateAnti(self.unit)\n\n # Determines the distance to the line from somethings position\n def distanceTo(self, pos):\n posToA = pos - self.pA\n proj = posToA.dot(self.normal) * self.normal\n return proj.length()\n\n # Determines if something covers the line from its current position\n def covers(self, pos):\n return ((pos - self.pA).dot(self.unit) >= 0 and\n (pos - self.pB).dot(-self.unit) >= 0)\n\n\n# Animates the fireball explosion upon starting the frame\nclass Opening:\n # Initialises the fireballs, line, and explosion\n def __init__(self):\n self.FireBallLeft = FireBall(Vector(50, 350), Vector(5, 0))\n self.FireBallRight = FireBall(Vector(870, 350), Vector(-5, 0))\n self.line = Line(Vector(460, 300), Vector(460, 400))\n self.inCollision = False\n\n # Updates the canvas to include the fireballs\n # Should be renamed as draw; however, doesn't matter\n def update(self, canvas):\n if self.inCollision == False:\n # FireBalls Left => Right\n self.FireBallLeft.draw(canvas)\n self.FireBallLeft.LeftToRight()\n self.FireBallLeft.update()\n # FireBalls Right => Left\n self.FireBallRight.draw(canvas)\n self.FireBallRight.RightToLeft()\n self.FireBallRight.update()\n\n else:\n if not fb.complete():\n fb.draw(canvas)\n\n if (self.line.distanceTo(self.FireBallRight.pos) < self.line.thickness + self.FireBallRight.radius and\n self.line.covers(self.FireBallRight.pos)):\n if not self.inCollision:\n self.inCollision = True\n else:\n self.inCollision = False\n\n\n# Creates the transition between the battle(s) and the map(s)\nclass Transition:\n # Initialises the Pokeball\n def __init__(self):\n 
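# The 2100 ms simplegui timer below controls how long the ball spins:\n        # update() starts the timer while spin is True, and timer_handler()\n        # ends the spin (and stops the timer) when it fires.\n        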
self.PokeBall = PokeBall(Vector(460, 350))\n self.timer = simplegui.create_timer(2100, self.timer_handler)\n\n # Updates the canvas\n def update(self, canvas):\n if self.PokeBall.spin:\n self.PokeBall.draw(canvas)\n self.PokeBall.update()\n # inter.wheel.vel.negate()\n kbd.right = False\n kbd.left = False\n kbd.up = False\n kbd.down = False\n kbd.run = False\n self.timer.start()\n\n # The timer handler to determine how long the Pokeball spins for\n def timer_handler(self):\n self.PokeBall.spin = False\n self.PokeBall.reset()\n self.timer.stop()\n\n\n# Manages the interaction between the player and the keyboard/mouse\nclass Interaction:\n # Initialises the player and keyboard\n def __init__(self, wheel, keyboard):\n self.wheel = wheel\n self.keyboard = keyboard\n\n # Updates the velocity of the player depending on which key is pressed\n def update(self):\n if self.keyboard.right:\n Player.nextFrameRight()\n self.wheel.vel.add(Vector(0.25, 0))\n if self.keyboard.left:\n Player.nextFrameLeft()\n self.wheel.vel.add(Vector(-0.25, 0))\n if self.keyboard.up:\n Player.nextFrameUp()\n self.wheel.vel.add(Vector(0, -0.25))\n if self.keyboard.down:\n Player.nextFrameDown()\n self.wheel.vel.add(Vector(0, 0.25))\n if self.keyboard.run:\n self.wheel.vel.multiply(1.09)\n\n # Draws the menu, battle(s), maps, opening transitions, etc\n def draw(self, canvas):\n opening.update(canvas)\n if fb.complete():\n if not menu.playGame:\n menu.draw(canvas)\n menuButtons.draw(canvas)\n\n elif BATTLE_ON:\n battle.draw(canvas)\n\n else:\n inter.update()\n Map.draw(canvas)\n Player.update()\n Player.draw(canvas)\n Map.update(canvas)\n\n\n# Used to manage the delay at the end of a battle\nclass Clock1:\n # Sets time to 0\n def __init__(self):\n self.time = 0\n\n # Increments the time\n def tick(self):\n self.time = self.time + 1 / 60\n\n # Determines when the transition occurs\n def transition(self, frameDuration):\n self.tick()\n # print(self.time % frameDuration)\n rem = self.time % frameDuration\n return 0 < rem < 0.017 # 0.017 = tick in 60fps so keep it like this or it's broken af\n\n\n# The collision between the player and an object (NPC/Pokeball/Mushroom) on the map\nclass collision:\n # Initialiser\n def __init__(self, BoxLeft, BoxRight, BoxTop, BoxBottom):\n self.BoxLeft = BoxLeft\n self.BoxRight = BoxRight\n self.BoxTop = BoxTop\n self.BoxBottom = BoxBottom\n\n # Determines if the player is within a certain proximity of an object\n def isInside(self):\n if self.BoxLeft <= Player.pos.x <= self.BoxRight and self.BoxTop <= Player.pos.y <= self.BoxBottom:\n if Player.pos.x < (self.BoxLeft + 10):\n Player.pos.x = self.BoxLeft\n return True\n if Player.pos.x > (self.BoxRight - 10):\n Player.pos.x = self.BoxRight\n return True\n if Player.pos.y < (self.BoxTop + 10):\n Player.pos.y = self.BoxTop\n return True\n if Player.pos.y > (self.BoxBottom - 10):\n Player.pos.y = self.BoxBottom\n return True\n\n\n# The pokeballs for choosing pokemon at the start\nclass Pokeballs:\n # Takes arguments for the place where the Pokeballs should be\n def __init__(self, url, ImageWidthHeight, CenterDest):\n self.url = simplegui.load_image(url)\n self.ImageWidthHeight = ImageWidthHeight\n self.ImageCenter = (ImageWidthHeight[0] / 2, ImageWidthHeight[1] / 2)\n self.CenterDest = CenterDest\n self.DimDest = (25, 25)\n\n # Draws the pokeballs on the map with the specified positions\n def draw(self, canvas):\n canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, self.CenterDest, self.DimDest)\n\n\n# Creates the sprite for Nurse 
Joy with a specified URL\nclass NurseJoy:\n # Takes arguments to intialise where Nurse Joy should go and how large the NPC should be\n def __init__(self, url, ImageWidthHeight, CenterDest):\n self.url = simplegui.load_image(url)\n self.ImageWidthHeight = ImageWidthHeight\n self.ImageCenter = (ImageWidthHeight[0] / 2, ImageWidthHeight[1] / 2)\n self.CenterDest = CenterDest\n self.DimDest = (60, 60)\n\n # Draws Nurse Joy on the map at the specified place\n def draw(self, canvas):\n canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, self.CenterDest, self.DimDest)\n\n\n# Creates the players character\nclass Character:\n # Initialises the characters sprite\n def __init__(self, pos):\n self.pos = pos\n self.vel = Vector()\n\n self.url = simplegui.load_image(\"https://i.imgur.com/sCrkzvs.png\")\n\n self.ImageSize = (256, 256)\n self.ImageCenter = (256 / 2, 256 / 2)\n self.colums = 4\n self.rows = 4\n\n self.frameWidth = self.ImageSize[0] / self.colums\n self.frameHeight = self.ImageSize[1] / self.rows\n self.frameCentreX = self.frameWidth / 2\n self.frameCentreY = self.frameHeight / 2\n self.ImageCenter = (self.frameCentreX, self.frameCentreY)\n self.frameIndex = [0, 0]\n self.ImageWidthHeight = (self.frameWidth, self.frameHeight)\n\n self.DimDest = (60, 60)\n\n # Updates the characters velocity\n def update(self):\n self.pos.add(self.vel)\n self.vel.multiply(0.85)\n\n # Determines the next frame down\n def nextFrameDown(self):\n if c.transition(\n 0.2): # For some reason, probably timer related any other values break the animation. : ¯\\_(ツ)_/¯:\n self.frameIndex[0] = (self.frameIndex[0] + 1) % self.colums\n self.frameIndex[1] = 0\n\n # Determines the next frame up\n def nextFrameUp(self):\n if c.transition(\n 0.2): # For some reason, probably timer related any other values break the animation. : ¯\\_(ツ)_/¯:\n self.frameIndex[0] = (self.frameIndex[0] + 1) % self.colums\n self.frameIndex[1] = 3\n\n # Determines the next frame left\n def nextFrameLeft(self):\n if c.transition(\n 0.2): # For some reason, probably timer related any other values break the animation. : ¯\\_(ツ)_/¯:\n self.frameIndex[0] = (self.frameIndex[0] + 1) % self.colums\n self.frameIndex[1] = 1\n\n # Determines the next frame right\n def nextFrameRight(self):\n if c.transition(\n 0.2): # For some reason, probably timer related any other values break the animation. 
: ¯\\_(ツ)_/¯:\n            self.frameIndex[0] = (self.frameIndex[0] + 1) % self.colums\n            self.frameIndex[1] = 2\n\n    # Draws the character on the canvas\n    def draw(self, canvas):\n        self.ImageCenter = (self.frameWidth * self.frameIndex[0] + self.frameCentreX,\n                            self.frameHeight * self.frameIndex[1] + self.frameCentreY)\n        canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, self.pos.getP(), self.DimDest)\n\n\n# Allows the player to choose from a set of random Pokemon\nclass RandomPokemon:\n    # Initialises the random Pokemon from a sprite sheet\n    def __init__(self, Pokemon1, Pokemon2, Pokemon3):\n        self.Pokemon1 = Pokemon1\n        self.Pokemon2 = Pokemon2\n        self.Pokemon3 = Pokemon3\n        self.url = simplegui.load_image(\"https://veekun.com/static/pokedex/downloads/generation-3.png\")\n\n        self.StarterPokemon = [(2, 0), (5, 0), (8, 0), (3, 6), (6, 6), (9, 6), (3, 10), (6, 10), (9, 10)]\n        self.ImageSize = (1600, 1024)\n        self.ImageCenter = (1600 / 2, 1024 / 2)\n        self.colums = 25\n        self.rows = 16\n\n        self.frameWidth = self.ImageSize[0] / self.colums\n        self.frameHeight = self.ImageSize[1] / self.rows\n        self.frameCentreX = self.frameWidth / 2\n        self.frameCentreY = self.frameHeight / 2\n        self.ImageCenter = (self.frameCentreX, self.frameCentreY)\n        self.frameIndex1 = self.StarterPokemon[Pokemon1]\n        self.frameIndex2 = self.StarterPokemon[Pokemon2]\n        self.frameIndex3 = self.StarterPokemon[Pokemon3]\n\n        self.ImageWidthHeight = (self.frameWidth, self.frameHeight)\n\n        self.DimDest = (150, 150)\n\n    # Draws the first random Pokemon\n    def draw1(self, canvas):\n        self.ImageCenter = (self.frameWidth * self.frameIndex1[0] + self.frameCentreX,\n                            self.frameHeight * self.frameIndex1[1] + self.frameCentreY)\n        canvas.draw_circle((920 / 2, 700 / 2), 110, 1, \"white\", \"white\")\n        canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, (920 / 2, 700 / 2), self.DimDest)\n\n    # Draws the second random Pokemon\n    def draw2(self, canvas):\n        self.ImageCenter = (self.frameWidth * self.frameIndex2[0] + self.frameCentreX,\n                            self.frameHeight * self.frameIndex2[1] + self.frameCentreY)\n        canvas.draw_circle((920 / 2, 700 / 2), 110, 1, \"white\", \"white\")\n        canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, (920 / 2, 700 / 2), self.DimDest)\n\n    # Draws the third random Pokemon\n    def draw3(self, canvas):\n        self.ImageCenter = (self.frameWidth * self.frameIndex3[0] + self.frameCentreX,\n                            self.frameHeight * self.frameIndex3[1] + self.frameCentreY)\n        canvas.draw_circle((920 / 2, 700 / 2), 110, 1, \"white\", \"white\")\n        canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, (920 / 2, 700 / 2), self.DimDest)\n\n\n# Adds trainers to the map\nclass Trainers:\n    # Initialises the trainer with the specified dimensions\n    def __init__(self, url, ImageWidthHeight, CenterDest):\n        self.url = simplegui.load_image(url)\n        self.ImageWidthHeight = ImageWidthHeight\n        self.ImageCenter = (ImageWidthHeight[0] / 2, ImageWidthHeight[1] / 2)\n        self.CenterDest = CenterDest\n        self.DimDest = (60, 60)\n\n    # Adds the trainer to the map at the specified position/place\n    def draw(self, canvas):\n        canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, self.CenterDest, self.DimDest)\n\n\n# Adds NPCs to the map\nclass NPCs:\n    # Initialises the sprite for the NPCs\n    def __init__(self, pos, url, ImageWidthHeight, frameIndexX, frameIndexY):\n        self.pos = pos\n        self.vel = Vector()\n\n        self.url = simplegui.load_image(url)\n\n        self.ImageSize = ImageWidthHeight\n        self.ImageCenter = (ImageWidthHeight[0] / 2, ImageWidthHeight[1] / 2)\n        
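# Sprite-sheet maths (same scheme as Character): frames are\n        # sheet_size / grid, and the centre of frame (col, row) is\n        # (frameWidth * col + frameWidth / 2, frameHeight * row + frameHeight / 2).\n        # E.g. a 256x256 sheet in a 4x4 grid gives 64x64 frames, so frame\n        # (2, 1) is centred at (160, 96).\n        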
self.colums = 4\n self.rows = 4\n\n self.frameWidth = self.ImageSize[0] / self.colums\n self.frameHeight = self.ImageSize[1] / self.rows\n self.frameCentreX = self.frameWidth / 2\n self.frameCentreY = self.frameHeight / 2\n self.ImageCenter = (self.frameCentreX, self.frameCentreY)\n self.frameIndex = [0, 0]\n self.ImageWidthHeight = (self.frameWidth, self.frameHeight)\n\n self.DimDest = (60, 60)\n\n self.frameIndex[0] = frameIndexX\n self.frameIndex[1] = frameIndexY\n\n # Draws the NPCs on the map\n def draw(self, canvas):\n self.ImageCenter = (self.frameWidth * self.frameIndex[0] + self.frameCentreX,\n self.frameHeight * self.frameIndex[1] + self.frameCentreY)\n canvas.draw_image(self.url, self.ImageCenter, self.ImageWidthHeight, self.pos, self.DimDest)\n\n\n# A class that creates the main menu of the game\nclass Menu:\n # Initialises the welcome messages, and sets playGame to false;\n def __init__(self):\n self.message = \"Welcome to Not Pokemon!\"\n self.messageTwo = \"Click or press 'Play' to begin\"\n self.messageThree = \"\"\n self.messageFour = \"\"\n self.playGame = False\n self.quitGame = False\n\n # Below for background image\n self.image = simplegui.load_image(\n 'https://orig00.deviantart.net/486e/f/2011/305/c/5/spirit_master_background_by_kymotonian-d4elxyc.png')\n self.imageWidth = self.image.get_width()\n self.imageHeight = self.image.get_height()\n self.centreSource = (self.imageWidth / 2, self.imageHeight / 2)\n\n # Allows the game to begin - gets rid of the menu screen\n def play(self):\n self.playGame = True\n self.message = \"\"\n self.messageTwo = \"Click or press 'Play' again to begin\"\n self.messageThree = \"\"\n self.messageFour = \"\"\n\n # Allows the user to quit the game (frame)\n def quit(self):\n self.message = \"You have quit the game!\"\n self.messageTwo = \"\"\n self.messageThree = \"\"\n self.messageFour = \"\"\n self.playGame = False\n self.quitGame = True\n\n # Displays the info of the game\n def getInfo(self):\n self.message = \"Made by: Diego Toledano, Juuso Helvio, \"\n self.messageTwo = \"Malik Rajwani, Liam Jones, \"\n self.messageThree = \"Patthawi Roddouyboon\"\n self.messageFour = \"\"\n\n def getGuide(self):\n self.message = \"Attack - Attacks the enemy (1 turn)\"\n self.messageTwo = \"Boost Attack - Boosts the players attack (1 turn)\"\n self.messageThree = \"Boost Defence - Boosts the players defence (1 turn)\"\n self.messageFour = \"Use the arrow keys to move and the space key to run\"\n\n # Resets everything to default\n def default(self):\n self.message = \"Welcome to Not Pokemon!\"\n self.playGame = False\n self.quitGame = False\n\n # Draws the menu\n def draw(self, canvas):\n if not self.quitGame:\n if not PLAYER_ALIVE:\n if not self.playGame:\n # The image throws two libpng warnings\n text_widthOne = frame.get_canvas_textwidth(self.message, 30, 'monospace')\n text_widthTwo = frame.get_canvas_textwidth(self.messageTwo, 30, 'monospace')\n text_widthThree = frame.get_canvas_textwidth(self.messageThree, 30, 'monospace')\n # The image throws two libpng warnings\n canvas.draw_image(menu.image, menu.centreSource, (menu.imageWidth, menu.imageHeight),\n (CANVAS_WIDTH / 2, CANVAS_HEIGHT / 2), (CANVAS_WIDTH, CANVAS_HEIGHT))\n canvas.draw_text(self.message, [CANVAS_WIDTH / 2 - text_widthOne / 2, CANVAS_HEIGHT / 1.5], 30,\n \"White\", \"monospace\")\n canvas.draw_text(self.messageTwo, [CANVAS_WIDTH / 2 - text_widthTwo / 2, CANVAS_HEIGHT / 1.4], 30,\n \"White\", \"monospace\")\n canvas.draw_text(self.messageThree, [CANVAS_WIDTH / 2 - text_widthThree / 
2, CANVAS_HEIGHT / 1.3],\n 30, \"White\", \"monospace\")\n else:\n text_widthOne = frame.get_canvas_textwidth(self.message, 30, 'monospace')\n text_widthTwo = frame.get_canvas_textwidth(self.messageTwo, 30, 'monospace')\n text_widthThree = frame.get_canvas_textwidth(self.messageThree, 30, 'monospace')\n text_widthFour = frame.get_canvas_textwidth(self.messageFour, 30, 'monospace')\n # The image throws two libpng warnings\n canvas.draw_image(menu.image, menu.centreSource, (menu.imageWidth, menu.imageHeight),\n (CANVAS_WIDTH / 2, CANVAS_HEIGHT / 2), (CANVAS_WIDTH, CANVAS_HEIGHT))\n canvas.draw_text(self.message, [CANVAS_WIDTH / 2 - text_widthOne / 2, CANVAS_HEIGHT / 1.5], 30, \"White\",\n \"monospace\")\n canvas.draw_text(self.messageTwo, [CANVAS_WIDTH / 2 - text_widthTwo / 2, CANVAS_HEIGHT / 1.4], 30,\n \"White\", \"monospace\")\n canvas.draw_text(self.messageThree, [CANVAS_WIDTH / 2 - text_widthThree / 2, CANVAS_HEIGHT / 1.3], 30,\n \"White\", \"monospace\")\n canvas.draw_text(self.messageFour, [CANVAS_WIDTH / 2 - text_widthFour / 2, CANVAS_HEIGHT / 1.6], 30,\n \"White\", \"monospace\")\n else:\n self.default() # Not necessarily needed\n frame.stop()\n\n # Begins the game when the user clicks\n def mouse_handler(self, position):\n global BATTLE_ON\n self.playGame = True\n BATTLE_ON = False\n self.message = \"Welcome to Not Pokemon!\"\n self.messageTwo = \"Click or press 'Play' again to begin\"\n self.messageThree = \"\"\n self.messageFour = \"\"\n\n\n# The class for the on-screen menu buttons\nclass MenuButtons:\n def __init__(self):\n self.play = \"Play\"\n self.info = \"Info\"\n self.quit = \"Quit\"\n self.guide = \"Guide\"\n\n # Draws the menu buttons with text\n def draw(self, canvas):\n canvas.draw_polygon([(135, 600), (135, 550), (335, 550), (335, 600)], 1, \"White\", \"White\")\n canvas.draw_polygon([(355, 600), (355, 550), (555, 550), (555, 600)], 1, \"White\", \"White\")\n canvas.draw_polygon([(575, 600), (575, 550), (775, 550), (775, 600)], 1, \"White\", \"White\")\n canvas.draw_polygon([(355, 670), (355, 620), (555, 620), (555, 670)], 1, \"White\", \"White\")\n canvas.draw_text(self.play, (195, 585), 30, \"Black\", \"monospace\")\n canvas.draw_text(self.info, (415, 585), 30, \"Black\", \"monospace\")\n canvas.draw_text(self.quit, (635, 585), 30, \"Black\", \"monospace\")\n canvas.draw_text(self.guide, (410, 655), 30, \"Black\", \"monospace\")\n\n # Handles the on-screen menu buttons\n def mouse_handler(self, position):\n if ((position[0] >= 135 and position[1] <= 600) and (position[0] <= 335 and position[1] >= 550)):\n menu.mouse_handler(position)\n elif ((position[0] >= 355 and position[1] <= 600) and (position[0] <= 555 and position[1] >= 550)):\n menu.getInfo()\n elif ((position[0] >= 575 and position[1] <= 600) and (position[0] <= 775 and position[1] >= 550)):\n menu.quit()\n elif ((position[0] >= 355 and position[1] <= 670) and (position[0] <= 555 and position[1] >= 620)):\n menu.getGuide()\n\n\n# The background image of the battle\nclass Background:\n def __init__(self):\n # For battlefield\n self.image = simplegui.load_image('https://www.subeimagenes.com/img/bgs-1210457.PNG')\n self.width = self.image.get_width()\n self.height = self.image.get_height()\n self.columns = 3\n self.rows = 4\n self.frameSize = ((self.width // self.columns), (self.height // self.rows))\n self.frameCentre = (self.frameSize[0] / 2, self.frameSize[1] / 2)\n self.frame_index = (0, 0)\n self.center_source = [self.frameSize[i] * self.frame_index[i] + self.frameCentre[i] for i in [0, 1]]\n\n # For 
bottom box\n self.box = simplegui.load_image('https://media.giphy.com/media/lUWK158j2nTeU/giphy-facebook_s.jpg')\n self.boxWidth = self.box.get_width()\n self.boxHeight = self.box.get_height()\n self.boxColumns = 1\n self.boxRows = 1\n self.boxFrameSize = ((self.boxWidth // self.boxColumns), (self.boxHeight // self.boxRows))\n self.boxFrameCentre = (self.boxFrameSize[0] / 2, self.boxFrameSize[1] / 2)\n self.boxFrame_index = (0, 0)\n self.boxCenter_source = [self.boxFrameSize[i] * self.boxFrame_index[i] + self.boxFrameCentre[i] for i in [0, 1]]\n\n # Draws the battle backgrounds on canvas\n def draw(self, canvas):\n canvas.draw_image(self.image, self.center_source, self.frameSize,\n (CANVAS_WIDTH / 2, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 4),\n (CANVAS_WIDTH, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 8))\n\n canvas.draw_image(self.box, self.boxCenter_source, self.boxFrameSize,\n (CANVAS_WIDTH / 2, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 9),\n (CANVAS_WIDTH, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 2))\n\n\n# The buttons used to determine what actions the user can take/do in battle\nclass Buttons:\n # Initialises the buttons and their text\n def __init__(self):\n # For bottom box\n self.box = simplegui.load_image('http://play.everafterhigh.com/Static/images/ui/button-wdyfl-sprites.png')\n self.boxWidth = self.box.get_width()\n self.boxHeight = self.box.get_height()\n self.boxColumns = 1\n self.boxRows = 4\n self.boxFrameSize = ((self.boxWidth // self.boxColumns), (self.boxHeight // self.boxRows))\n self.boxFrameCentre = (self.boxFrameSize[0] / 2, self.boxFrameSize[1] / 2)\n self.boxFrame_index = (0, 1)\n self.boxCenter_source = [self.boxFrameSize[i] * self.boxFrame_index[i] + self.boxFrameCentre[i] for i in [0, 1]]\n self.attack = \"ATTACK\"\n self.aBoost = \"BOOST ATTACK\"\n self.dBoost = \"BOOST DEFENCE\"\n\n # Draws the buttons on canvas\n def draw(self, canvas):\n canvas.draw_image(self.box, self.boxCenter_source, self.boxFrameSize,\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 2.5, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 8.25),\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 2.25, (CANVAS_HEIGHT / VERTICAL_DIVIDERS)))\n\n canvas.draw_image(self.box, self.boxCenter_source, self.boxFrameSize,\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 5, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 8.25),\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 2.25, (CANVAS_HEIGHT / VERTICAL_DIVIDERS)))\n\n canvas.draw_image(self.box, self.boxCenter_source, self.boxFrameSize,\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 7.5, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 8.25),\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 2.25, (CANVAS_HEIGHT / VERTICAL_DIVIDERS)))\n\n attack = (frame.get_canvas_textwidth(self.attack, 20, 'monospace')) / 2\n boostAttack = frame.get_canvas_textwidth(self.aBoost, 20, 'monospace') / 2\n boostDefence = frame.get_canvas_textwidth(self.dBoost, 20, 'monospace') / 2\n\n canvas.draw_text(self.attack, (\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 2.25 - attack / 2.5, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 8.3)),\n 20,\n 'White', 'monospace')\n canvas.draw_text(self.aBoost, (\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 5 - boostAttack, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 8.3)), 20,\n 'White', 'monospace')\n canvas.draw_text(self.dBoost, (\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 7.25 - boostDefence / 1.5,\n (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 8.3)),\n 20, 'White', 'monospace')\n\n # Handles the on-screen battle buttons\n def mouse_handler(self, position):\n if ((position[0] >= 135 and position[1] <= 610) and (position[0] <= 335 and 
position[1] >= 545)):\n            battle.attack_button()\n        elif ((position[0] >= 355 and position[1] <= 610) and (position[0] <= 555 and position[1] >= 545)):\n            battle.boost_attack_button()\n        elif ((position[0] >= 575 and position[1] <= 610) and (position[0] <= 775 and position[1] >= 545)):\n            battle.boost_defence_button()\n\n\n# Determines which sprite to use\nclass Sprite:\n    def __init__(self, image, columns, rows):\n        self.image = simplegui.load_image(image)\n        self.width = self.image.get_width()\n        self.height = self.image.get_height()\n        self.columns = columns\n        self.rows = rows\n        self.frameSize = ((self.width // self.columns), (self.height // self.rows))\n        self.enlarged = (self.frameSize[0] * 3, self.frameSize[1] * 3)\n        self.frameCentre = (self.frameSize[0] / 2, self.frameSize[1] / 2)\n\n    # Draws the sprite to canvas\n    def draw(self, canvas, pos, size, frame_index=(0, 0)):\n        center_source = [self.frameSize[i] * frame_index[i] + self.frameCentre[i] for i in [0, 1]]\n        size_source = self.frameSize\n        center_dest = pos.getP()\n        size_dest = size\n        canvas.draw_image(self.image, center_source, size_source, center_dest, size_dest)\n\n\n# A class for creating a sprite for the shield shown when the player or enemy boosts their defence\nclass Sheild:\n    # Loads the sprite\n    def __init__(self):\n        self.image = simplegui.load_image('https://www.spriters-resource.com/resources/sheets/97/100140.png')\n        self.width = self.image.get_width()\n        self.height = self.image.get_height()\n        self.columns = 5\n        self.rows = 5\n        self.frameSize = (self.width // self.columns, self.height // self.rows)\n        self.frameCentre = (self.frameSize[0] / 2, self.frameSize[1] / 2)\n        self.frameIndex = [0, 0]\n\n    # Draws the sprite on canvas\n    def draw(self, canvas, pos, size):\n        center_source = [self.frameSize[i] * self.frameIndex[i] + self.frameCentre[i] for i in [0, 1]]\n        size_source = self.frameSize\n        center_dest = pos.getP()\n        size_dest = size\n        canvas.draw_image(self.image, center_source, size_source, center_dest, size_dest)\n\n    # Obtains the next frame of the sprite\n    def nextFrame(self):\n        self.frameIndex[0] = (self.frameIndex[0] + 1) % self.columns\n\n        if self.frameIndex[0] == 0:\n            self.frameIndex[1] = (self.frameIndex[1] + 1) % self.rows\n\n    # Resets the sprite\n    def reset(self):\n        self.frameIndex = [0, 0]\n\n    # Determines if the sprite has finished its animation\n    def complete(self):\n        complete = self.frameIndex[0] == 1 and self.frameIndex[1] == 3\n        if complete:\n            self.reset()\n        return complete\n\n\n# The fireball that goes towards the player or enemy\nclass Fireball:\n    # Initialises the sprite for the fireball\n    def __init__(self, frameIndex):\n        self.image = simplegui.load_image('https://opengameart.org/sites/default/files/fireball_0.png')\n        self.width = self.image.get_width()\n        self.height = self.image.get_height()\n        self.columns = 8\n        self.rows = 8\n        self.frameSize = (self.width // self.columns, self.height // self.rows)\n        self.frameSizeEnlarged = (self.frameSize[0] * 4, self.frameSize[1] * 4)\n        self.frameCentre = (self.frameSize[0] / 2, self.frameSize[1] / 2)\n        self.frameIndex = frameIndex\n        self.originalFrameIndex = list(frameIndex)\n        self.empty = (8, 8)\n\n    # Draws the fireball on the canvas\n    def draw(self, canvas, pos, size):\n        center_source = [self.frameSize[i] * self.frameIndex[i] + self.frameCentre[i] for i in [0, 1]]\n        size_source = self.frameSize\n        center_dest = pos.getP()\n        size_dest = size\n        canvas.draw_image(self.image, center_source, size_source, center_dest, size_dest)\n\n    
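# A note on blank()/reset() below: blank() pushes the frame index past\n    # the 8x8 grid so the source window falls outside the sheet and the\n    # fireball effectively disappears on impact; reset() restores the\n    # starting frame (copied in __init__ so in-place nextFrame() mutations\n    # cannot corrupt it).\n\n    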
# Obtains the next frame for the sprite\n    def nextFrame(self):\n        self.frameIndex[0] = (self.frameIndex[0] + 1) % self.columns\n\n    # Essentially clears the fireball once it reaches its target\n    def blank(self):\n        self.frameIndex = [10, 10]\n\n    # Resets the sprite ready for its next use\n    def reset(self):\n        self.frameIndex = list(self.originalFrameIndex)\n\n\n# The explosion for when the fireball hits a player\nclass Explosion:\n    # Initialises the sprite for the explosion\n    def __init__(self):\n        self.image = simplegui.load_image('http://moziru.com/images/drawn-explosion-sprite-1.png')\n        self.width = self.image.get_width()\n        self.height = self.image.get_height()\n        self.columns = 8\n        self.rows = 6\n        self.frameSize = (self.width // self.columns, self.height // self.rows)\n        self.frameSizeEnlarged = (self.frameSize[0] * 1.5, self.frameSize[1] * 1.5)\n        self.frameCentre = (self.frameSize[0] / 2, self.frameSize[1] / 2)\n        self.frameIndex = [0, 0]\n\n    # Draws the explosion on canvas when the fireball has completed\n    def draw(self, canvas, pos, size):\n        center_source = [self.frameSize[i] * self.frameIndex[i] + self.frameCentre[i] for i in [0, 1]]\n        size_source = self.frameSize\n        center_dest = pos.getP()\n        size_dest = size\n        canvas.draw_image(self.image, center_source, size_source, center_dest, size_dest)\n\n    # Obtains the next frame for the explosion\n    def nextFrame(self):\n        self.frameIndex[0] = (self.frameIndex[0] + 1) % self.columns\n        if self.frameIndex[0] == 0:\n            self.frameIndex[1] = (self.frameIndex[1] + 1) % self.rows\n\n    # Determines whether the explosion sprite has finished or not\n    def complete(self):\n        complete = self.frameIndex[0] == 7 and self.frameIndex[1] == 5\n        return complete\n\n\n# The sprite used to indicate that the player or enemy has boosted their attack\nclass Sword:\n    # Initialises the sprite\n    def __init__(self):\n        self.image = simplegui.load_image('https://image.ibb.co/dTFXTS/State_Up1.png')\n        self.width = self.image.get_width()\n        self.height = self.image.get_height()\n        self.columns = 5\n        self.rows = 3\n        self.frameSize = (self.width // self.columns, self.height // self.rows)\n        self.frameCentre = (self.frameSize[0] / 2, self.frameSize[1] / 2)\n        self.frameIndex = [0, 0]\n\n    # Draws the sprite on canvas\n    def draw(self, canvas, pos, size):\n        center_source = [self.frameSize[i] * self.frameIndex[i] + self.frameCentre[i] for i in [0, 1]]\n        size_source = self.frameSize\n        center_dest = pos.getP()\n        size_dest = size\n        canvas.draw_image(self.image, center_source, size_source, center_dest, size_dest, 135)\n\n    # Obtains the next frame for the sprite\n    def nextFrame(self):\n        self.frameIndex[0] = (self.frameIndex[0] + 1) % self.columns\n        if self.frameIndex[0] == 0:\n            self.frameIndex[1] = (self.frameIndex[1] + 1) % self.rows\n\n    # Resets the sprite\n    def reset(self):\n        self.frameIndex = [0, 0]\n\n    # Determines if the sprite is complete\n    def complete(self):\n        complete = self.frameIndex[0] == 4 and self.frameIndex[1] == 2\n        if complete:\n            self.reset()\n        return complete\n\n\n# The class for the projectile\nclass Projectile:\n    def __init__(self, pos, other):\n        self.pos = pos\n        self.radius = 20\n        self.other = other\n        self.vel = self.other.pos.copy().subtract(self.pos)\n\n    # The distance to the other object\n    def distance_to(self):\n        return (self.pos - self.other.pos).length()\n\n    # Updates the position\n    def update(self):\n        self.pos.add(self.vel.copy().divide(20))\n\n    # Homes in on the other object (used for the max-attack barrage)\n    def updateWaterFall(self):\n        vel = self.other.pos.copy().subtract(self.pos.copy())\n        self.pos.add(vel.copy().divide(10))\n\n\n# Drawing HP bars\nclass hpBar:\n    # Initialises the health bar for 
the player/enemy\n def __init__(self, pos):\n self.backgroundWidth = 200\n self.maxWidth = 200\n self.height = 50\n self.pos = pos\n\n # Draws the health bars on the canvas\n def draw(self, canvas, health, maxHealth, colour):\n width = health / maxHealth * self.maxWidth\n if width < 0:\n width = 0\n canvas.draw_polygon([self.pos, (self.pos[0] + self.backgroundWidth, self.pos[1]),\n (self.pos[0] + self.backgroundWidth, self.pos[1] + self.height),\n (self.pos[0], self.pos[1] + self.height)], 1, 'White', 'White')\n canvas.draw_polygon(\n [self.pos, (self.pos[0] + width, self.pos[1]), (self.pos[0] + width, self.pos[1] + self.height),\n (self.pos[0], self.pos[1] + self.height)], 1, colour, colour)\n\n\n# Player in battle\nclass Player1:\n # Initialises the player during a battle by setting stats\n def __init__(self, name, health, attack, defence, speed, sprite):\n self.name = name\n self.baseHealth = health\n self.baseAttack = attack\n self.baseDefence = defence\n self.baseSpeed = speed\n\n self.maxHealth = self.baseHealth\n self.health = health\n self.attackStat = self.baseAttack\n self.defenceStat = self.baseDefence\n self.speedStat = self.baseSpeed\n\n # Base boosts and move\n self.defBoost = 1\n self.attBoost = 1\n self.attacking = False\n self.projectile = False\n self.defending = False\n self.attackboosting = False\n\n # Drawing and animations\n self.radius = 50\n self.x = (CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 3\n self.y = (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 6.8\n self.pos = Vector(self.x, self.y)\n self.pos_start = Vector(self.x, self.y)\n self.vel = Vector()\n self.sprite = Sprite('https://veekun.com/static/pokedex/downloads/generation-3-back.png', 25, 16)\n self.frame_index = sprite\n self.shield = Sheild()\n self.sword = Sword()\n self.fireball = Fireball([0, 3])\n self.explosion = Explosion()\n self.hpBar = hpBar((self.x * 2, self.y))\n self.textPos = (self.x * 2, self.y - 10)\n self.text = ''\n self.moveOver = False\n self.p = None\n self.nuke = []\n self.nukeExplosion = []\n self.attackMax = False\n self.first = False\n global BATTLE_ON\n # Score\n global beenHealed\n global score\n\n # Boost attack\n def attack_boost(self):\n if self.attBoost < 3:\n self.attBoost += 0.5\n\n # Boost defence\n def defence_boost(self):\n if self.defBoost < 3:\n self.defBoost += 0.5\n\n # Reset attack boost\n def attack_reset(self):\n self.attBoost = 1\n\n # Reset defence boost\n def defence_reset(self):\n self.defBoost = 1\n\n # Heals the player by resetting to maxHealth\n def health_reset(self):\n global beenHealed\n global score\n if (beenHealed is False):\n # print ('healing')\n self.health = self.maxHealth\n score = score - 3\n beenHealed = True\n if (beenHealed is True):\n if (c.transition(5)):\n beenHealed = False\n # print ('treueueu')\n\n # Add to players base stats\n def level_up(self):\n self.maxHealth += random.randint(8, 12)\n self.health = self.maxHealth\n self.attackStat += random.randint(8, 12)\n self.defenceStat += random.randint(8, 12)\n self.speedStat += random.randint(8, 12)\n\n # Reset all stats to base\n def reset_level(self):\n self.maxHealth = self.baseHealth\n self.health = self.maxHealth\n self.attackStat = self.baseAttack\n self.defenceStat = self.baseDefence\n self.speedStat = self.baseSpeed\n\n # Returns current health of player\n def alive(self):\n return self.health > 0\n\n # Attack opponent\n def attack(self, other):\n\n self.moveOver = True\n self.attacking = True\n self.text = 'Player attacked'\n self.p = Projectile(self.pos.copy(), other)\n other.health = 
other.health - (\n ((self.attackStat * self.attBoost) / (other.defenceStat * other.defBoost)) * randomModifierPlayer())\n if self.attackMax:\n for x in range(4):\n pPos = other.pos.copy() - Vector(random.randint(-CANVAS_WIDTH, CANVAS_WIDTH),\n random.randint(CANVAS_HEIGHT, CANVAS_HEIGHT * 2))\n p = Projectile(pPos, other)\n e = Explosion()\n self.nuke.append(p)\n self.nukeExplosion.append(e)\n self.attack_reset()\n other.defence_reset()\n\n # Boost player attack modifier\n def boost_attack(self):\n self.moveOver = True\n self.attackboosting = True\n if self.attBoost <= 3:\n self.attack_boost()\n self.text = 'Player boosted attacked'\n if self.attBoost == 3:\n self.text = 'Player attack maximised'\n self.attackMax = True\n self.first = True\n\n # Boost player defence modifier\n def boost_defence(self):\n self.moveOver = True\n self.defending = True\n if self.defBoost <= 3:\n self.defence_boost()\n self.text = 'Player boosted defence'\n if self.defBoost == 3:\n self.text = 'Player defence maximised'\n\n # Drawing player and animations\n def draw(self, canvas):\n self.hpBar.draw(canvas, self.health, self.maxHealth, 'Green')\n canvas.draw_text(self.name, (self.textPos), 24, 'White', 'monospace')\n self.sprite.draw(canvas, self.pos, self.sprite.enlarged, self.frame_index)\n if self.defending:\n self.shield.draw(canvas, self.pos, self.shield.frameSize)\n if self.attackboosting:\n self.sword.draw(canvas, self.pos, self.sword.frameSize)\n if self.attacking:\n if self.attackMax:\n if self.first:\n self.fireball.frameIndex = [0, 6]\n self.first = False\n else:\n for p in self.nuke:\n self.fireball.draw(canvas, p.pos, self.fireball.frameSizeEnlarged)\n if p.distance_to() < (self.p.radius + self.enemy.radius):\n for x in self.nukeExplosion:\n x.draw(canvas, p.pos, self.explosion.frameSizeEnlarged)\n else:\n self.fireball.reset()\n self.fireball.draw(canvas, self.p.pos, self.fireball.frameSizeEnlarged)\n if self.p.distance_to() < (self.p.radius + self.enemy.radius):\n self.p.vel = Vector(0, 0)\n self.explosion.draw(canvas, self.p.pos, self.explosion.frameSizeEnlarged)\n\n # updating player and animations\n def update(self):\n global BATTLE_ON\n if self.defending:\n if clock.transition(3):\n self.shield.nextFrame()\n if self.shield.complete():\n self.defending = False\n self.moveOver = False\n if self.attackboosting:\n if clock.transition(3):\n self.sword.nextFrame()\n if self.sword.complete():\n self.attackboosting = False\n self.moveOver = False\n if self.attacking:\n if self.attackMax:\n if self.attackMax:\n for p in self.nuke:\n p.updateWaterFall()\n self.fireball.nextFrame()\n if p.distance_to() < (self.p.radius + self.enemy.radius):\n self.fireball.blank()\n for x in self.nukeExplosion:\n x.nextFrame()\n if x.complete():\n self.fireball.reset()\n self.attacking = False\n self.moveOver = False\n self.attackMax = False\n if self.enemy.health <= 0:\n BATTLE_ON = False\n else:\n self.p.update()\n self.fireball.nextFrame()\n if self.p.distance_to() < (self.p.radius + self.enemy.radius):\n self.explosion.nextFrame()\n self.fireball.blank()\n if self.explosion.complete():\n self.fireball.reset()\n self.attacking = False\n self.moveOver = False\n if self.enemy.health <= 0:\n BATTLE_ON = False\n\n\n# Enemy in battle\nclass Enemy:\n # Initialises the enemy in battle and sets its stats\n def __init__(self, name, health, attack, defence, speed, sprite):\n self.name = name\n self.maxHealth = health\n self.health = health\n self.attackStat = attack\n self.defenceStat = defence\n self.speedStat = speed\n 
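# Boost bookkeeping mirrors Player1: both boosts start at 1 and climb in\n        # 0.5 steps to a cap of 3. Damage in attack() is\n        #   (attackStat * attBoost) / (defenceStat * defBoost) * randomModifier\n        # so e.g. attackStat 100 vs defenceStat 50 with no boosts and a\n        # modifier of 50 deals 100 damage.\n        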
self.defBoost = 1\n self.attBoost = 1\n self.attacking = False\n self.projectile = False\n self.defending = False\n self.attackboosting = False\n self.x = (CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 7.25\n self.y = (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 3.25\n self.pos = Vector(self.x, self.y)\n self.pos_start = Vector(self.x, self.y)\n self.vel = Vector()\n self.sprite = Sprite('https://veekun.com/static/pokedex/downloads/generation-3.png', 25, 16)\n self.frame_index = sprite\n self.radius = 50\n self.fireball = Fireball([0, 7])\n self.explosion = Explosion()\n self.shield = Sheild()\n self.sword = Sword()\n self.hpBar = hpBar((self.x / 4, self.y / 3))\n self.textPos = (self.x / 4, (self.y / 3) - 10)\n self.text = ''\n self.p = None\n self.nuke = []\n self.nukeExplosion = []\n self.attackMax = False\n self.first = False\n self.player_alive = True\n global PLAYER_ALIVE\n global BATTLE_ON\n\n # Boost attack\n def attack_boost(self):\n if self.attBoost < 3:\n self.attBoost += 0.5\n\n # Boost defence\n def defence_boost(self):\n if self.defBoost < 3:\n self.defBoost += 0.5\n\n # Reset attack boost\n def attack_reset(self):\n self.attBoost = 1\n\n # Reset defence boost\n def defence_reset(self):\n self.defBoost = 1\n\n # Heals the enemy by resetting to maxHealth\n def health_reset(self):\n self.health = self.maxHealth\n\n # Attack opponent\n def attack(self, other):\n self.moveOver = True\n self.attacking = True\n self.text = 'Enemy attacked'\n self.p = Projectile(self.pos.copy(), other)\n other.health = other.health - (\n ((self.attackStat * self.attBoost) / (other.defenceStat * other.defBoost)) * randomModifierPlayer())\n if self.attackMax:\n for x in range(4):\n pPos = other.pos.copy() - Vector(random.randint(-CANVAS_WIDTH, CANVAS_WIDTH),\n random.randint(CANVAS_HEIGHT, CANVAS_HEIGHT * 2))\n p = Projectile(pPos, other)\n e = Explosion()\n self.nuke.append(p)\n self.nukeExplosion.append(e)\n self.attack_reset()\n other.defence_reset()\n\n # Boost attack modifier\n def boost_attack(self):\n self.moveOver = True\n self.attackboosting = True\n if self.attBoost <= 3:\n self.attack_boost()\n self.text = 'Enemy boosted attacked'\n if self.attBoost == 3:\n self.text = 'Enemy attack maximised'\n self.attackMax = True\n self.first = True\n\n # Boost defence modifier\n def boost_defence(self):\n self.moveOver = True\n self.defending = True\n if self.defBoost >= 3:\n self.text = 'Enemy defence maximised'\n else:\n self.defence_boost()\n self.text = 'Enemy boosted defence'\n\n # Select random move\n def random_move(self):\n rand_move = random.randint(1, 100)\n if rand_move >= 1 and rand_move <= 30:\n self.attack(self.player)\n self.update()\n\n elif rand_move >= 31 and rand_move <= 65:\n self.boost_attack()\n\n elif rand_move >= 66 and rand_move <= 100:\n self.boost_defence()\n\n def is_alive(self):\n return self.player_alive == True\n\n # Drawing enemy character\n def draw(self, canvas):\n self.hpBar.draw(canvas, self.health, self.maxHealth, 'Red')\n canvas.draw_text(self.name, (self.textPos), 24, 'White', 'monospace')\n self.sprite.draw(canvas, self.pos, self.sprite.enlarged, self.frame_index)\n if self.defending:\n self.shield.draw(canvas, self.pos, self.shield.frameSize)\n if self.attackboosting:\n self.sword.draw(canvas, self.pos, self.sword.frameSize)\n if self.attacking:\n if self.attackMax:\n if self.first:\n self.fireball.frameIndex = [0, 6]\n self.first = False\n else:\n for p in self.nuke:\n self.fireball.draw(canvas, p.pos, self.fireball.frameSizeEnlarged)\n if p.distance_to() < (self.p.radius + 
self.player.radius):\n p.vel = Vector(0, 0)\n for x in self.nukeExplosion:\n x.draw(canvas, p.pos, self.explosion.frameSizeEnlarged)\n else:\n self.fireball.draw(canvas, self.p.pos, self.fireball.frameSizeEnlarged)\n if self.p.distance_to() < (self.p.radius + self.player.radius):\n self.p.vel = Vector(0, 0)\n self.explosion.draw(canvas, self.p.pos, self.explosion.frameSizeEnlarged)\n\n # Updateing enemy animations\n def update(self):\n if self.defending:\n if clock.transition(3):\n self.shield.nextFrame()\n if self.shield.complete():\n self.defending = False\n if self.attackboosting:\n if clock.transition(3):\n self.sword.nextFrame()\n if self.sword.complete():\n self.attackboosting = False\n if self.attacking:\n if self.attackMax:\n if self.attackMax:\n for p in self.nuke:\n p.updateWaterFall()\n self.fireball.nextFrame()\n if p.distance_to() < (self.p.radius + self.player.radius):\n self.fireball.blank()\n for x in self.nukeExplosion:\n x.nextFrame()\n if x.complete():\n self.fireball.reset()\n self.attacking = False\n self.attackMax = False\n if not self.player.alive():\n self.player_alive = True\n BATTLE_ON = False\n else:\n self.p.update()\n self.fireball.nextFrame()\n if self.p.distance_to() < (self.p.radius + self.player.radius):\n self.explosion.nextFrame()\n self.fireball.blank()\n if self.explosion.complete():\n self.fireball.reset()\n self.attacking = False\n if not self.player.alive():\n self.player_alive = True\n BATTLE_ON = False\n\n\n# Player vs Enemy battle\nclass Battle:\n # Initialises the battle\n def __init__(self, player, enemy):\n self.background = Background()\n self.textPlayer = 'Battle Started'\n self.textEnemy = ''\n self.button = Buttons()\n\n # Method for clicking attack button\n def attack_button(self):\n self.player = Map.a\n self.enemy = Map.b\n if BATTLE_ON and self.player.speedStat == self.enemy.speedStat:\n rand_order = self.rand_first()\n if BATTLE_ON and rand_order == 1:\n self.player.attack(self.enemy)\n self.textPlayer = self.player.text\n if BATTLE_ON:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n elif BATTLE_ON and rand_order == 0:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n if BATTLE_ON:\n self.player.attack(self.enemy)\n self.textPlayer = self.player.text\n\n elif BATTLE_ON and ((self.player.speedStat < self.enemy.speedStat)):\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n if BATTLE_ON:\n self.player.attack(self.enemy)\n self.textPlayer = self.player.text\n\n elif BATTLE_ON and ((self.player.speedStat > self.enemy.speedStat)):\n self.player.attack(self.enemy)\n self.textPlayer = self.player.text\n if BATTLE_ON:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n\n # Method for clicking boost attack button\n def boost_attack_button(self):\n self.player = Map.a\n self.enemy = Map.b\n if BATTLE_ON and self.player.speedStat == self.enemy.speedStat:\n rand_order = self.rand_first()\n if BATTLE_ON and rand_order == 1:\n self.player.boost_attack()\n self.textPlayer = self.player.text\n if BATTLE_ON:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n elif BATTLE_ON and rand_order == 0:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n if BATTLE_ON:\n self.player.boost_attack()\n self.textPlayer = self.player.text\n\n elif BATTLE_ON and ((self.player.speedStat > self.enemy.speedStat)):\n self.player.boost_attack()\n self.textPlayer = self.player.text\n if BATTLE_ON:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n\n elif BATTLE_ON and 
((self.player.speedStat < self.enemy.speedStat)):\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n if BATTLE_ON:\n self.player.boost_attack()\n self.textPlayer = self.player.text\n\n # Method for clicking boost defence button\n def boost_defence_button(self):\n self.player = Map.a\n self.enemy = Map.b\n if BATTLE_ON and self.player.speedStat == self.enemy.speedStat:\n rand_order = self.rand_first()\n if BATTLE_ON and rand_order == 1:\n self.player.boost_defence()\n self.textPlayer = self.player.text\n if BATTLE_ON:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n elif BATTLE_ON and rand_order == 0:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n if BATTLE_ON:\n self.player.boost_defence()\n self.textPlayer = self.player.text\n\n elif BATTLE_ON and ((self.player.speedStat > self.enemy.speedStat)):\n self.player.boost_defence()\n self.textPlayer = self.player.text\n if BATTLE_ON:\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n\n elif BATTLE_ON and ((self.player.speedStat < self.enemy.speedStat)):\n self.enemy.random_move()\n self.textEnemy = self.enemy.text\n if BATTLE_ON:\n self.player.boost_defence()\n self.textPlayer = self.player.text\n\n # Method to get random int for who goes first\n @staticmethod\n def rand_first():\n return random.randint(0, 1)\n\n # Method for drawing the battle\n def draw(self, canvas):\n self.player = Map.a\n self.enemy = Map.b\n\n clock.tick()\n self.background.draw(canvas)\n canvas.draw_text(self.textPlayer,\n ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 0.5, (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 9.25),\n 30, 'White', 'monospace')\n enemyTextSize = (frame.get_canvas_textwidth(self.textEnemy, 30, 'monospace'))\n canvas.draw_text(self.textEnemy,\n (CANVAS_WIDTH - ((CANVAS_WIDTH / HORIZONTAL_DIVIDERS) * 0.5 + enemyTextSize),\n (CANVAS_HEIGHT / VERTICAL_DIVIDERS) * 9.25), 30, 'White', 'monospace')\n self.enemy.draw(canvas)\n self.player.draw(canvas)\n self.player.update()\n self.enemy.update()\n self.button.draw(canvas)\n\n if not self.player.alive() and self.enemy.is_alive():\n print(self.enemy.player_alive)\n print(PLAYER_ALIVE)\n text_width = frame.get_canvas_textwidth('BATTLE OVER', 50, 'sans-serif')\n canvas.draw_text('BATTLE OVER', ((CANVAS_WIDTH / 2 - text_width / 2), (CANVAS_HEIGHT / 2)), 50, 'Grey',\n 'sans-serif')\n menu.playGame = False\n menu.message = \"Battle Over\"\n menu.messageTwo = \"\"\n self.player.reset_level()\n Map.reset()\n\n\n# Used for timing?\nclass Clock:\n def __init__(self):\n self.time = 0\n\n def tick(self):\n # time> 5000 is to reset time to 0 to prevent overflow\n if self.time > 5000:\n self.time = 0\n self.time += 1\n\n def transition(self, rate):\n return self.time % rate == 0\n\n\n# Randomises the players sprite/character\ndef randomModifierPlayer():\n return random.randint(40, 60)\n\n\n# Randomises the enemy's sprite/character\ndef randomModifierEnemy():\n return random.randint(40, 55)\n\n\n# Handles the mouse clicks for the buttons\ndef mouse_handler(position):\n if (menu.playGame):\n battle.button.mouse_handler(position)\n else:\n menuButtons.mouse_handler(position)\n\n\n# Initialises basic items required for gameplay\nmenu = Menu()\nmenuButtons = MenuButtons()\nc = Clock1()\nclock = Clock()\nkbd = Keyboard()\nopening = Opening()\ntransition = Transition()\nfb = FireBlast(Vector(460, 350))\n\n# Initialises the players, maps, etc\nPlayer = Character(Vector(300, 340))\ninter = Interaction(Player, kbd)\nPokemon = RandomPokemon(random.randint(0, 8), random.randint(0, 8), 
random.randint(0, 8))\nMap = Map((920, 700))\nbattle = Battle(Map.a, Map.b)\n\n# Required to set various things to allow user interaction and to draw on canvas\n# Also begins the frame\nframe = simplegui.create_frame('Not Pokemon', CANVAS_WIDTH, CANVAS_HEIGHT)\nframe.set_draw_handler(inter.draw)\nframe.set_keydown_handler(kbd.keyDown)\nframe.set_keyup_handler(kbd.keyUp)\nframe.set_mouseclick_handler(mouse_handler)\nframe.start()","sub_path":"FinalGame.py","file_name":"FinalGame.py","file_ext":"py","file_size_in_byte":123691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"538894286","text":"def checker(sentence, letter):\r\n result = []\r\n for i in range(0,len(sentence)) :\r\n if letter in sentence[i] :\r\n result.append(i) \r\n return print(result)\r\n \r\na = checker(\"Apple\", \"p\") # a = [1, 2]\r\nb = checker(\"Banana\", \"p\") # b = []\r\nc = checker(\"Cat\", \"a\") # c = [1]\r\n","sub_path":"L4/L4Q4.py","file_name":"L4Q4.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21868545","text":"import argparse\nimport os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport importlib\nfrom pointnet2 import pointnet2_utils as pn2_utils\nfrom utils.utils import knn_point\nfrom chamfer_distance import chamfer_distance\nfrom auction_match import auction_match\nfrom dataset import Dataset\n\n\nparser = argparse.ArgumentParser(description=\"Arg parser\")\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use')\nparser.add_argument(\"--model\", type=str, default='punet')\nparser.add_argument('--log_dir', default='checkpoint/PUonly', help='Log dir [default: checkpoint/PUonly]')\nparser.add_argument('--npoint', type=int, default=2048,help='Point Number [1024/2048] [default: 1024]')\nparser.add_argument('--up_ratio', type=int, default=1, help='Upsampling Ratio [default: 4]')\nparser.add_argument('--max_epoch', type=int, default=300, help='Epochs to run [default: 100]')\nparser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training')\nparser.add_argument(\"--use_bn\", action='store_true', default=True)\nparser.add_argument(\"--use_res\", action='store_true', default=False)\nparser.add_argument(\"--alpha\", type=float, default=1.0) # for repulsion loss\nparser.add_argument('--optim', type=str, default='adam')\nparser.add_argument('--use_decay', action='store_true', default=True)\nparser.add_argument('--lr', type=float, default=0.0005)\nparser.add_argument('--lr_decay', type=float, default=0.5)\nparser.add_argument('--lr_clip', type=float, default=0.000001)\nparser.add_argument('--decay_step_list', type=list, default=[100, 300])\nparser.add_argument('--weight_decay', type=float, default=0.0005)\nparser.add_argument('--workers', type=int, default=4)\nparser.add_argument(\"--results_saved_dir\", default='results', help='results_saved_dir [default: results]')\nargs = parser.parse_args()\nprint(args)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n\nclass UpsampleLoss(nn.Module):\n def __init__(self, alpha=1.0, nn_size=5, radius=0.07, h=0.03, eps=1e-12):\n super().__init__()\n self.alpha = alpha\n self.nn_size = nn_size\n self.radius = radius\n self.h = h\n self.eps = eps\n\n def get_emd_loss(self, pointclouds, gt, pcd_radius):\n idx, _ = auction_match(pointclouds, gt)\n matched_out = pn2_utils.gather_operation(gt.transpose(1, 2).contiguous(), idx)\n 
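The `checker` exercise in L4Q4.py above ends with `return print(result)`, which always returns `None`, so the annotated results (`a = [1, 2]` and so on) are never actually assigned. A sketch of the likely intent, returning the index list instead of the `print` call's value:

```python
def checker(sentence, letter):
    """Return the indices at which letter occurs in sentence."""
    result = []
    for i in range(len(sentence)):
        if letter in sentence[i]:
            result.append(i)
    return result

a = checker("Apple", "p")   # a == [1, 2]
b = checker("Banana", "p")  # b == []
c = checker("Cat", "a")     # c == [1]
```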
matched_out = matched_out.transpose(1, 2).contiguous()\n dist2 = (pointclouds - matched_out) ** 2\n dist2 = dist2.view(dist2.shape[0], -1)\n dist2 = torch.mean(dist2, dim=1, keepdims=True)\n dist2 /= pcd_radius\n return torch.mean(dist2)\n\n def get_cd_loss(self, sk, skeleton, pcd_radius):\n cost_for, cost_bac = chamfer_distance(sk, skeleton)\n cost = torch.mean(cost_for) + torch.mean(cost_bac)\n return cost\n\n def get_repulsion_loss(self, pred):\n _, idx = knn_point(self.nn_size, pred, pred, transpose_mode=True)\n idx = idx[:, :, 1:].to(torch.int32)\n idx = idx.contiguous()\n\n pred = pred.transpose(1, 2).contiguous()\n grouped_points = pn2_utils.grouping_operation(pred, idx)\n\n grouped_points = grouped_points - pred.unsqueeze(-1)\n dist2 = torch.sum(grouped_points ** 2, dim=1)\n dist2 = torch.max(dist2, torch.tensor(self.eps).cuda())\n dist = torch.sqrt(dist2)\n weight = torch.exp(- dist2 / self.h ** 2)\n\n uniform_loss = torch.mean((self.radius - dist) * weight)\n return uniform_loss\n\n def forward(self, pred_fullpoint, gt_fullpoint,radius_data):\n return self.get_emd_loss(pred_fullpoint, gt_fullpoint, radius_data)*250,\\\n self.get_repulsion_loss(pred_fullpoint)\n\ndef get_optimizer():\n if args.optim == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n elif args.optim == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), \n lr=args.lr, \n momentum=0.98, \n weight_decay=args.weight_decay, \n nesterov=True)\n else:\n raise NotImplementedError\n \n if args.use_decay:\n def lr_lbmd(cur_epoch):\n cur_decay = 1\n for decay_step in args.decay_step_list:\n if cur_epoch >= decay_step:\n cur_decay = cur_decay * args.lr_decay\n return max(cur_decay, args.lr_clip / args.lr)\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lbmd)\n return optimizer, lr_scheduler\n else:\n return optimizer, None\n\n\nif __name__ == '__main__':\n train_dst = Dataset(npoint=args.npoint, use_norm=True, split='train', is_training=True)\n train_loader = DataLoader(train_dst, batch_size=args.batch_size, \n shuffle=True, pin_memory=True, num_workers=args.workers)\n Model = importlib.import_module('models.' 
+ args.model)\n model = Model.get_model(npoint=args.npoint, up_ratio=args.up_ratio,use_normal=False, use_bn=args.use_bn, use_res=args.use_res)\n model.cuda()\n\n load_model_dir = 'checkpoint/PUonly/punet_epoch_***.pth' #here type in the model you want to continue training\n if os.path.exists(load_model_dir):\n checkpoint = torch.load(load_model_dir)\n model.load_state_dict(checkpoint['model_state'])\n start_epoch = checkpoint['epoch']\n print('load {} checkpoint successfully!'.format(start_epoch))\n else:\n print('No checkpoint detected, retrain')\n start_epoch = 0\n\n optimizer, lr_scheduler = get_optimizer()\n loss_func = UpsampleLoss(alpha=args.alpha)\n\n model.train()\n for epoch in range(start_epoch,args.max_epoch):\n loss_list = []\n emd_loss_list = []\n cd_loss_list = []\n repu_loss_list = []\n print('{}th_ epoch training starts!'.format(epoch))\n for iter, batch in enumerate(train_loader):\n input_data, gt_fullpoint, _, radius_data = batch\n optimizer.zero_grad()\n\n input_data = input_data.float().cuda()\n gt_fullpoint = gt_fullpoint.float().cuda()\n gt_fullpoint = gt_fullpoint[..., :3].contiguous()\n radius_data = radius_data.float().cuda()\n\n pred_fullpoint= model(input_data)\n emd_loss,repulsion_loss= loss_func(pred_fullpoint, gt_fullpoint, radius_data)\n loss = emd_loss + repulsion_loss\n\n loss.backward()\n\n optimizer.step()\n loss_list.append(loss.item())\n emd_loss_list.append(emd_loss.item())\n repu_loss_list.append(repulsion_loss.item())\n\n print(' -- epoch {}, loss {:.4f}, weighted emd loss {:.4f}, repul loss{:.4f},lr {}.'.format(\n epoch, np.mean(loss_list), np.mean(emd_loss_list), np.mean(repu_loss_list), \\\n optimizer.state_dict()['param_groups'][0]['lr']))\n \n if lr_scheduler is not None:\n lr_scheduler.step(epoch)\n if (epoch ) % 2 == 0:\n state = {'epoch': epoch, 'model_state': model.state_dict()}\n save_path = os.path.join(args.log_dir, 'punet_epoch_{}.pth'.format(epoch))\n torch.save(state, save_path)\n","sub_path":"train_PUnet_only.py","file_name":"train_PUnet_only.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"131628198","text":"import argparse, socket, sys\n\nMAX_BYTES = 65535\n\n# server\ndef server(port):\n #@ make the socket and assign it to sock\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n #@ assign the broadcast IP address to the variable named interface\n interface = \"127.0.0.1\"\n \n #@ bind sock to the pair of interface, port\n sock.bind((interface, port))\n print('Listening at', sock.getsockname())\n while True:\n data, address = sock.recvfrom(MAX_BYTES)\n text = data.decode('ascii')\n print('The client at {} says {!r}'.format(address, text))\n\n# client\ndef client(port):\n #@ make the socket and assign it to sock\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n #@ assign the broadcast IP address to the variable named hostname\n hostname = \"127.0.0.1\"\n \n sock.setsockopt(socket.SOL_SOCKET,socket.SO_BROADCAST,1)\n #@ connect sock to the pair of interface, port\n sock.connect((hostname,port))\n print('Client socket name is {}'.format(sock.getsockname()))\n\n text = input(\"What data should we broadcast?\")\n \n # send datagram while more to send\n while len(text):\n data = text.encode('ascii')\n \n # send datagram\n sock.send(data)\n \n # get next datagram to send\n text = input(\"What data should we broadcast?\")\n \n# main\nif __name__ == '__main__':\n choices = {'client': client, 'server': server}\n parser = 
argparse.ArgumentParser(description='Send and receive UDP')\n parser.add_argument('role', choices=choices, help='which role to take')\n parser.add_argument('-p', metavar='PORT', type=int, default=1060,\n help='UDP port (default 1060)')\n args = parser.parse_args()\n function = choices[args.role]\n function(args.p)\n","sub_path":"CSIS 349 P4/assignment04.py","file_name":"assignment04.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572237241","text":"import gzip\nimport _pickle\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Translate a list of labels into an array of 0's and one 1.\n# i.e.: 4 -> [0,0,0,0,1,0,0,0,0,0]\ndef one_hot(x, n):\n \"\"\"\n :param x: label (int)\n :param n: number of bits\n :return: one hot code\n \"\"\"\n if type(x) == list:\n x = np.array(x)\n x = x.flatten()\n o_h = np.zeros((len(x), n))\n o_h[np.arange(len(x)), x] = 1\n return o_h\n\n\nf = gzip.open('mnist.pkl.gz', 'rb')\ntrain_set, valid_set, test_set = _pickle.load(f, encoding='latin1')\nf.close()\n\ntrain_x, train_y = train_set\nvalid_x, valid_y = valid_set\ntest_x, test_y = test_set\n\n#Defining NN\n\ny_data = one_hot(train_y.astype(int), 10)\ny_valid = one_hot(valid_y.astype(int), 10)\ny_test = one_hot(test_y.astype(int), 10)\n\nx = tf.placeholder(\"float\", [None, 784]) # samples\ny_ = tf.placeholder(\"float\", [None, 10]) # labels\n\nW1 = tf.Variable(np.float32(np.random.rand(784, 100)) * 0.1)\nb1 = tf.Variable(np.float32(np.random.rand(100)) * 0.1)\n\nW2 = tf.Variable(np.float32(np.random.rand(100, 20)) * 0.1)\nb2 = tf.Variable(np.float32(np.random.rand(20)) * 0.1)\n\nW3 = tf.Variable(np.float32(np.random.rand(20, 10)) * 0.1)\nb3 = tf.Variable(np.float32(np.random.rand(10)) * 0.1)\n\nh = tf.nn.sigmoid(tf.matmul(x, W1) + b1)\nj = tf.nn.sigmoid(tf.matmul(h, W2) + b2)\n# h = tf.matmul(x, W1) + b1 # Try this!\ny = tf.nn.softmax(tf.matmul(j, W3) + b3)\n\nloss = tf.reduce_sum(tf.square(y_ - y))\n\nlrate=0.05\ntrain = tf.train.GradientDescentOptimizer(lrate).minimize(loss) # learning rate: 0.01\n\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\nprint(\"Parámetros:\\nCapas ocultas: 2\\nNúmero de neuronas en la capa 1: 100\\n\"\n \"Número de neuronas en la capa 2: 20\\nLearning Rate:\", lrate)\n\nbatch_size = 20\n\nerror=9999999\nepoch = 1\nerrorArray = []\nwhile True:\n for jj in range(int(len(train_x) / batch_size)):\n batch_xs = train_x[jj * batch_size: jj * batch_size + batch_size]\n batch_ys = y_data[jj * batch_size: jj * batch_size + batch_size]\n sess.run(train, feed_dict={x: batch_xs, y_: batch_ys})\n\n result = sess.run(y, feed_dict={x: batch_xs})\n\n errorPrev=error\n error = sess.run(loss, feed_dict={x: valid_x, y_: y_valid})\n errorArray.append(error)\n\n if (abs(error-errorPrev)/errorPrev) < 0.001 or error > errorPrev:\n break\n epoch += 1\n\nprint (\"----------------------------------------\")\n\ntest_result = sess.run(y, feed_dict={x: test_x})\naciertos=0\nfor nn, real in zip(y_test, test_result):\n if np.argmax(nn) == np.argmax(real):\n aciertos += 1\n\nprecision = aciertos/len(y_test)*100\nprint(\"Precisión de la red:\", precision, \"%\")\n\nplt.subplot(1, 2, 1)\nplt.plot(errorArray)\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Error de validación\")\nplt.title(\"Variación del 
error\")\nplt.show()\n","sub_path":"nn_mnist.py","file_name":"nn_mnist.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295439198","text":"import random\nimport decimal\nfrom decimal import *\nfrom random import uniform\nimport re\n_=singular.lib('random.lib')\n_=singular.lib('sing.lib')\npossiblevars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y']\ndef automator(Num,variablecount,length):\n poly = ''\n successes = []\n for x in range(Num):\n poly = polynomial(variablecount,length)\n\ndef polynomial(variablecount,length):\n if(variablecount > 24):\n variablecount = 24\n vars = []\n final = []\n variables = '('\n s = 0\n for X in range(variablecount):\n s = randint(0,50)\n if s not in vars:\n vars.append(s)\n variables = variables + ',' + possiblevars[s]\n variables = variables[0]+variables[2:]+')'\n R = singular.ring(0,variables,'ds')\n poly = singular.sparsepoly(length);\"\";\n poly1 = poly\n if singular.dim_slocus(poly) == 1:\n print(poly1)\n\ndef polynomial(variablecount,length):\n if(variablecount > 24):\n variablecount = 24\n vars = []\n final = []\n variables = '('\n s = 0\n for X in range(variablecount):\n s = randint(0,50)\n if s not in vars:\n vars.append(s)\n variables = variables + ',' + possiblevars[s]\n variables = variables[0]+variables[2:]+')'\n R = singular.ring(0,variables,'ds')\n poly = singular.sparsepoly(length);\"\";\n if singular.dim_slocus(poly) == 1:\n print(poly)\n\n#find the milnor numbers when you test with a large exponent if milnor is below 200 or above 500\n#find ones that have small changes in the milnor numbers when increased by 1\n#set one variable to zero and test singular locus, should be zero\n#find a linear combination of the variables such that when you restrict one variable that that has an isolated critical point\n#factor over reals is ok\n\n# 'Γ', 'Δ', 'Ε', 'Ζ', 'Η', 'Θ', 'Ι', 'Κ', 'Λ', 'Μ', 'Ν', 'Ξ', 'Ο', 'Π', 'Ρ', 'Σ', 'Τ', 'Υ', 'Φ', 'Χ', 'Ψ', 'Ω', 'α', 'β', 'γ', 'δ', 'ε', 'ζ', 'η', 'θ', 'ι', 'κ', 'λ', 'μ', 'ν', 'ξ', 'ο', 'π', 'ρ', 'σ', 'τ', 'υ', 'φ', 'χ', 'ψ', 'ω',\n","sub_path":"singularityv5.py","file_name":"singularityv5.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163676905","text":"# Justin Hill - Team boil\r\n# Ryan Carey - Team Close\r\nimport sys\r\nimport Adafruit_DHT\r\nimport time\r\nimport Adafruit_CharLCD as LCD\r\nimport RPi.GPIO as GPIO\r\nimport requests\r\nfrom datetime import datetime, date\r\n\r\n# dh11 sensor\r\nsensor = 11\r\npin = 4\r\n# Define LCD column and row size for 16x2 LCD.\r\nlcd_columns = 16\r\nlcd_rows = 2\r\n# Define Buzzer pin\r\nbuzzerPin = 18\r\n\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setup(buzzerPin, GPIO.OUT)\r\n\r\nglobal temperature, humidity, start, end, duration\r\n\r\n# Try to grab a sensor reading. 
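singularityv5.py above defines `polynomial` twice with near-identical bodies, so the second definition silently shadows the first. Most of the body needs a Sage/Singular session, but the variable-string construction is plain Python; a self-contained sketch of that part, using `random.sample` so duplicate draws no longer shrink the variable count:

```python
import random

# Same 51-name alphabet as the script's possiblevars list (a-z, A-Y).
possiblevars = [chr(c) for c in range(ord('a'), ord('z') + 1)] + \
               [chr(c) for c in range(ord('A'), ord('Y') + 1)]

def variable_string(variablecount):
    """Build a Singular-style variable list such as '(a,f,Q)'.

    random.sample guarantees distinct names, so the result always holds
    exactly variablecount variables; the original skipped duplicate
    randint draws and could end up with fewer.
    """
    variablecount = min(variablecount, 24)  # cap kept from the original
    return '(' + ','.join(random.sample(possiblevars, variablecount)) + ')'

print(variable_string(5))  # e.g. (k,B,c,X,m)
```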
Use the read_retry method which will retry up\r\n# to 15 times to get a sensor reading (waiting 2 seconds between each retry).\r\nhumidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\r\n\r\n# Initialize the LCD using the pins\r\nlcd = LCD.Adafruit_CharLCDBackpack(address=0x21)\r\n\r\n# Comment the line below to convert the temperature to Celcius.\r\ntemperature = temperature * 9 / 5.0 + 32\r\n# added the line to get closer to designated temperature rather than room temperature\r\ntemperature *= 2\r\n\r\nnow = datetime.now()\r\ncurrentTime = now.strftime(\"%H:%M:%S\")\r\n\r\n\r\n# identifying sensor and pin and returning the temp and humidity\r\ndef readTemperature(sensor, pin):\r\n return temperature, humidity\r\n time.sleep(1)\r\n\r\n\r\n# function to initialize local variables\r\ndef __init__(self, temperature, humidity):\r\n self.humidity = humidity\r\n self.temperature = temperature\r\n\r\n\r\n# retrieving most recent record from Mother Brew table\r\ndef GetFromMotherbrew():\r\n url = 'https://emplkasperpsu1.service-now.com/api/now/table/x_snc_beer_brewing_mother_brewv2?sysparm_query=numberISNOTEMPTY%5Eactive%3Dtrue%5Ebrew_phase%3DBoil&sysparm_limit=1'\r\n user = 'kasper440'\r\n pwd = 'kasper440'\r\n # Servicenow Headers\r\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\r\n # HTTP request\r\n response = requests.get(url, auth=(user, pwd), headers=headers)\r\n # Check for HTTP codes other than 200\r\n if response.status_code != 200:\r\n print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.json())\r\n exit()\r\n\r\n # Decode the JSON response into a dictionary and use the data\r\n global beerName, beerType, hops1, hops2, hops3, number, hops1Time, hops2Time, hops3Time, waterWeight\r\n beerName = response.json()['result'][0]['beer_name']\r\n beerType = response.json()['result'][0]['beer_type']\r\n hops1 = response.json()['result'][0]['boil_hops1']\r\n hops2 = response.json()['result'][0]['boil_hops2']\r\n hops3 = response.json()['result'][0]['boil_hops3']\r\n number = response.json()['result'][0]['number']\r\n hops1Time = response.json()['result'][0]['boil_hops_1_time']\r\n hops2Time = response.json()['result'][0]['boil_hops_2_time']\r\n hops3Time = response.json()['result'][0]['boil_hops_3_time']\r\n waterWeight = response.json()['result'][0]['water_by_weight']\r\n print('Ticket Number: ' + number)\r\n print('Beer Name: ' + beerName)\r\n print('Beer Type: ' + beerType)\r\n print('1st Hops: ' + hops1)\r\n print('2nd Hops: ' + hops2)\r\n print('3rd Hops: ' + hops3)\r\n time.sleep(1)\r\n # return the local variables\r\n return beerName, beerType, hops1, hops2, hops3, number, hops1Time, hops2Time, hops3Time, waterWeight\r\n\r\n\r\n# retrieving BoilPi tasks from the LKBrewTasks table\r\ndef GetFromBrewTasks():\r\n url = 'https://emplkasperpsu1.service-now.com/api/now/table/x_snc_beer_brewing_lkbrewtask?sysparm_query=active%3Dtrue%5Erpi_to_executeSTARTSWITHBoilPi%5Estate%3D-5&sysparm_limit=10'\r\n user = 'kasper440'\r\n pwd = 'kasper440'\r\n\r\n # Set proper headers\r\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\r\n\r\n # Do the HTTP request\r\n response = requests.get(url, auth=(user, pwd), headers=headers)\r\n\r\n # Check for HTTP codes other than 200\r\n if response.status_code != 200:\r\n print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.json())\r\n exit()\r\n\r\n # Decode the JSON response into a dictionary and 
use the data\r\n global boilWaterTask1, addHopsTask2, drainWortTask3, checkTempTask4\r\n boilWaterTask1 = response.json()['result'][0]['number']\r\n shortDescr1 = response.json()['result'][0]['short_description']\r\n addHopsTask2 = response.json()['result'][1]['number']\r\n shortDescr2 = response.json()['result'][1]['short_description']\r\n drainWortTask3 = response.json()['result'][2]['number']\r\n shortDescr3 = response.json()['result'][2]['short_description']\r\n checkTempTask4 = response.json()['result'][3]['number']\r\n shortDescr4 = response.json()['result'][3]['short_description']\r\n data = response.json()['result'] # [0]['what do you want to get']\r\n print('Task Number: ' + boilWaterTask1 + ' ' + shortDescr1)\r\n print('Task Number: ' + addHopsTask2 + ' ' + shortDescr2)\r\n print('Task Number: ' + drainWortTask3 + ' ' + shortDescr3)\r\n print('Task Number: ' + checkTempTask4 + ' ' + shortDescr4)\r\n time.sleep(1)\r\n\r\n return boilWaterTask1, shortDescr1, addHopsTask2, shortDescr2, drainWortTask3, shortDescr3, checkTempTask4, shortDescr4\r\n\r\n\r\ndef GetFromIngredients():\r\n # Set the request parameters\r\n url = 'https://emplkasperpsu1.service-now.com/api/now/table/x_snc_beer_brewing_ingredients?sysparm_limit=1'\r\n\r\n # Eg. User name=\"admin\", Password=\"admin\" for this code sample.\r\n user = 'kasper440'\r\n pwd = 'kasper440'\r\n\r\n # Set proper headers\r\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\r\n\r\n # Do the HTTP request\r\n response = requests.get(url, auth=(user, pwd), headers=headers)\r\n\r\n # Check for HTTP codes other than 200\r\n if response.status_code != 200:\r\n print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.json())\r\n exit()\r\n\r\n # Decode the JSON response into a dictionary and use the data\r\n data = response.json()\r\n hops = response.json()['result'][0]['hops']\r\n # print('Hops: ' + hops)\r\n # print(data)\r\n\r\n return hops\r\n\r\n\r\ndef Post():\r\n # Set the request parameters\r\n url = 'https://emplkasperpsu1.service-now.com/api/now/table/x_snc_beer_brewing_boil'\r\n user = 'kasper440'\r\n pwd = 'kasper440'\r\n # Set proper headers\r\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\r\n # Do the HTTP request\r\n response = requests.post(url, auth=(user, pwd), headers=headers,\r\n data=\"{\\\"sys_id\\\":\\\"\\\",\\\"short_description\\\":\\\"Boiling\\\",\\\"current_temperature\\\":\\\"\" + str(\r\n temperature) + \"\\\",\\\"sys_updated_on\\\":\\\"\\\"}\")\r\n # Check for HTTP codes other than 200\r\n if response.status_code != 200 and response.status_code != 201:\r\n print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response)\r\n exit()\r\n # Decode the JSON response into a dictionary and use the data\r\n data = response\r\n\r\n return data\r\n\r\n\r\ndef PostFinalTemp():\r\n # Set the request parameters\r\n url = 'https://emplkasperpsu1.service-now.com/api/now/table/x_snc_beer_brewing_boil'\r\n user = 'kasper440'\r\n pwd = 'kasper440'\r\n # Set proper headers\r\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\r\n # Do the HTTP request\r\n response = requests.post(url, auth=(user, pwd), headers=headers,\r\n data=\"{\\\"sys_id\\\":\\\"\\\",\\\"short_description\\\":\\\"Boiling\\\",\\\"current_temperature\\\":\\\"\" + str(\r\n temperature) + \"\\\",\\\"sys_updated_on\\\":\\\"\\\"}\")\r\n # Check for HTTP codes other than 200\r\n if 
response.status_code != 200 and response.status_code != 201:\r\n print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response)\r\n exit()\r\n # Decode the JSON response into a dictionary and use the data\r\n data = response\r\n return data\r\n\r\n\r\ndef systemCheck():\r\n # used to check system functionality before continuing\r\n # if all is good, then pass\r\n # if there are problems, then throw error\r\n pass\r\n\r\n\r\ndef AddWater():\r\n print('Amount of water: ' + waterWeight + 'gal.')\r\n print('Adding water to brew chamber...')\r\n time.sleep(15)\r\n print('Water added to brew chamber.')\r\n\r\n\r\ndef tempDisplay():\r\n print()\r\n print('Beginning Task: ' + boilWaterTask1)\r\n print('Making Beer: ' + beerName)\r\n print()\r\n timeloop = True\r\n count = 0\r\n\r\n while (timeloop and count < 60):\r\n try:\r\n # Turn backlight on\r\n lcd.set_backlight(0)\r\n if humidity is not None and temperature is not None:\r\n lcd.message('Temp={0:0.1f}*\\nHumidity={1:0.1f}%\\n'.format(temperature, humidity))\r\n count += 1\r\n time.sleep(1)\r\n print(count)\r\n if (count == hops1Time):\r\n print('Beginning Task: ' + addHopsTask2)\r\n print('Adding ' + hops1 + ' hops.')\r\n if (count == hops2Time):\r\n print('Adding ' + hops2 + ' hops.')\r\n if (count == hops3Time):\r\n print('Adding ' + hops3 + ' hops.')\r\n\r\n else:\r\n lcd.message('Failed to get reading. Try again!')\r\n except KeyboardInterrupt:\r\n # Turn the screen off\r\n lcd.clear()\r\n lcd.set_backlight(1)\r\n\r\n print(boilWaterTask1 + ' Boiling process complete')\r\n Post()\r\n\r\n\r\ndef WortCooled():\r\n global temperature\r\n print()\r\n print('Cooling Wort....')\r\n temperature -= 40\r\n time.sleep(10)\r\n if (temperature >= 79):\r\n print('Wort still too hot... decreasing temperature')\r\n temperature -= 30\r\n time.sleep(10)\r\n print()\r\n print('Wort has been cooled to ' + str(temperature) + ' degrees.')\r\n\r\n print('Sending data to Servicenow...')\r\n time.sleep(5)\r\n PostFinalTemp()\r\n\r\n\r\ndef WortDrained():\r\n print()\r\n print('Beginning Task: ' + drainWortTask3)\r\n print('Transferring wort to heat exchanger...')\r\n time.sleep(10)\r\n print('Wort has been transferred.')\r\n\r\n\r\ndef QualityCheck():\r\n GPIO.ouptut(buzzerPin, GPIO.LOW)\r\n time.sleep(1)\r\n print('Beginning Quality Check')\r\n\r\n\r\ndef PostToLogTable():\r\n # Set the request parameters\r\n url = 'https://emplkasperpsu1.service-now.com/api/now/table/x_snc_beer_brewing_log_table'\r\n\r\n # Eg. 
User name=\"admin\", Password=\"admin\" for this code sample.\r\n user = 'admin'\r\n pwd = 'admin'\r\n\r\n # Set proper headers\r\n headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\r\n\r\n # Do the HTTP request\r\n response = requests.post(url, auth=(user, pwd), headers=headers,\r\n data=\"{\\\"boiling_temperature\\\":\\\"\" + str(temperature) + \"\\\",\\\"boil_start_time\\\":\\\"\" + str(\r\n start) + \"\\\",\\\"boil_end_time\\\":\\\"\" + str(end) + \"\\\",\\\"boil_duration\\\":\\\"\" + str(\r\n duration) + \"\\\",\\\"boil_quality_check\\\":\\\"Complete\\\",\\\"boil_reset_clean\\\":\\\"\\\",\\\"boil_errors\\\":\\\"\\\"}\")\r\n\r\n # Check for HTTP codes other than 200\r\n if response.status_code != 201:\r\n print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.json())\r\n exit()\r\n\r\n # Decode the JSON response into a dictionary and use the data\r\n data = response.json()\r\n print(data)\r\n\r\n\r\ndef main():\r\n global start, end, duration\r\n # processDuration = 0\r\n # processDuration += 1\r\n print('Beginning the Boiling Process')\r\n start = datetime.now()\r\n print(start)\r\n time.sleep(1)\r\n GetFromMotherbrew()\r\n GetFromBrewTasks()\r\n GetFromIngredients()\r\n systemCheck()\r\n readTemperature(temperature, humidity)\r\n AddWater()\r\n tempDisplay()\r\n WortCooled()\r\n # Quality Check()\r\n WortDrained()\r\n # ResetClean()\r\n print(\"done\")\r\n end = datetime.now()\r\n print('End time: ')\r\n print(end)\r\n duration = end - start\r\n print('Process Duration: ')\r\n print(duration)\r\n return start, end, duration\r\n PostToLogTable()\r\n import GetFromMotherBrew\r\n\r\n\r\nmain()\r\n\r\n\r\n","sub_path":"Boiling.py","file_name":"Boiling.py","file_ext":"py","file_size_in_byte":12183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"257074466","text":"# selenium 라이브러리\n\n# 브라우저를 직접 제어할 수 이는 라이브러리\n# 크롬 브라우저를 제어 해보겠습니다.\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n# 크롬 웹 드라이버가 필요합니다. 
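Two fixes suggest themselves in Boiling.py above: `QualityCheck` calls `GPIO.ouptut`, a typo for `GPIO.output` that raises `AttributeError` the moment the function runs, and `main` executes `return start, end, duration` before `PostToLogTable()`, so the brew is never logged. A sketch of the corrected tail of `main`, with the RPi-specific steps stubbed so it runs anywhere:

```python
from datetime import datetime

def post_to_log_table(start, end, duration):
    # Stub standing in for Boiling.py's PostToLogTable() REST call.
    print('logged:', start, end, duration)

def main():
    start = datetime.now()
    # ... sensor reads, boiling and cooling steps elided ...
    end = datetime.now()
    duration = end - start
    post_to_log_table(start, end, duration)  # now reachable
    return start, end, duration              # return moved after logging

main()
```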
\n# https://sites.google.com/a/chromium.org/chromedriver/downloads\n\nbrowser = webdriver.Chrome('D:\\\\IJH\\\\chromedriver.exe')\n\n# 크롬 부라우저 내부 대기\nbrowser.implicitly_wait(5)\n\n# 브라우저 크기 조절\n# browser.maximize_window()\n# browser.minimize_window()\nbrowser.set_window_size(800, 500) # 해상도( width x height )\n\nbrowser.get('http://www.naver.com')\n\n# 웹 페이지 소스 출력\nprint(browser.page_source)\n\n# 쿠키 확인\nprint(browser.get_cookies())\n\n# 현재 URL 확인\nprint(browser.current_url)\n\n# search = browser.find_element_by_id('query')\nsearch = browser.find_element_by_css_selector('div.green_window > input#query')\nsearch.send_keys(\"검색어\")\n# search.submit()\nsearch.send_keys(Keys.RETURN)\n\n\n\n# 브라우저 종료\n# browser.quit()\n","sub_path":"web/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"4167356","text":"import base64\nfrom wsgiref.simple_server import make_server\n\ndef application(environ, start_response):\n path = environ['PATH_INFO']\n method = environ['REQUEST_METHOD']\n try:\n import boto3\n kms_client = boto3.client('kms', region_name='us-east-2')\n # Decrypt a data key\n ciphertext = base64.b64decode('AQICAHhz3DTDbBFKvcH3h3G0XcAydE7z0NSuctiln97zJ5nE4wFxA5N+tzlgp802MoxbiGzFAAAAazBpBgkqhkiG9w0BBwagXDBaAgEAMFUGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMKLqxiMZZV/jDc05yAgEQgCh4shBdE4wRPXLFtrp+4ImXFimaBz78P2WDWOA9TpxE1Ye57CyqTIJh')\n\n response = kms_client.decrypt(\n CiphertextBlob=ciphertext\n )['Plaintext']\n \n except Exception as e:\n response = str(e)\n\n status = '200 OK'\n headers = [('Content-type', 'text/html')]\n\n start_response(status, headers)\n return [response]\n\n\nif __name__ == '__main__':\n httpd = make_server('', 8000, application)\n print(\"Serving on port 8000...\")\n httpd.serve_forever()\n","sub_path":"awskms/beanstalk-python/python-v1/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"578719474","text":"from django.shortcuts import render\r\nimport pyrebase\r\n\r\nconfig = {\r\n\t'apiKey': \"AIzaSyA70V9nqwo5U_mVo4mr8YKprrMyeMaTvuw\",\r\n\t'authDomain': \"r-place-project.firebaseapp.com\",\r\n\t'databaseURL': \"https://r-place-project.firebaseio.com\",\r\n\t'projectId': \"r-place-project\",\r\n\t'storageBucket': \"r-place-project.appspot.com\",\r\n\t'messagingSenderId': \"115418004367\"\r\n}\r\n\r\nfirebase = pyrebase.initialize_app(config)\r\n\r\ndb = firebase.database()\r\n\r\ndef createTextBox(request):\r\n return render(request, \"add_stuff.html\")\r\n\r\ndef generateResult(request):\r\n\tword = request.POST.get('word')\r\n\tdb.set({\"item\": word})\r\n\titem = db.child(\"item\").get().val()\r\n\tprint(item)\r\n\tupdate()\r\n\treturn render(request, \"response.html\", {\"data\": item})\r\n\r\n\r\n#an example of how we could update DB with new pixel\r\ndef update():\r\n\t#pixel = request.POST.get('pixel')\r\n\tpixel = (0, 0, \"blue\")\r\n\t\r\n\t#maybe we can use a tuple to pass information around?\r\n\trow, col, color = pixel\r\n\r\n\t#first child will always be grid, then we just find the right row and col\r\n\tdb.child(\"grid\").child(row).set({col: color})\r\n\r\ndef initializeDB(size):\r\n \tfor row in range(0, size):\r\n \t\t\tfor col in range(0, size):\r\n 
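The Beanstalk WSGI sample above returns `str(e)` from its exception branch, but under Python 3 a WSGI body must be an iterable of bytes, so wsgiref would fail exactly when an error should be reported. A sketch with the body normalised to bytes; the KMS call is replaced by a hypothetical stub so the example runs without AWS credentials:

```python
from wsgiref.simple_server import make_server

def decrypt_with_kms():
    # Hypothetical stand-in for the boto3 kms_client.decrypt(...) call,
    # which yields bytes on success.
    return b'decrypted-plaintext'

def application(environ, start_response):
    try:
        body = decrypt_with_kms()
    except Exception as e:
        body = str(e).encode('utf-8')  # WSGI bodies must be bytes, not str
    start_response('200 OK', [('Content-type', 'text/plain')])
    return [body]

if __name__ == '__main__':
    httpd = make_server('', 8000, application)
    print('Serving on port 8000...')
    httpd.serve_forever()
```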
\t\t\t\t\tdb.child(\"grid\").child(row).child(col).set(\"black\")\r\n\r\ninitializeDB(10)","sub_path":"DropboxPrototype/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309169286","text":"from flask import Blueprint, request\nfrom sqlalchemy import func, or_\n\nfrom models.cfp import Proposal, CFPMessage, CFPVote, CFP_STATES\nfrom ..common import require_permission\n\ncfp_review = Blueprint('cfp_review', __name__)\nadmin_required = require_permission('cfp_admin') # Decorator to require admin permissions\nanon_required = require_permission('cfp_anonymiser')\nreview_required = require_permission('cfp_reviewer')\nschedule_required = require_permission('cfp_schedule')\n\nordered_states = [\n 'edit', 'new', 'locked', 'checked', 'rejected', 'cancelled', 'anonymised',\n 'anon-blocked', 'manual-review', 'reviewed', 'accepted', 'finished'\n]\n\n\ndef sort_by_notice(notice):\n return {\n '1 week': 0,\n '1 month': 1,\n '> 1 month': 2,\n }.get(notice, -1)\n\n\ndef get_proposal_sort_dict(parameters):\n sort_keys = {\n 'state': lambda p: (p.state, p.modified, p.title),\n 'date': lambda p: (p.modified, p.title),\n 'type': lambda p: (p.type, p.title),\n 'user': lambda p: (p.user.name, p.title),\n 'title': lambda p: p.title,\n 'ticket': lambda p: (p.user.tickets.count() > 0, p.title),\n 'notice': lambda p: (sort_by_notice(p.notice_required), p.title),\n 'duration': lambda p: (p.scheduled_duration or 0)\n }\n\n sort_by_key = parameters.get('sort_by')\n return {\n 'key': sort_keys.get(sort_by_key, sort_keys['state']),\n 'reverse': bool(parameters.get('reverse'))\n }\n\n\ndef get_next_proposal_to(prop, state):\n return Proposal.query.filter(\n Proposal.id != prop.id,\n Proposal.state == state,\n Proposal.modified >= prop.modified # ie find something after this one\n ).order_by('modified', 'id').first()\n\n\n@cfp_review.context_processor\ndef cfp_review_variables():\n unread_count = CFPMessage.query.filter(\n # is_to_admin AND (has_been_read IS null OR has_been_read IS false)\n or_(CFPMessage.has_been_read.is_(False),\n CFPMessage.has_been_read.is_(None)),\n CFPMessage.is_to_admin.is_(True)\n ).count()\n\n count_dict = dict(Proposal.query.with_entities(\n Proposal.state,\n func.count(Proposal.state),\n ).group_by(Proposal.state).all())\n proposal_counts = {state: count_dict.get(state, 0) for state in CFP_STATES}\n\n unread_reviewer_notes = CFPVote.query.join(Proposal).filter(\n Proposal.id == CFPVote.proposal_id,\n Proposal.state == 'anonymised',\n or_(CFPVote.has_been_read.is_(False),\n CFPVote.has_been_read.is_(None))\n ).count()\n\n return {\n 'ordered_states': ordered_states,\n 'unread_count': unread_count,\n 'proposal_counts': proposal_counts,\n 'unread_reviewer_notes': unread_reviewer_notes,\n 'view_name': request.url_rule.endpoint.replace('cfp_review.', '.')\n }\n\n\nfrom . import base # noqa: F401\nfrom . import review # noqa: F401\nfrom . 
import anonymise # noqa: F401\n","sub_path":"apps/cfp_review/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490959620","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 19 22:59:23 2019\n\n@author: midingoy\n\"\"\"\nimport os.path\nfrom path import Path\n\nfrom pycropml.transpiler.main import Main\n\nfrom pycropml import render_cyml\nfrom pycropml.pparse import model_parser\nfrom pycropml.writeTest import WriteTest\n\ndef transpile_file(source, language):\n sourcef = source\n file = Path(sourcef)\n with open(file, 'r') as fi:\n source = fi.read()\n name = sourcef.split(\".\")[0]\n test = Main(file, language)\n test.parse()\n test.to_ast(source)\n code = test.to_source()\n print(code)\n filename = \"%s.%s\" % (name, language)\n with open(filename, \"wb\") as tg_file:\n tg_file.write(code.encode('utf-8'))\n return 0\n\ndef transpile_package(package, language):\n # translate from crop2ml package\n sourcef = package\n pkg = Path(sourcef)\n models = model_parser(pkg) # parse xml files and create python model object\n output = pkg/'src'\n dir_test= pkg/'test'\n m=[model.name for model in models]\n\n # Generate packages if the directories does not exists.\n if not output.isdir():\n output.mkdir()\n\n if not dir_test.isdir():\n dir_test.mkdir()\n\n m2p = render_cyml.Model2Package(models, dir=output)\n m2p.generate_package() # generate cyml models in \"pyx\" directory\n tg_rep = Path(output/\"%s\"%(language)) # target language models directory in output\n dir_test_lang = Path(dir_test/\"%s\"%(language))\n\n if not tg_rep.isdir():\n tg_rep.mkdir()\n\n if not dir_test_lang.isdir() : #Create if it doesn't exist\n dir_test_lang.mkdir()\n\n # generate\n cyml_rep = Path(output/'pyx') # cyml model directory in output\n for k, file in enumerate(cyml_rep.files()):\n #print(file)\n with open(file, 'r') as fi:\n source = fi.read()\n\n name = os.path.split(file)[1].split(\".\")[0]\n for model in models: # in the case we have'nt the same order\n if name == model.name.lower():\n test=Main(file, language, model)\n test.parse()\n test.to_ast(source)\n code=test.to_source()\n filename = tg_rep/\"%s.%s\"%(name, language)\n with open(filename, \"wb\") as tg_file:\n tg_file.write(code.encode('utf-8'))\n\n # writeTest\n test = WriteTest(models,language,dir_test_lang)\n test.write()\n\n status = 0\n return status\n","sub_path":"src/pycropml/cyml.py","file_name":"cyml.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149801244","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2017 Amazon.com, Inc. or its affiliates. 
All Rights\n# Reserved.\n#\n# Additional copyrights may follow\n#\n\nimport os\nimport sys\nimport re\nimport argparse\nimport logging\nimport time\nimport shlex\nimport shutil\nimport requests\nimport BuilderUtils\n\n\n_cov_filename = 'coverity_tools.tgz'\n\ndef run_coverity_internal(logger, build_root, source_tarball, config):\n # read the token file\n file = open(config['token_file'], 'r')\n token = file.readline().rstrip('\\n')\n\n # get the tool\n if not os.path.isdir(config['tool_dir']):\n os.makedirs(config['tool_dir'])\n os.chdir(config['tool_dir'])\n timestamp = 0\n if os.path.exists(_cov_filename):\n timestamp = os.stat(_cov_filename).st_mtime\n if (timestamp + (24 * 3600)) > int(time.time()):\n logger.debug('Reusing existing tarball')\n else:\n logger.debug('Downloading %s' % (config['tool_url']))\n # As of 9 Aug 2021, this file is 2+GB. Downloading it all\n # into a Python script and then writing it out to disk is not\n # a good idea on our limited resources AWS VM (meaning: it\n # brings the VM to a crawl). From\n # https://stackoverflow.com/questions/38969164/coverity-scan-for-projects-outside-github,\n # we can use a command line tool to download, instead. It's\n # not very Pythonic, but it doesn't bring our VM to its knees.\n cmd = [\n 'wget',\n config[\"tool_url\"],\n '--post-data',\n f'token={token}&project={config[\"project_name\"]}',\n '-O',\n _cov_filename\n ]\n BuilderUtils.logged_call(cmd,\n log_file=os.path.join(build_root, 'coverity-tools-download-output.txt'))\n\n # make sure we have a build root\n if not os.path.isdir(build_root):\n os.makedirs(build_root)\n os.chdir(build_root)\n\n # The name of the top-level directory in the tarball changes every\n # time Coverity releases a new version of the tool. So search\n # around and hope we find something.\n logger.debug('Expanding ' + _cov_filename)\n BuilderUtils.logged_call(['tar', 'xf', os.path.join(config['tool_dir'], _cov_filename)],\n log_file=os.path.join(build_root, 'coverity-tools-untar-output.txt'))\n cov_path=''\n for file in os.listdir(build_root):\n if file.startswith('cov-'):\n cov_path = os.path.join(build_root, file, 'bin')\n break\n logger.debug('Found Coverity path %s' % (cov_path))\n\n child_env = os.environ.copy()\n child_env['PATH'] = cov_path + ':' + child_env['PATH']\n\n logger.debug('Extracting build tarball: %s' % (source_tarball))\n BuilderUtils.logged_call(['tar', 'xf', source_tarball],\n log_file=os.path.join(build_root, 'coverity-source-untar-output.txt'))\n\n # guess the directory based on the tarball name. 
Don't worry\n # about the exception, because we want out in that case anyway...\n build_version = re.search('^' + config['project_prefix'] + '-(.*)\\.tar\\..*$',\n os.path.basename(source_tarball)).group(1)\n srcdir = config['project_prefix'] + '-' + build_version\n os.chdir(srcdir)\n\n logger.debug('coverity configure')\n args = ['./configure']\n if 'configure_args' in config:\n args.extend(shlex.split(config['configure_args']))\n BuilderUtils.logged_call(args, env=child_env,\n log_file=os.path.join(build_root, 'coverity-configure-output.txt'))\n\n logger.debug('coverity build')\n args = ['cov-build', '--dir', 'cov-int', 'make']\n if 'make_args' in config:\n args.extend(shlex.split(config['make_args']))\n BuilderUtils.logged_call(args, env=child_env,\n log_file=os.path.join(build_root, 'coverity-make-output.txt'))\n\n logger.debug('bundling results')\n results_tarball = os.path.join(build_root, 'analyzed.tar.bz2')\n BuilderUtils.logged_call(['tar', 'jcf', results_tarball, 'cov-int'],\n log_file=os.path.join(build_root, 'coverity-results-tar-output.txt'))\n\n logger.debug('submitting results')\n url = 'https://scan.coverity.com/builds?project=' + config['project_name']\n files = { 'file': open(results_tarball, 'rb') }\n values = { 'email' : config['email'],\n 'version' : build_version,\n 'description' : 'nightly-master',\n 'token' : token }\n r = requests.post(url, files=files, data=values)\n r.raise_for_status()\n\n\ndef run_coverity(logger, build_root, source_tarball, config):\n \"\"\"Run coverity test and submit results\n\n Run Coverity test and submit results to their server. Can be run\n either standalone (with a tarball as a target) or integrated into\n the Builder class.\n\n \"\"\"\n cwd = os.getcwd()\n try:\n run_coverity_internal(logger, build_root, source_tarball, config)\n finally:\n os.chdir(cwd)\n\n\nif __name__ == '__main__':\n config = { 'tool_url' : 'https://scan.coverity.com/download/cxx/linux64',\n 'log_level' : 'INFO' }\n\n parser = argparse.ArgumentParser(description='Coverity submission script for Open MPI related projects')\n parser.add_argument('--log-level', help='Log level.', type=str,\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])\n parser.add_argument('--build-root',\n help='Directory to use as base of build tree.',\n type=str)\n parser.add_argument('--source-tarball',\n help='Tarball to submit for analysis',\n type=str)\n parser.add_argument('--tool-dir',\n help='Directory in which to store downloaded tool (for reuse)',\n type=str)\n parser.add_argument('--tool-url',\n help='URL for downloading Coverity tool',\n type=str)\n parser.add_argument('--project-name',\n help='Coverity project name',\n type=str)\n parser.add_argument('--project-prefix',\n help='prefix of the tarball directory',\n type=str)\n parser.add_argument('--token-file',\n help='File containing the Coverity token for project',\n type=str)\n parser.add_argument('--configure-args',\n help='Configuration arguments for source tarball',\n type=str)\n parser.add_argument('--make-args',\n help='Build arguments for source tarball',\n type=str)\n parser.add_argument('--email',\n help='Coverity submission email address',\n type=str)\n\n for key, value in vars(parser.parse_args()).items():\n if not value == None:\n config[key] = value\n\n logging.basicConfig()\n logger = logging.getLogger()\n logger.setLevel(config['log_level'])\n\n run_coverity(logger, config['build_root'], config['source_tarball'], 
config)\n","sub_path":"nightly-tarball/Coverity.py","file_name":"Coverity.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460604261","text":"#!/usr/bin/python3\n\n# Copyright (c) 2020 Teradici Corporation\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport base64\nimport os\nfrom google.cloud import kms_v1\nfrom google.oauth2 import service_account\n\n\nSECRETS_START_FLAG = \"# <-- Start of secrets section, do not edit this line. -->\"\n\nclass Tfvars_Encryptor_GCP:\n \"\"\"Tfvars_Encryptor_GCP is used to automate the encryption or decryption of secrets in a terraform \n tfvars file so that it is ready to be used for terraform deployments using encrypted secrets.\n\n Attributes:\n tfvars_path (str) Path to the terraform.tfvars file.\n tfvars_data (dict) Holds key value pairs for all terraform.tfvars configuration data.\n tfvars_secrets (dict) Holds key value pairs for all terraform.tfvars secrets.\n max_key_length (int) Longest string length of a tfvars_secrets key used to write secrets left-justified.\n gcp_credentials_file (str) Path to the GCP credentials file used for GCP KMS.\n gcp_credentials (object) GCP Credentials object for a GCP service account.\n kms_client (object) Instance of GCP Key Management Service Client.\n project_id (str) GCP project ID associated with the GCP service account.\n location (str) Defaulted to use \"global\" as the location.\n key_ring_id (str) Defaulted to use \"cas_keyring\" as a key ring ID.\n crypto_key_id (str) Defaulted to use \"cas_key\" as a crypto key ID.\n crypto_key_path (str) Full GCP resource path to the crypto key being used to encrypt / decrypt.\n\n Methods:\n __init__(self, tfvars_path)\n create_crypto_key(crypto_key_id)\n create_key_ring(key_ring_id)\n decrypt_ciphertext(ciphertext)\n decrypt_file(file_path)\n decrypt_tfvars_secrets()\n encrypt_file(file_path)\n encrypt_plaintext(plaintext)\n encrypt_tfvars_secrets()\n initialize_keyring(key_ring_id)\n initialize_cryptokey(crypto_key_id)\n get_crypto_keys(key_ring_id)\n get_key_rings()\n read_tfvars(tfvars_file)\n write_new_tfvars()\n \"\"\"\n\n def __init__(self, tfvars_path):\n \"\"\"Tfvars_Encryptor_GCP Class Constructor to initialize the object.\n \n Args: \n tfvars_path (str): a full path to the terraform.tfvars file\n \"\"\"\n\n # Read tfvars data and secrets into dictionaries\n self.tfvars_path = tfvars_path\n self.tfvars_data, self.tfvars_secrets = self.read_tfvars(tfvars_path)\n\n # Find the max string length of all the keys to left-justify align them\n self.max_key_length = max(map(len, self.tfvars_secrets))\n\n # Set GCP credentials instance variable from tfvars_data\n self.gcp_credentials_file = self.tfvars_data.get(\"gcp_credentials_file\")\n\n # Create a client for the KMS API using the provided GCP service account\n self.gcp_credentials = service_account.Credentials.from_service_account_file(self.gcp_credentials_file)\n self.kms_client = kms_v1.KeyManagementServiceClient(credentials = self.gcp_credentials)\n\n # GCP KMS resource variables\n self.project_id = self.tfvars_data.get(\"gcp_project_id\")\n self.location = \"global\"\n self.key_ring_id = self.initialize_keyring(\"cas_keyring\")\n self.crypto_key_id = self.initialize_cryptokey(\"cas_key\")\n self.crypto_key_path = self.kms_client.crypto_key_path_path(self.project_id, self.location, self.key_ring_id, 
self.crypto_key_id)\n\n\n def create_crypto_key(self, crypto_key_id):\n \"\"\"A method to create a crypto key on GCP KMS.\n \n Args:\n crypto_key_id (str): the name of the crypto key to be created\n Returns:\n string: the name of the crypto key created\n \"\"\"\n\n # Create the crypto key object template\n purpose = kms_v1.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT\n crypto_key = { \"purpose\": purpose }\n\n # Create a crypto key for the given key ring\n parent = self.kms_client.key_ring_path(self.project_id, self.location, self.key_ring_id)\n response = self.kms_client.create_crypto_key(parent, crypto_key_id, crypto_key)\n\n return response.name\n\n\n def create_key_ring(self, key_ring_id):\n \"\"\"A method to create a key ring on GCP KMS.\n \n Args:\n key_ring_id (str): the name of the key ring to be created\n Returns:\n string: the name of the key ring created\n \"\"\"\n\n # Create the key ring object template\n keyring_path = self.kms_client.key_ring_path(self.project_id, self.location, key_ring_id)\n keyring = {\"name\": keyring_path}\n\n # Create a key ring\n parent = self.kms_client.location_path(self.project_id, self.location)\n response = self.kms_client.create_key_ring(parent, key_ring_id, keyring)\n\n return response.name\n\n\n def decrypt_ciphertext(self, ciphertext):\n \"\"\"A method that decrypts ciphertext.\n\n Uses GCP KMS to decrypt ciphertext back to plaintext using the provided\n symmetric crypto key that belongs to this instance.\n \n Args:\n ciphertext (str): the ciphertext being decrypted\n Returns:\n string: the plaintext\n \"\"\"\n\n # Convert ciphertext string to a byte string, then Base64 decode it\n ciphertext = base64.b64decode(ciphertext.encode(\"utf-8\"))\n\n # Use the KMS API to decrypt the data\n response = self.kms_client.decrypt(self.crypto_key_path, ciphertext)\n\n # Decode Base64 plaintext\n plaintext = response.plaintext.decode(\"utf-8\")\n\n return plaintext\n\n\n def decrypt_file(self, file_path):\n \"\"\"A method that decrypts the contents of a text file.\n\n Uses GCP KMS to decrypt ciphertext back to plaintext using the provided\n symmetric crypto key that belongs to this instance.\n \n Args:\n file_path (str): the path of the text file being decrypted\n Returns:\n string: the path to the decrypted text file created\n \"\"\"\n\n try:\n print(\"Decrypting file: {}...\".format(file_path))\n\n with open(file_path) as f:\n f_ciphertext = f.read()\n \n f_plaintext = self.decrypt_ciphertext(f_ciphertext)\n\n # Removes the .encrypted appended using this encryptor\n file_path_decrypted = \"{}.decrypted\".format(file_path).replace(\".encrypted\", \"\")\n\n with open(file_path_decrypted, \"w\") as f:\n f.write(f_plaintext)\n\n except Exception as err:\n print(\"An exception occurred decrypting file.\")\n print(\"{}\\n\".format(err))\n raise SystemExit()\n \n return file_path_decrypted\n\n\n def decrypt_tfvars_secrets(self):\n \"\"\"A method that decrypts the secrets contained in the terraform.tfvars file.\n\n This method contains the logic for handling the decryption of the secrets \n and any file paths associated with it using GCP KMS. Once decrypted, it calls \n write_new_tfvars() to write all secrets to a new terraform.tfvars file. \n \"\"\" \n\n # Set crypto key path to use kms_cryptokey_id\n self.crypto_key_path = self.tfvars_data.get(\"kms_cryptokey_id\")\n\n # Decrypt all secrets\n try:\n for secret in self.tfvars_secrets:\n # Additional handling needed if the string is a path to a file (IE. 
cam_credentials_file)\n if os.path.isfile(self.tfvars_secrets.get(secret)):\n self.tfvars_secrets[secret] = self.decrypt_file(self.tfvars_secrets.get(secret))\n else:\n print(\"Decrypting {}...\".format(secret))\n self.tfvars_secrets[secret] = self.decrypt_ciphertext(self.tfvars_secrets.get(secret))\n \n # Write decrypted secrets into new terraform.tfvars file\n self.write_new_tfvars()\n print(\"\\nSuccessfully decrypted all secrets!\\n\")\n\n except Exception as err:\n print(\"An exception occurred decrypting secrets:\")\n print(\"{}\\n\".format(err))\n raise SystemExit()\n\n\n def encrypt_file(self, file_path):\n \"\"\"A method that encrypts the contents of a text file.\n\n Uses GCP KMS to encrypt the plaintext in a file to ciphertext using \n the provided symmetric crypto key that belongs to this instance.\n \n Args:\n file_path (str): the path of the text file being encrypted\n Returns:\n string: the path to the encrypted text file created\n \"\"\"\n\n try:\n print(\"Encrypting file: {}...\".format(file_path))\n \n with open(file_path) as f:\n f_string = f.read()\n\n f_encrypted_string = self.encrypt_plaintext(f_string)\n file_path_encrypted = \"{}.encrypted\".format(file_path).replace(\".decrypted\", \"\")\n \n with open(file_path_encrypted, \"w\") as f:\n f.write(f_encrypted_string)\n\n except Exception as err:\n print(\"An exception occurred encrypting the file:\")\n print(\"{}\\n\".format(err))\n raise SystemExit()\n \n return file_path_encrypted\n\n\n def encrypt_plaintext(self, plaintext):\n \"\"\"A method that encrypts plaintext.\n\n Uses GCP KMS to encrypt plaintext to ciphertext using the provided\n symmetric crypto key that belongs to this instance.\n \n Args:\n plaintext (str): the plaintext being encrypted\n Returns:\n string: the ciphertext\n \"\"\"\n\n # Use the KMS API to encrypt the data.\n response = self.kms_client.encrypt(self.crypto_key_path, plaintext.encode(\"utf-8\"))\n\n # Base64 encoding of ciphertext\n ciphertext = base64.b64encode(response.ciphertext).decode(\"utf-8\")\n\n return ciphertext\n\n\n def encrypt_tfvars_secrets(self):\n \"\"\"A method that encrypts secrets contained in the terraform.tfvars file.\n\n This method contains the logic for handling the encryption of the secrets \n and any file paths associated with it using GCP KMS. Once encrypted, it calls \n write_new_tfvars() to write all secrets to a new terraform.tfvars file. \n \"\"\" \n\n # Encrypt all secrets found in the tfvars_secrets dictionary\n try:\n for secret in self.tfvars_secrets:\n # Additional handling needed if the string is a path to a file (IE. cam_credentials_file)\n if os.path.isfile(self.tfvars_secrets.get(secret)):\n self.tfvars_secrets[secret] = self.encrypt_file(self.tfvars_secrets.get(secret))\n else:\n print(\"Encrypting {}...\".format(secret))\n self.tfvars_secrets[secret] = self.encrypt_plaintext(self.tfvars_secrets.get(secret))\n\n # Write encrypted secrets into new terraform.tfvars file\n self.write_new_tfvars()\n print(\"\\nSuccessfully encrypted all secrets!\\n\")\n\n except Exception as err:\n print(\"An exception occurred encrypting secrets:\")\n print(\"{}\\n\".format(err))\n raise SystemExit()\n\n\n def initialize_cryptokey(self, crypto_key_id):\n \"\"\"A method that initializes this instance's crypto key.\n\n This initialization method is called in the constructor to\n create a default crypto key if it doesn't exist. 
If the key\n exists already, then reuse it for this instance.\n \n Args:\n crypto_key_id (str): crypto key used to encrypt and decrypt\n Returns:\n string: the crypto key used\n \"\"\"\n\n crypto_keys_list = self.get_crypto_keys(self.key_ring_id)\n\n # Create the crypto key only if it doesn't exist\n if crypto_key_id not in crypto_keys_list:\n try:\n self.create_crypto_key(crypto_key_id)\n print(\"Created key: {}\\n\".format(crypto_key_id))\n \n except Exception as err:\n print(\"An exception occurred creating new crypto key:\")\n print(\"{}\".format(err))\n raise SystemExit()\n else:\n print(\"Using existing crypto key: {}\\n\".format(crypto_key_id))\n \n return crypto_key_id\n\n\n def initialize_keyring(self, key_ring_id):\n \"\"\"A method that initializes this instance's key ring.\n\n This initialization method is called in the constructor to\n create a default key ring if it doesn't exist.\n \n Args:\n key_ring_id (str): key ring being created\n Returns:\n string: the key ring used\n \"\"\"\n\n key_rings_list = self.get_key_rings()\n\n # Create the key ring only if it doesn't exist\n if key_ring_id not in key_rings_list:\n try:\n self.create_key_ring(key_ring_id)\n print(\"Created key ring: {}\\n\".format(key_ring_id))\n \n except Exception as err:\n print(\"An exception occurred creating new key ring:\")\n print(\"{}\".format(err))\n raise SystemExit()\n else: \n print(\"Using existing key ring: {}\\n\".format(key_ring_id))\n\n return key_ring_id\n\n\n def get_crypto_keys(self, key_ring_id):\n \"\"\"A method that retrieves a list of crypto keys associated with a key ring.\n\n This method returns a list of all the crypto keys associated with a specific key ring.\n \n Args:\n key_ring_id (str): a GCP KMS key ring\n Returns:\n list: a list of all the crypto keys associated with the key ring argument.\n \"\"\"\n\n parent = self.kms_client.key_ring_path(self.project_id, self.location, key_ring_id)\n response = self.kms_client.list_crypto_keys(parent)\n\n # Access the name property and split string from the right. [2] to get the string after the separator\n # eg. name: \"projects/user-terraform/locations/global/keyRings/cas_keyring/cryptoKeys/cas_key\"\n crypto_keys_list = list(map(lambda key: key.name.rpartition('/')[2], response))\n\n return crypto_keys_list\n\n\n def get_key_rings(self):\n \"\"\"A method that retrieves a list of key rings.\n\n This method returns a list of all the key rings associated \n with the GCP service account.\n \n Returns:\n list: a list of all the key rings\n \"\"\"\n\n parent = self.kms_client.location_path(self.project_id, self.location)\n response = self.kms_client.list_key_rings(parent)\n\n # Access the name property and split string from the right. [2] to get the string after the separator\n # eg. 
name: \"projects/user-terraform/locations/global/keyRings/cas_keyring\"\n key_rings_list = list(map(lambda key_ring: key_ring.name.rpartition('/')[2], response))\n\n return key_rings_list\n\n\n def read_tfvars(self, tfvars_file):\n \"\"\"A method that reads terraform.tfvars for all configuration data.\n\n This method reads a terraform.tfvars file for all the user-provided \n configuration data above the secrets.\n Args:\n tfvars_file (str): a path to a terraform.tfvars file\n Returns:\n tuple containing:\n dict: key value pairs for all the terraform.tfvars data\n dict: key value pairs for all the terraform.tfvars secrets\n \"\"\"\n\n tf_data = {}\n tf_secrets = {}\n\n begin_reading_secrets = False\n\n with open(tfvars_file, 'r') as f:\n for line in f:\n line = line.strip()\n\n if SECRETS_START_FLAG in line:\n begin_reading_secrets = True\n continue\n\n # Skip blank lines and comment lines\n # \"not line\" must come first using short circuiting to avoid string index out of range error\n if not line or line[0] in (\"#\"):\n continue\n \n # Split the line into key value pairs using the first delimiter\n key, value = map(str.strip, line.split('=', 1))\n\n if begin_reading_secrets:\n tf_secrets[key] = value.replace(\"\\\"\", \"\")\n else:\n tf_data[key] = value.replace(\"\\\"\", \"\")\n\n return tf_data, tf_secrets\n\n\n def write_new_tfvars(self):\n \"\"\"A method that writes a new terraform.tfvars file\n\n This method writes a new terraform.tfvars file that is ready to be used by\n Terraform after encrypting or decrypting. \n \"\"\"\n\n # Parse existing tfvars and store each line into a list\n lines = []\n \n with open(self.tfvars_path, 'r') as f:\n for line in f:\n \n # Remove leading and trailing whitespace including \"\\n\" and \"\\t\"\n line = line.strip()\n\n # Append the crypto key path to kms_cryptokey_id line\n if \"kms_cryptokey_id =\" in line:\n if not self.tfvars_data.get(\"kms_cryptokey_id\"):\n lines.append(\"{} = \\\"{}\\\"\".format(\"kms_cryptokey_id\", self.crypto_key_path))\n else:\n lines.append(\"# {} = \\\"{}\\\"\".format(\"kms_cryptokey_id\", self.crypto_key_path))\n continue\n\n # Blank lines and comments are unchanged\n # \"not line\" must come first using short circuit to avoid string index out of range error\n if not line or line[0] in (\"#\"):\n lines.append(line)\n continue\n \n # Need to keep the .strip() here to sanitize the key being read\n key = line.split(\"=\")[0].strip()\n\n if key in self.tfvars_secrets.keys():\n # Left justify all the secrets with space as padding on the right\n lines.append(\"{} = \\\"{}\\\"\".format(key.ljust(self.max_key_length, \" \"), self.tfvars_secrets.get(key)))\n else:\n lines.append(line)\n\n # Add .backup postfix to the original tfvars file\n print(\"Creating backup of terraform.tfvars...\")\n os.rename(self.tfvars_path, \"{}.backup\".format(self.tfvars_path))\n\n # Rewrite the existing terraform.tfvars\n print(\"Writing new terraform.tfvars...\")\n with open(self.tfvars_path, 'w') as f:\n f.writelines(\"%s\\n\" %line for line in lines)\n\n\ndef main():\n # Set up argparse\n parser_description = (\"Creates GCP KMS keyring and key, and uses the key to encrypt or decrypt secrets in the specified terraform.tfvars.\"\n \"The script encrypts by default. 
To decrypt instead, add the -d flag.\")\n\n parser = argparse.ArgumentParser(description=parser_description)\n\n parser.add_argument(\"tfvars\", help=\"specify the path to terraform.tfvars file\")\n parser.add_argument(\"-d\", help=\"decrypt secrets in terraform.tfvars specified\", action='store_true')\n\n args = parser.parse_args()\n \n # Instantiate a new Tfvars_Encryptor_GCP with the tfvars path\n tfvars_encryptor_gcp = Tfvars_Encryptor_GCP(args.tfvars)\n \n # Abort the script if the GCP credentials file is missing\n if not tfvars_encryptor_gcp.tfvars_data.get(\"gcp_credentials_file\"):\n print(\"Missing gcp_credentials_file in tfvars. Ensure gcp_credentials_file is valid and try again.\\n\")\n raise SystemExit()\n\n # Encryption is the default; the user can specify the -d flag for decryption\n if args.d:\n # Abort the decryption if there is not a kms_cryptokey_id in the tfvars file\n if not tfvars_encryptor_gcp.tfvars_data.get(\"kms_cryptokey_id\"):\n print(\"No kms_cryptokey_id present in tfvars. Ensure the secrets are encrypted and try again.\\n\")\n raise SystemExit()\n\n tfvars_encryptor_gcp.decrypt_tfvars_secrets()\n else:\n # Abort the encryption if the tfvars is already encrypted with a kms_cryptokey_id present\n if tfvars_encryptor_gcp.tfvars_data.get(\"kms_cryptokey_id\"):\n print(\"Detected kms_cryptokey_id in tfvars. Ensure secrets are not already encrypted and try again.\\n\")\n raise SystemExit()\n\n tfvars_encryptor_gcp.encrypt_tfvars_secrets()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"tools/kms_secrets_encryption.py","file_name":"kms_secrets_encryption.py","file_ext":"py","file_size_in_byte":20628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577862101","text":"#!/usr/bin/python3\n# In this example the file is loaded as a stream and we use try / finally\n# The finally block always runs, whether or not an error occurred\ntry:\n arquivo = open(\"pessoas.csv\")\n for registro in arquivo:\n print(\"Name: {} Age: {}\".format(*registro.strip().split(',')))\nfinally:\n arquivo.close()\n print(\"File closed\")\n","sub_path":"manipulacao_de_arquivos/io_v4.py","file_name":"io_v4.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515626709","text":"#!/usr/bin/env python3\n# MIT License\n#\n# Copyright (c) 2020 FABRIC Testbed\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n#\n# Author: Komal Thareja (kthare10@renci.org)\nfrom fabric.actor.core.apis.i_authority_proxy import IAuthorityProxy\nfrom fabric.actor.core.apis.i_broker_proxy import IBrokerProxy\nfrom fabric.actor.core.apis.i_proxy import IProxy\n\n\nclass ProxyRegistry:\n class ProtocolEntry:\n def __init__(self):\n # all proxies\n self.proxies = {}\n # all proxies to brokers\n self.broker_proxies = []\n # all proxies to sites\n self.site_proxies = []\n\n def clear(self):\n self.proxies.clear()\n self.broker_proxies.clear()\n self.site_proxies.clear()\n\n def __init__(self):\n self.protocols = {}\n\n def clear(self):\n for protocol in self.protocols.values():\n protocol.clear()\n\n def get_broker_proxies(self, *, protocol: str):\n if protocol not in self.protocols:\n return None\n\n entry = self.protocols[protocol]\n return entry.broker_proxies\n\n def get_site_proxies(self, *, protocol: str):\n if protocol not in self.protocols:\n return None\n\n entry = self.protocols[protocol]\n return entry.site_proxies\n\n def get_proxies(self, *, protocol: str):\n if protocol not in self.protocols:\n return None\n\n entry = self.protocols[protocol]\n return entry.proxies.values()\n\n def get_proxy(self, *, protocol: str, actor_name: str):\n if protocol not in self.protocols:\n return None\n entry = self.protocols[protocol]\n if actor_name not in entry.proxies:\n return None\n return entry.proxies[actor_name]\n\n def register_proxy(self, *, proxy: IProxy):\n protocol = proxy.get_type()\n\n entry = None\n if protocol not in self.protocols:\n entry = self.ProtocolEntry()\n self.protocols[protocol] = entry\n else:\n entry = self.protocols[protocol]\n\n name = proxy.get_identity().get_name()\n if name not in entry.proxies:\n entry.proxies[name] = proxy\n\n if isinstance(proxy, IAuthorityProxy):\n entry.site_proxies.append(proxy)\n\n if isinstance(proxy, IBrokerProxy):\n entry.broker_proxies.append(proxy)\n\n def unregister(self, *, actor_name: str):\n for protocol in self.protocols.values():\n if actor_name in protocol.proxies:\n proxy = protocol.proxies[actor_name]\n protocol.proxies.pop(actor_name)\n if isinstance(proxy, IAuthorityProxy):\n protocol.site_proxies.remove(proxy)\n\n if isinstance(proxy, IBrokerProxy):\n protocol.broker_proxies.remove(proxy)\n","sub_path":"fabric/actor/core/registry/proxy_registry.py","file_name":"proxy_registry.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"365442570","text":"from abc import ABC\n\nclass Wallet(ABC):\n def __init__(self):\n pass\n\n\nclass BigWallet(Wallet):\n def __init__(self, papers, coins):\n self.papers = list(papers)\n self.coins = list(coins)\n self.consist = BigWallet._get_money(papers, coins)\n self.total = sum(self.papers + self.coins)\n\n def __iter__(self, papers=False, coins=False):\n return IterBigWallet(self.consist)\n\n @staticmethod\n def _get_money(papers, coins):\n entire = []\n for value in papers:\n entire.append(value * 100)\n for value in coins:\n entire.append(value)\n return entire\n\n\nclass IterBigWallet(BigWallet):\n\n def __init__(self, wallet):\n self.wallet = wallet\n self.index = 0\n\n def __len__(self):\n count = 0\n while True:\n try:\n self.wallet[count]\n except 
Exception:\n break\n count += 1\n # count now equals the number of items the wallet exposes\n return count\n\n def __next__(self):\n try:\n value = self.wallet[self.index]\n except IndexError:\n raise StopIteration()\n self.index += 1\n return value\n\n def __iter__(self):\n return self\n\nfirst = BigWallet(papers=(1,2,3), coins=(10,25))\nprint(first.total)\niterka = iter(first)\nprint(iterka)\nprint(next(iterka))\nprint(next(iterka))\nprint(next(iterka))\nprint(next(iterka))\nprint(len(iterka))\n\nclass Repeator():\n def __init__(self, word):\n self.word = word\n\n #def __iter__(self):\n # return ReapeatorIterator(self.word)\n\n #or just\n def __iter__(self):\n # the generator simply ends when the loop is exhausted; raising\n # StopIteration here would become a RuntimeError (PEP 479)\n for i in self.word:\n yield i.lower()\n\nclass ReapeatorIterator(Repeator):\n def __init__(self, word):\n self.word = word\n self.counter = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n value = self.word[self.counter]\n except IndexError:\n raise StopIteration\n self.counter +=1\n return value\n\nrepeater = Repeator(\"Hello\")\nfor i in repeater:\n print(i) # hello","sub_path":"home_practise/iterable_and_iterator.py","file_name":"iterable_and_iterator.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121137885","text":"#!/usr/bin/env python\n#\n#\n#\nimport argparse\nimport sys\nimport rclpy\nfrom rclpy.node import Node\n\nimport tf2_ros\nfrom rclpy.qos import qos_profile_default, qos_profile_sensor_data\n# importing tf2_geometry_msgs registers PoseStamped with tf2's transform support\nfrom tf2_geometry_msgs import PoseStamped\nfrom geometry_msgs.msg import PoseStamped\n\nclass TargetPublisher(Node):\n def __init__(self,custom_qos_profile):\n super().__init__('target_publisher')\n self.custom_qos_profile = custom_qos_profile\n self.pub = self.create_publisher(PoseStamped, 'move_base_simple/goal', qos_profile=self.custom_qos_profile)\n\n self.sub = self.create_subscription(PoseStamped, 'detected_point' , self.callback,qos_profile=self.custom_qos_profile)\n\n def callback(self,msg):\n\n\n # Transform the detected point into the map frame.\n tf_buf = tf2_ros.Buffer()\n tf_listener = tf2_ros.TransformListener(tf_buf)\n target_pt = tf_buf.transform(msg, \"map\")\n\ndef main(argv=sys.argv[1:]):\n # parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument(\n # '-s',dest='stat', choices=['msg_loss' , 'jitter' , 'latency' , 'frequency', 'throughput'], action='store',\n # help='set the network statistic to be plotted')\n # parser.add_argument(\n # '-t',dest='topic',action='store',\n # help='set the topic where the statistics are calculated from')\n # parser.set_defaults(stat='latency')\n # parser.add_argument(\n # 'argv', nargs=argparse.REMAINDER,\n # help='Pass arbitrary arguments to the executable')\n # args = parser.parse_args(argv)\n # parser.print_help()\n rclpy.init(args=argv)\n\n # print (args.topic)\n # print (args.stat)\n custom_qos_profile = qos_profile_default\n node = TargetPublisher(custom_qos_profile)\n\n try:\n rclpy.spin(node)\n except KeyboardInterrupt:\n print (\"Shutting down\")\n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"detection/scripts/target_publisher.py","file_name":"target_publisher.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"18173757","text":"from __future__ import unicode_literals\n\nimport numpy as np\nfrom PIL import Image\nimport os\nimport json\nimport cv2\nimport cPickle\nfrom wrappers 
import *\nfrom tiny import n_clusters\nfrom sklearn import preprocessing\nimport scipy.sparse\n\n\nclass SearchModel:\n with open(os.path.dirname(os.path.realpath(__file__)) + '/../data/kmean_model.pkl', 'r') as f:\n model = cPickle.load(f)\n with open(os.path.dirname(os.path.realpath(__file__)) + '/../data/lshforest_combine.pkl', 'r') as f:\n lsh = cPickle.load(f)\n with open(os.path.dirname(os.path.realpath(__file__)) + '/../data/vectorizer.pkl', 'r') as f:\n vectorizer = cPickle.load(f)\n with open(os.path.dirname(os.path.realpath(__file__)) + '/../data/sift_names.pkl', 'r') as f:\n names = cPickle.load(f)\n\n def sdd(self, uploaded_file):\n \"\"\"\n Find similar image base on SDD of histogram\n \"\"\"\n img = Image.open(uploaded_file).convert('L')\n img.thumbnail((32, 32))\n img_hist = img.histogram()\n hists = np.load(os.path.dirname(os.path.realpath(__file__)) + '/hists.pkl')\n s = np.sum((hists - img_hist) ** 2, axis=1)\n s = s.argsort()[:15]\n with open('hists/names.json', 'r') as f:\n names = json.load(f)\n s = [names[str(i)] for i in s]\n return s\n\n @elapsed()\n def cosine_sift(self, uploaded_file):\n \"\"\"\n Find similar image base on sift features\n \"\"\"\n name = uploaded_file.name\n name = name.replace('img', 'descr')\n name1 = name.replace('.jpg', '.txt')\n\n name = os.path.dirname(os.path.realpath(__file__)) + '/../shopping/queryimages/' + name1\n if not os.path.exists(name):\n name = os.path.dirname(os.path.realpath(__file__)) + '/../shopping/images/' + name1\n filenames = [name]\n text_hist = SearchModel.vectorizer.transform(filenames).tocsr()\n preprocessing.normalize(text_hist, copy=False)\n\n sift = cv2.xfeatures2d.SIFT_create()\n nparr = np.fromstring(uploaded_file.read(), np.uint8)\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n kp, des = sift.detectAndCompute(img, None)\n\n v = SearchModel.model.predict(des)\n sift_hist = np.histogram(v, bins=n_clusters, range=(0, n_clusters))[0]\n sift_hist = np.reshape(sift_hist, (1, len(sift_hist)))\n\n lamb = .5\n histogram = scipy.sparse.hstack([text_hist * lamb, sift_hist * (1-lamb)]).toarray()\n preprocessing.normalize(histogram, copy=False)\n\n indices = SearchModel.lsh.kneighbors(histogram, n_neighbors=24)[1][0]\n names = [SearchModel.names[i] for i in indices]\n return names","sub_path":"hists/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45361266","text":"import pytest\n\n\ndef test_event_dates(client, seeder, utils):\n user_id, admin_unit_id = seeder.setup_base()\n seeder.create_event(admin_unit_id)\n seeder.create_event(admin_unit_id, draft=True)\n au_short_name = \"meinecrew\"\n\n url = utils.get_url(\"widget_event_dates\", au_short_name=au_short_name)\n response = utils.get_ok(url)\n utils.assert_response_contains(response, \"widget.css\")\n\n event_url = utils.get_url(\"widget_event_date\", au_short_name=au_short_name, id=1)\n utils.assert_response_contains(response, event_url)\n\n draft_url = utils.get_url(\"widget_event_date\", au_short_name=au_short_name, id=2)\n utils.assert_response_contains_not(response, draft_url)\n\n url = utils.get_url(\n \"widget_event_dates\", au_short_name=au_short_name, keyword=\"name\"\n )\n utils.get_ok(url)\n\n url = utils.get_url(\n \"widget_event_dates\", au_short_name=au_short_name, category_id=1\n )\n utils.get_ok(url)\n\n url = utils.get_url(\n \"widget_event_dates\",\n au_short_name=au_short_name,\n coordinate=\"51.9077888,10.4333312\",\n 
distance=500,\n )\n utils.get_ok(url)\n\n url = utils.get_url(\n \"widget_event_dates\",\n au_short_name=au_short_name,\n date_from=\"2020-10-03\",\n date_to=\"2021-10-03\",\n )\n utils.get_ok(url)\n\n # Unverified\n au_short_name = \"unverifiedcrew\"\n _, _, unverified_id = seeder.create_event_unverified()\n url = utils.get_url(\"widget_event_dates\", au_short_name=au_short_name)\n response = utils.get_ok(url)\n\n unverified_date_id = seeder.get_event_date_id(unverified_id)\n unverified_url = utils.get_url(\n \"widget_event_date\", au_short_name=au_short_name, id=unverified_date_id\n )\n utils.assert_response_contains_not(response, unverified_url)\n\n\ndef test_event_dates_oneDay(client, seeder, utils):\n from project.dateutils import create_berlin_date\n\n user_id, admin_unit_id = seeder.setup_base()\n au_short_name = \"meinecrew\"\n\n start = create_berlin_date(2020, 10, 3, 10)\n end = create_berlin_date(2020, 10, 3, 11)\n name = \"Spezialveranstaltung\"\n seeder.create_event(admin_unit_id, name=name, start=start, end=end)\n\n url = utils.get_url(\n \"widget_event_dates\",\n au_short_name=au_short_name,\n date_from=\"2020-10-03\",\n date_to=\"2020-10-03\",\n )\n response = utils.get_ok(url)\n utils.assert_response_contains(response, name)\n\n\ndef test_event_date(client, seeder, utils, app, db):\n user_id, admin_unit_id = seeder.setup_base(log_in=False)\n seeder.create_event(admin_unit_id)\n au_short_name = \"meinecrew\"\n\n with app.app_context():\n from colour import Color\n\n from project.models import AdminUnit\n\n admin_unit = AdminUnit.query.get(admin_unit_id)\n admin_unit.widget_font = \"Arial\"\n admin_unit.widget_background_color = Color(\"#F5F5F5\")\n admin_unit.widget_primary_color = Color(\"#000000\")\n admin_unit.widget_link_color = Color(\"#FF0000\")\n db.session.commit()\n\n url = utils.get_url(\"widget_event_date\", au_short_name=au_short_name, id=1)\n response = utils.get_ok(url)\n utils.assert_response_contains(response, \"widget.css\")\n\n seeder.create_event(admin_unit_id, draft=True)\n url = utils.get_url(\"widget_event_date\", au_short_name=au_short_name, id=2)\n response = utils.get(url)\n utils.assert_response_unauthorized(response)\n\n # Unverified\n au_short_name = \"unverifiedcrew\"\n _, _, unverified_id = seeder.create_event_unverified()\n unverified_date_id = seeder.get_event_date_id(unverified_id)\n url = utils.get_url(\n \"widget_event_date\", au_short_name=au_short_name, id=unverified_date_id\n )\n utils.assert_response_unauthorized(response)\n\n\ndef test_event_date_co_organizers(client, seeder, utils, app, db):\n user_id, admin_unit_id = seeder.setup_base(log_in=False)\n event_id, organizer_a_id, organizer_b_id = seeder.create_event_with_co_organizers(\n admin_unit_id\n )\n au_short_name = \"meinecrew\"\n\n url = utils.get_url(\"widget_event_date\", au_short_name=au_short_name, id=event_id)\n response = utils.get(url)\n response = utils.get_ok(url)\n utils.assert_response_contains(response, \"Organizer A\")\n utils.assert_response_contains(response, \"Organizer B\")\n\n\ndef get_create_data():\n return {\n \"accept_tos\": \"y\",\n \"name\": \"Vorschlag\",\n \"start\": [\"2030-12-31\", \"23:59\"],\n \"contact_name\": \"Vorname Nachname\",\n \"contact_email\": \"vorname@nachname.de\",\n \"contact_email_notice\": \"y\",\n \"event_place_id\": \"Freitext Ort\",\n \"organizer_id\": \"Freitext Organisator\",\n }\n\n\n@pytest.mark.parametrize(\"db_error\", [True, False])\n@pytest.mark.parametrize(\"free_text\", [True, 
False])\n@pytest.mark.parametrize(\"free_text_suffix\", [True, False])\n@pytest.mark.parametrize(\"missing_preview_field\", [True, False])\ndef test_event_suggestion_create_for_admin_unit(\n client,\n app,\n seeder,\n utils,\n mocker,\n db_error,\n free_text,\n free_text_suffix,\n missing_preview_field,\n):\n user_id = seeder.create_user()\n admin_unit_id = seeder.create_admin_unit(user_id, \"Meine Crew\")\n au_short_name = \"meinecrew\"\n\n url = utils.get_url(\n \"event_suggestion_create_for_admin_unit\", au_short_name=au_short_name\n )\n response = utils.get_ok(url)\n utils.assert_response_contains(response, \"widget.css\")\n\n data = get_create_data()\n if not free_text:\n data[\"event_place_id\"] = seeder.upsert_default_event_place(admin_unit_id)\n data[\"organizer_id\"] = seeder.upsert_default_event_organizer(admin_unit_id)\n\n elif free_text_suffix:\n data[\"event_place_id_suffix\"] = \"Place address\"\n data[\"organizer_id_suffix\"] = \"Organizer address\"\n\n if db_error:\n utils.mock_db_commit(mocker)\n\n mail_mock = utils.mock_send_mails(mocker)\n\n if missing_preview_field:\n del data[\"accept_tos\"]\n\n # preview post\n preview_response = utils.post_form(\n url + \"?preview=True\",\n response,\n data,\n )\n\n if missing_preview_field:\n utils.assert_response_error_message(preview_response)\n return\n\n utils.assert_response_ok(preview_response)\n\n # real post\n response = utils.post_form(\n url,\n response,\n data,\n )\n\n if db_error:\n utils.assert_response_db_error(response)\n return\n\n with app.app_context():\n from project.models import EventReviewStatus, EventSuggestion\n\n suggestion = (\n EventSuggestion.query.filter(EventSuggestion.admin_unit_id == admin_unit_id)\n .filter(EventSuggestion.name == \"Vorschlag\")\n .first()\n )\n assert suggestion is not None\n assert suggestion.review_status == EventReviewStatus.inbox\n suggestion_id = suggestion.id\n\n utils.assert_response_redirect(\n response, \"event_suggestion_review_status\", event_suggestion_id=suggestion_id\n )\n utils.assert_send_mail_called(mail_mock, \"test@test.de\")\n\n\ndef test_event_suggestion_create_for_admin_unit_allday(\n client,\n app,\n seeder,\n utils,\n):\n user_id = seeder.create_user()\n admin_unit_id = seeder.create_admin_unit(user_id, \"Meine Crew\")\n au_short_name = \"meinecrew\"\n\n url = utils.get_url(\n \"event_suggestion_create_for_admin_unit\", au_short_name=au_short_name\n )\n response = utils.get_ok(url)\n\n data = get_create_data()\n data[\"allday\"] = \"y\"\n response = utils.post_form(\n url,\n response,\n data,\n )\n\n with app.app_context():\n from project.models import EventSuggestion\n\n suggestion = (\n EventSuggestion.query.filter(EventSuggestion.admin_unit_id == admin_unit_id)\n .filter(EventSuggestion.name == \"Vorschlag\")\n .first()\n )\n assert suggestion is not None\n assert suggestion.allday\n suggestion_id = suggestion.id\n\n utils.assert_response_redirect(\n response, \"event_suggestion_review_status\", event_suggestion_id=suggestion_id\n )\n\n\ndef test_event_suggestion_create_for_admin_unit_emptyFreeText(\n client, app, seeder, utils, mocker\n):\n user_id = seeder.create_user()\n seeder.create_admin_unit(user_id, \"Meine Crew\")\n au_short_name = \"meinecrew\"\n\n url = utils.get_url(\n \"event_suggestion_create_for_admin_unit\", au_short_name=au_short_name\n )\n response = utils.get_ok(url)\n\n data = get_create_data()\n data[\"event_place_id\"] = \" \"\n data[\"organizer_id\"] = \" \"\n\n response = utils.post_form(\n url,\n response,\n data,\n )\n 
utils.assert_response_error_message(response)\n\n\ndef test_event_suggestion_create_for_admin_unit_invalidEventPlaceId(\n client, app, seeder, utils, mocker\n):\n user_id = seeder.create_user()\n seeder.create_admin_unit(user_id, \"Meine Crew\")\n au_short_name = \"meinecrew\"\n\n url = utils.get_url(\n \"event_suggestion_create_for_admin_unit\", au_short_name=au_short_name\n )\n response = utils.get_ok(url)\n\n data = get_create_data()\n data[\"event_place_id\"] = \"\\u00B2\" # unicode for ²\n\n response = utils.post_form(\n url,\n response,\n data,\n )\n assert response.status_code == 302\n\n\ndef test_event_suggestion_create_for_admin_unit_notEnabled(client, app, seeder, utils):\n user_id = seeder.create_user()\n seeder.create_admin_unit(user_id, \"Meine Crew\", suggestions_enabled=False)\n au_short_name = \"meinecrew\"\n\n url = utils.get_url(\n \"event_suggestion_create_for_admin_unit\", au_short_name=au_short_name\n )\n response = utils.get(url)\n utils.assert_response_notFound(response)\n","sub_path":"tests/views/test_widget.py","file_name":"test_widget.py","file_ext":"py","file_size_in_byte":9676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"223601556","text":"\n\nimport sys\nfrom PyQt5.QtWidgets import QWidget, QMessageBox, QApplication\n\n\n# QMessageBox provides two sets of interfaces:\n# one is the static functions (static method calls),\n# the other is the property-based API.\n# Calling the static methods directly is the simpler route,\n# but it is less flexible than the property-based API.\n# The official Qt documentation recommends the property-based API.\n\n# There is an article about this; for details see\n# http://blog.csdn.net/zhulove86/article/details/52524735\nclass Example(QWidget):\n\tdef __init__(self):\n\t\tsuper(Example, self).__init__()\n\t\tself.initUI()\n\n\tdef initUI(self):\n\t\t\n\t\tself.setGeometry(200, 200, 300, 300)\n\t\tself.setWindowTitle('message box')\n\t\tself.show()\n\n\t# This overridden method is the event handler triggered when the window is being closed\n\t# It receives one parameter, e, the close event object\n\tdef closeEvent(self, e):\n\t\treply = QMessageBox.question(self, 'Message',\"Are you sure to quit?\",\n\t\t\t\tQMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n\t\t# If the user clicked Yes\n\t\tif reply == QMessageBox.Yes:\n\t\t\te.accept() # accept the event, letting the window close\n\t\telse:\n\t\t\te.ignore() # reject the event, keeping the window open\n\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\tex = Example()\n\tsys.exit(app.exec_())\n\n\n","sub_path":"Python/py.qt.study/06first.messagebox.py","file_name":"06first.messagebox.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222067378","text":"from kafka import KafkaConsumer\nimport json\nimport sys\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read(r'./config.cfg')\n\nkafka_server = config.get('kafka', 'server')\nkafka_topic = config.get('kafka', 'topic')\n\ntweet_download_file = config.get('consumer_file', 'file_name')\n\nfileHandle = open(tweet_download_file, \"a+\")\n\nconsumer = KafkaConsumer(bootstrap_servers=kafka_server, group_id='app1', auto_offset_reset='latest')\nconsumer.subscribe([kafka_topic])\nfor message in consumer:\n try:\n #msg = message.value.get(\"user\",\"\").get(\"name\", \"\") + \"\\t\" + message.value.get(\"created_at\",\"\") + \"\\t\" + message.value.get(\"text\", \"\") + \"\\t\" + message.value.get(\"retweet_count\",\"\")\n print(message.value)\n fileHandle.write(message.value.decode(\"utf-8\") + \"\\n\")\n except Exception:\n print(\"Unexpected error:\", sys.exc_info()[0])\n 
pass\n\n\nfileHandle.close()","sub_path":"kafka/producer-consumer/python/tweet_consumer.py","file_name":"tweet_consumer.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"29829398","text":"#!/usr/bin/python3\n\"\"\" BasicCache module\n\"\"\"\nBaseCaching = __import__('base_caching').BaseCaching\n\n\nclass LIFOCache(BaseCaching):\n \"\"\" a caching system\n \"\"\"\n def __init__(self):\n super().__init__()\n self.keys = []\n\n def put(self, key, item):\n \"\"\" Add an item in the cache\n \"\"\"\n if key and item:\n self.cache_data[key] = item\n if key in self.keys:\n self.keys.remove(key)\n self.keys.append(key)\n if len(self.keys) > BaseCaching.MAX_ITEMS:\n popped = self.keys[BaseCaching.MAX_ITEMS - 1]\n print(\"DISCARD: \" + popped)\n self.cache_data.pop(popped)\n self.keys.remove(popped)\n\n def get(self, key):\n \"\"\" Get an item by key\n \"\"\"\n if key and key in self.cache_data:\n return self.cache_data[key]\n return None\n","sub_path":"0x03-caching/2-lifo_cache.py","file_name":"2-lifo_cache.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486406420","text":"from pylatex import Document, Section, Command\nfrom pylatex.utils import NoEscape\n\nfrom get_user_input import schools, name\n\n\ndef dict_doc_append(d):\n with doc.create(Section('Experience')):\n for k, v in d.items():\n if isinstance(v, dict):\n dict_doc_append(v)\n else:\n doc.append(v)\n\n\ndef fill_document(doc):\n \"\"\"\n Add a section, a subsection, and some text to the document.\n\n :param doc: the document\n :type doc: :class:`pylatex.document.Document` instance\n \"\"\"\n dict_doc_append(schools)\n\n\nif __name__ == '__main__':\n doc = Document('basic')\n fill_document(doc)\n\n doc.generate_pdf(clean_tex=False)\n doc.generate_tex()\n\n # Document with `\\maketitle` command activated\n doc = Document()\n\n doc.preamble.append(Command('title', 'Resume'))\n doc.preamble.append(Command('author', name))\n doc.append(NoEscape(r'\\maketitle'))\n\n fill_document(doc)\n\n doc.generate_pdf('basic_maketitle', clean_tex=False)\n\n text = doc.dumps()\n","sub_path":"resume_generator/create_document.py","file_name":"create_document.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635946939","text":"from itertools import starmap\nfrom operator import itemgetter\nfrom typing import (AbstractSet,\n Any,\n List,\n Optional)\n\nfrom hypothesis import strategies\nfrom sqlalchemy.schema import (Column,\n Constraint)\n\nfrom hypothesis_sqlalchemy import constrained\nfrom hypothesis_sqlalchemy.hints import (RecordType,\n Strategy)\nfrom . 
import values\n\n\ndef factory(columns: List[Column],\n **fixed_columns_values: Strategy) -> Strategy[RecordType]:\n def to_plain_values_strategy(column: Column) -> Strategy[Any]:\n result = values.factory(column)\n if column.nullable:\n # putting simpler strategies first\n # more info at\n # https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.one_of\n result = strategies.none() | result\n return result\n\n if fixed_columns_values:\n def to_values_strategy(column: Column) -> Strategy[Any]:\n column_name = column.name\n if column_name in fixed_columns_values:\n return fixed_columns_values[column_name]\n else:\n return to_plain_values_strategy(column)\n else:\n to_values_strategy = to_plain_values_strategy\n return strategies.tuples(*map(to_values_strategy, columns))\n\n\ndef lists_factory(columns: List[Column],\n constraints: AbstractSet[Constraint],\n *,\n min_size: int = 0,\n max_size: Optional[int] = None,\n **fixed_columns_values: Strategy\n ) -> Strategy[List[RecordType]]:\n values_tuples = factory(columns,\n **fixed_columns_values)\n columns_indices = {column: index for index, column in enumerate(columns)}\n unique_indices = [[columns_indices[column]\n for column in constraint.columns]\n for constraint in constraints\n if isinstance(constraint, constrained.UNIQUE_TYPES)\n and constraint.columns]\n\n if unique_indices:\n # Create a tuple of functions, each function asserting the uniqueness\n # of a single column value\n unique_by = tuple(starmap(itemgetter, unique_indices))\n else:\n unique_by = None\n\n return strategies.lists(values_tuples,\n min_size=min_size,\n max_size=max_size,\n unique_by=unique_by)\n","sub_path":"hypothesis_sqlalchemy/columnar/records.py","file_name":"records.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103145227","text":"from django.conf import settings\nfrom django.conf.urls import patterns\nfrom django.shortcuts import render\nfrom django.http import Http404\nfrom django.template import TemplateDoesNotExist\nfrom django.utils.translation import ugettext as _\n\n\ndef help_view(request, template=None, language=None):\n language = language or getattr(request, 'LANGUAGE_CODE',\n settings.LANGUAGE_CODE)\n template_name = 'help/{}/{}'.format(language, template)\n try:\n return render(request, template_name)\n except TemplateDoesNotExist:\n pass\n if len(language) > 2:\n return help_view(request, template=template, language=language[:2])\n if not settings.LANGUAGE_CODE.startswith(language):\n return help_view(request, template=template,\n language=settings.LANGUAGE_CODE)\n raise Http404\n\n\nurlpatterns = patterns(\"\",\n (r'^$', help_view, {'template': 'index.html'}, 'help-index'),\n # Translators: URL part of /help/\n (r'^%s/$' % _('about'), help_view, {'template': 'about.html'},\n 'help-about'),\n # Translators: URL part of /help/\n (r'^%s/$' % _('terms'), help_view, {'template': 'terms.html'},\n 'help-terms'),\n # Translators: URL part of /help/\n (r'^%s/$' % _('privacy'), help_view, {'template': 'privacy.html'},\n 'help-privacy'),\n # Translators: URL part of /help/\n (r'^%s/$' % _('making-requests'), help_view,\n {'template': 'making-requests.html'}, 'help-making_requests'),\n # Translators: URL part of /help/\n (r'^%s/$' % _('your-privacy'), help_view,\n {'template': 'your-privacy.html'}, 'help-your_privacy'),\n # Translators: URL part of /help/\n (r'^%s/$' % _('for-foi-officers'), help_view,\n {'template': 'foi-officers.html'}, 
'help-foi_officers'),\n (r'^%s/$' % _('donate'), help_view, {'template': 'donate.html'},\n 'help-donate'),\n)\n","sub_path":"froide/help_urls.py","file_name":"help_urls.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460029537","text":"import os\nfrom flask import Flask, send_from_directory\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nfrom flask_login import LoginManager\nfrom flask_wtf.csrf import CSRFProtect, generate_csrf\n\nfrom .config import Configuration\nfrom .models import db, User\nfrom .seeds import seed_commands\nfrom .api.auth_routes import auth_routes\nfrom .api.directory_routes import directory_routes\nfrom .api.document_routes import document_routes\n\n\napp = Flask(__name__)\n\n\nlogin = LoginManager(app)\nlogin.login_view = \"auth.unauthorized\"\n\n\n@login.user_loader\ndef load_user(id):\n return User.query.get(int(id))\n\n\napp.cli.add_command(seed_commands)\n\n\napp.config.from_object(Configuration)\napp.register_blueprint(auth_routes, url_prefix=\"/api/auth\")\napp.register_blueprint(directory_routes, url_prefix=\"/api/directories\")\napp.register_blueprint(document_routes, url_prefix=\"/api/documents\")\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\nCORS(app)\n\n\n@app.after_request\ndef inject_csrf_token(response):\n response.set_cookie(\"csrf_token\",\n generate_csrf(),\n secure=True if os.environ.get(\n \"FLASK_ENV\") == \"production\" else False,\n samesite=\"Strict\" if os.environ.get(\n \"FLASK_ENV\") == \"production\" else None,\n httponly=True\n )\n return response\n\n\n@app.route(\"/\", defaults={\"path\": \"\"})\n@app.route(\"/<path:path>\")\ndef react_root(path):\n print(\"path\", path)\n if path == \"favicon.ico\":\n return send_from_directory(\"static\", \"favicon.ico\")\n return send_from_directory(\"static\", \"index.html\")\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163511244","text":"# 5. Find the largest negative element in an array and print its value and its position (index) in the array.\n\nfrom random import random\n\n# build the array.\nquantity = int(input('Enter the array length: '))\ndata_array = []\n\nfor i in range(quantity):\n data_array.append(int(random() * 100) - 100)\n\nprint(data_array)\n\ni = 0\nmax_negative = -1\n\nwhile i < quantity:\n\n if data_array[i] < 0 and max_negative == -1:\n max_negative = i # first negative element encountered\n\n elif data_array[i] < 0 and data_array[i] > data_array[max_negative]:\n max_negative = i\n\n i += 1\n\nprint(f'The largest negative 
element is {data_array[max_negative]}, at position {max_negative + 1} in the array')\n","sub_path":"Lesson_3/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"582426454","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass XicidailiSpider(scrapy.Spider):\n name = 'xicidaili' # spider name\n allowed_domains = ['xicidaili.com'] # domains the spider may crawl\n start_urls = ['http://www.qydaili.com/free/'] # URL where crawling starts\n # parse the response, extracting the data and follow-up URLs; response is the page source\n def parse(self, response):\n\n selectors = response.xpath(\"//table[@class='table table-bordered table-striped']/tbody/tr\") # select all tr tags\n for selector in selectors:\n item = {}\n item['ip'] = selector.xpath(\"./td[1]/text()\").extract_first()\n item['server_addr'] = selector.xpath(\"./td[5]/text()\").extract_first()\n item['conceal'] = selector.xpath(\"./td[3]/text()\").extract_first()\n item['type'] = selector.xpath(\"./td[4]/text()\").extract_first()\n item['ceck_time'] = selector.xpath(\"./td[7]/text()\").extract_first()\n # print(item)\n yield item\n next_url = response.xpath(\"//a[@aria-label='Next']/@href\").extract_first()\n if next_url != '?action=china&page=1281':\n new_next_url = 'http://www.qydaili.com/free/' + next_url\n yield scrapy.Request(\n new_next_url,\n callback=self.parse,\n dont_filter=True,\n )\n\n","sub_path":"sicidailiSpider/sicidailiSpider/spiders/xicidaili.py","file_name":"xicidaili.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560126054","text":"from bs4 import BeautifulSoup\nfrom bs4.element import Tag\nimport sys\nimport os\nimport locale\n\n\ndef is_tag(tag):\n return isinstance(tag, Tag)\n\n\ndef process_file(file_name):\n\n raw_soup = BeautifulSoup(open(file_name), from_encoding=\"utf-8\")\n\n h2_tags = raw_soup.find_all('h2')\n\n for h2_tag in h2_tags:\n x = 0\n children = h2_tag.find_all(True, recursive=False)\n for child in children:\n if child.has_attr('class'):\n if child['class'] != ['ipa']:\n x = 1\n else:\n x = 1\n\n if x == 1:\n f.write(str(h2_tag))\n\n\n\n return\n\n\ndef process_all(files_list):\n for i, file_name in enumerate(files_list):\n if i % 11000 == 0:\n print(\"Progress: \" + str(int(i * 100 / len(files_list))) + \"%\")\n return\n\n\nif __name__ == '__main__':\n\n source_path = \"H2_Sandbox.html\"\n out_path = \"H2_Irregular.html\"\n\n f = open(out_path, 'a')\n process_file(source_path)\n f.close()\n\n print(\"Done!\")\n","sub_path":"py/sandbox_extract_irregular.py","file_name":"sandbox_extract_irregular.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212292749","text":"from enum import Enum\nimport numpy as np\n\nclass HorT(Enum):\n \"\"\"result of the coin flip\"\"\"\n HEAD = 0\n TAIL = 1\n\n\nclass Game(object):\n \"\"\"running the 20 flip game, single player\"\"\"\n def __init__(self, id, head_prob):\n self._id = id # Player number\n self._headProb = head_prob # Probability H\n self._HorT = HorT.HEAD # Default start H, meaningless\n\n self._TCount = 0 # T count, starts at zero\n self._WinCount = 0 # Win count, starts at zero\n self._TotalFlips = 20 # 20 flips per game\n self._FlipCount = 1 # Flip count, starts at one\n\n self._rnd = np.random # Declaring random number generator, will be changed\n self._rnd.seed(self._id * self._FlipCount) # Repeatable seed of random number generator for 
flip\n\n\n def flip_advance(self):\n # Draw a single sample per time-step so one flip has one consistent outcome\n is_head = self._rnd.random_sample() < self._headProb\n\n if is_head:\n # A T-->H transition that follows >=2 consecutive T is a win\n if self._HorT == HorT.TAIL and self._TCount >= 2:\n self._WinCount += 1 # Update win count\n self._HorT = HorT.HEAD # Change status to H\n self._TCount = 0 # Reset T count\n else:\n self._HorT = HorT.TAIL # Change status to T\n self._TCount += 1 # Update T count\n\n self._FlipCount += 1 # Update flip count\n\n\n def run_game(self):\n for i in range(1, self._TotalFlips+1): # Game of 20 tosses\n self._rnd = np.random\n self._rnd.seed(self._id * self._FlipCount)\n\n self.flip_advance()\n\n\n def game_reward(self):\n self.run_game() # Call run game\n\n self._reward = -250 # Ticket price\n self._reward += 100*self._WinCount # Update with winnings\n\n return self._reward\n\n\n# Testing with single player\n#TestSubject = Game(id=120, head_prob=0.5)\n#print(TestSubject.game_reward())\n\n\n# Defining the simulation cohort\nclass Cohort:\n def __init__(self, id, pop_size, head_prob):\n \"\"\" define the cohort\n\n :param id: Cohort identifier\n :param pop_size: Number of players\n :param head_prob: Probability H \"\"\"\n\n self._players = [] # List of players\n n = 1 # Current population\n\n while n <= pop_size:\n # create new player (use id * pop_size + n as player id)\n player = Game(id=id * pop_size + n, head_prob=head_prob)\n\n self._players.append(player) # Add player to cohort\n n += 1 # Increase cohort population size\n\n\n def simulate(self):\n \"\"\" run simulation on cohort \"\"\"\n game_rewards = [] # List of simulated game rewards\n\n for player in self._players: # Run game for all players, add to list\n game_rewards.append(player.game_reward())\n\n return sum(game_rewards)/len(game_rewards) # Return average game reward\n\n\n\n# Testing cohort\n# mimicking fair coin, P(H) = 0.5\nTestCohort = Cohort(id=1, pop_size=1000, head_prob=0.5)\nprint('Average game reward in dollars:', TestCohort.simulate())\n\nTestCohort = Cohort(id=2, pop_size=1000, head_prob=0.5)\nprint('Average game reward in dollars:', TestCohort.simulate())\n\n# running with P(H) = 0.4\nTestCohort = Cohort(id=1, pop_size=1000, head_prob=0.4)\nprint('Average game reward in dollars:', TestCohort.simulate())\n\nTestCohort = Cohort(id=2, pop_size=1000, head_prob=0.4)\nprint('Average game reward in dollars:', TestCohort.simulate())","sub_path":"P2_Model.py","file_name":"P2_Model.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"536997739","text":"import tensorflow as tf\r\nimport numpy as np\r\n# and function\r\natributes =[ \r\n[0,0]\r\n,[0,1]\r\n,[1,0]\r\n,[1,1]\r\n]\r\n\r\nlabels =[ \r\n0\r\n,0\r\n,0\r\n,1\r\n]\r\n\r\ndata=np.array(atributes,'float32')\r\ntarget=np.array(labels,'float32')\r\nfeature_columns = 
[tf.contrib.layers.real_valued_column(\"\")]\r\nlearningRate = 0.1\r\nepoch= 10000 #learning time\r\n\r\nclassifier= tf.contrib.learn.DNNClassifier(\r\n\t\tfeature_columns = feature_columns\r\n\t\t, hidden_units =[3] # one hidden layer with 3 units for the 2 input attributes\r\n\t\t,activation_fn = tf.nn.sigmoid\r\n\t\t,optimizer=tf.train.GradientDescentOptimizer(learningRate)\r\n)\r\n\r\nclassifier.fit(data,target,steps=epoch)\r\n\r\ndef test_set():\r\n return np.array(atributes, np.float32)\r\n\r\npredictions = classifier.predict_classes(input_fn = test_set)\r\n\r\nindex = 0\r\nfor i in predictions:\r\n print(data[i], \"-> actual: \" , target[index] , \", predict :\",i)\r\n index = index + 1\r\n \r\n ","sub_path":"1and.py","file_name":"1and.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635946939","text":"# -*- coding: utf-8 -*-\n\"\"\"\n###################\nPygments Formatters\n###################\n\nModified Pygments formatters.\n\n\n\"\"\"\n\n\nfrom __future__ import print_function\n\nimport os\nimport os.path\n\nfrom pygments.token import Token\nfrom pygments.formatters.html import HtmlFormatter, _escape_html_table\n\nfrom django.core.validators import URLValidator, ValidationError\n\n\nurl_validator = URLValidator()\n\n\nclass HtmlLinkifyFormatter(HtmlFormatter):\n \"\"\"Add anchor tags to hyperlinks in code.\"\"\"\n\n def _format_lines(self, tokensource):\n \"\"\"\n Just format the tokens, without any wrapping tags.\n Yield individual lines.\n \"\"\"\n nocls = self.noclasses\n lsep = self.lineseparator\n # for lookup only\n getcls = self.ttype2class.get\n c2s = self.class2style\n escape_table = _escape_html_table\n tagsfile = self.tagsfile\n\n lspan = ''\n line = []\n for ttype, value in tokensource:\n if nocls:\n cclass = getcls(ttype)\n while cclass is None:\n ttype = ttype.parent\n cclass = getcls(ttype)\n cspan = cclass and '<span style=\"%s\">' % c2s[cclass][0] or ''\n else:\n cls = self._get_css_classes(ttype)\n cspan = cls and '<span class=\"%s\">' % cls or ''\n\n parts = value.translate(escape_table).split('\\n')\n\n # ================================================================\n # MODIFIED\n # ================================================================\n # Make URLs clickable\n # ====================\n #\n # Converts valid urls to::\n #\n # <a href=\"{{ url }}\">
\n # \"{{ url }}\"\n # \n #\n if ttype == Token.Literal.String.Double:\n v = value.strip('\"')\n try:\n url_validator(v)\n parts = [(''\n '"%s"'\n '') % (v, v.translate(escape_table))]\n except ValidationError:\n pass\n # ================================================================\n\n if tagsfile and ttype in Token.Name:\n filename, linenumber = self._lookup_ctag(value)\n if linenumber:\n base, filename = os.path.split(filename)\n if base:\n base += '/'\n filename, extension = os.path.splitext(filename)\n url = self.tagurlformat % {'path': base, 'fname': filename,\n 'fext': extension}\n parts[0] = \"%s\" % \\\n (url, self.lineanchors, linenumber, parts[0])\n parts[-1] = parts[-1] + \"\"\n\n # for all but the last line\n for part in parts[:-1]:\n if line:\n # print(line)\n if lspan != cspan:\n line.extend(((lspan and ''), cspan, part,\n (cspan and ''), lsep))\n else: # both are the same\n line.extend((part, (lspan and ''), lsep))\n # out = 1, ''.join(line)\n yield 1, ''.join(line)\n line = []\n elif part:\n yield 1, ''.join((cspan, part, (cspan and ''),\n lsep))\n else:\n yield 1, lsep\n # for the last line\n if line and parts[-1]:\n if lspan != cspan:\n line.extend(((lspan and ''), cspan, parts[-1]))\n lspan = cspan\n else:\n line.append(parts[-1])\n elif parts[-1]:\n line = [cspan, parts[-1]]\n lspan = cspan\n # else we neither have to open a new span nor set lspan\n\n if line:\n line.extend(((lspan and ''), lsep))\n yield 1, ''.join(line)\n","sub_path":"api_browser/pygments_formatters.py","file_name":"pygments_formatters.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587771642","text":"def read_power_line(line):\n elements = line.strip().split(',')\n mean, median, standard_deviation, rank, name = elements\n return { 'mean': float(mean), 'name': name }\n\ndef read_performance_line(line):\n elements = line.strip().split(',')\n strength_of_record, game_control, name = elements\n return {\n 'strength_of_record': float(strength_of_record),\n 'game_control': float(game_control),\n 'name': name\n }\n\ndef process_power_name(name):\n result = name\n if name.endswith(\"St\"):\n result = name + \"ate\"\n return result\n","sub_path":"processing/readers.py","file_name":"readers.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611677432","text":"import json\nimport datetime\n\nversion = 1.02\n\nname = 'koala-intelligence-agency'\n\nlowest = 99999999\nhighest = -9999999\n\ndef calc_traits_rank(cName):\n f = open(f'{cName}.json')\n jsondata = json.load(f)\n s = set()\n for i in jsondata : \n #print(i)\n for j in jsondata[i] : \n #print(j)\n s.add((j[2] , j[0] , j[1]))\n s = sorted(s)\n global highest\n global lowest\n rare_dict = {}\n for i in s:\n cnt = i[0]\n tag = (i[1],i[2])\n lowest = min(lowest , cnt)\n highest = max(highest , cnt)\n rare_dict[tag] = cnt\n return rare_dict\n\ndef calc_Traits_point(ranks):\n point_dict = {}\n y1 = 1000000\n y2 = 5\n x1 = lowest\n x2 = highest\n m = (y1 - y2) / (x1 -x2)\n #print(m)\n for rank in ranks:\n points = y1 + m*(ranks[rank] - x1)\n if ranks[rank] == x1:\n points = 10000000\n point_dict[rank] = points\n\n #print(rank , ranks[rank] , points)\n \n # print(lowest,highest)\n return point_dict\n\ndef get_points_of_token(cName, token , point_dict):\n f = open(f'{cName}.json')\n jsondata = json.load(f)\n zed_points = 0\n for i in jsondata[str(token)]:\n tag = 
(i[0],i[1])\n \n zed_points = zed_points + point_dict[tag]\n print(zed_points,token)\n return zed_points\n \ntrait_ranks = calc_traits_rank(name)\ntrait_points = calc_Traits_point(trait_ranks)\n#print(trait_points)\nrank = []\n\nfor i in range(10000):\n b = get_points_of_token(name,i,trait_points)\n rank.append((b,i))\nprint('done')\nrank.sort(reverse=True)\nq = open(f'{name}_result.json', 'w', encoding='utf-8')\njson.dump(rank, q, ensure_ascii=False, indent=4)\n\n\n#calc_Traits_point(calc_traits_rank('koala-intelligence-agency'))\n\n#print(calc_traits_rank('koala-intelligence-agency'))\n","sub_path":"calculate_rarity_v2.py","file_name":"calculate_rarity_v2.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180697759","text":"import numpy as np\nimport random\nfrom functools import lru_cache\nfrom gym.core import Env\nfrom minimax_ai import MinimaxAI\n\nfrom gym import spaces\n\n\n\nclass TicTacToe(Env):\n\n def __init__(self):\n\n '''\n Initialize the game state.\n\n Attributes:\n board: 3x3 array holding the position (0 empty, 1 X, -1 O)\n x_next: True if it is X's turn to move, False if it is O's\n '''\n self.board = np.zeros([3,3], dtype=np.int8)\n self.x_next = True if random.random() > 0.5 else False\n self.o_ai = MinimaxAI(self)\n self.x_ai = MinimaxAI(self) # used by loop() for AI-vs-AI self-play\n self.action_space = spaces.Discrete(9)\n self.observation_space = spaces.Box(low=-1, high=1, dtype=np.int8, shape=(3,3))\n\n\n def reset(self):\n self.board = np.zeros([3, 3])\n self.x_next = True if random.random() > 0.5 else False\n if not self.x_next:\n row, column = self.o_ai.decide_turn()\n assert self.try_make_turn(row, column)\n\n return self.board\n\n def step(self, action):\n \"\"\"\n :param action: a discrete action from 0 to 8. 
Cells are enumerated from left to right,\n with columns continuing from top to bottom, like below:\n\n 0 | 1 | 2\n - + - + -\n 3 | 4 | 5\n - + - + -\n 6 | 7 | 8\n\n :return: observation, reward, done?, None (info)\n \"\"\"\n assert self.x_next\n row = action // 3\n column = action % 3\n info = {}\n\n if self.try_make_turn(row,column):\n result = self.evaluate(self.board)\n if result is None:\n row, column = self.o_ai.decide_turn()\n assert self.try_make_turn(row, column)\n result = self.evaluate(self.board)\n\n if result is None:\n return self.board, 0, False, {}\n else:\n return self.board, result, True, {}\n\n else:\n return self.board, -0.5, False, {}\n\n\n def try_make_turn(self, row, column):\n\n if self.board[row,column] == 0:\n\n self.board[row, column] = 1 if self.x_next else -1\n self.x_next = not self.x_next\n return True\n\n else:\n return False\n\n @staticmethod\n def evaluate(board):\n board_as_tuple = tuple(tuple(board[row]) for row in range(3))\n return TicTacToe._evaluate(board_as_tuple)\n\n\n\n def loop(self):\n while self.evaluate(self.board) is None:\n ai = self.x_ai if self.x_next else self.o_ai\n row, column = ai.decide_turn()\n assert self.try_make_turn(row, column)\n return self.evaluate(self.board)\n\n\n @property\n def board_as_tuple(self):\n return tuple(tuple(self.board[row]) for row in range(3))\n\n\n @staticmethod\n @lru_cache(maxsize=2**16)\n def _evaluate(board):\n\n sums = []\n sums += [sum(board[row]) for row in range(3)]\n sums += [sum([board[i][column] for i in range(3)]) for column in range(3)]\n sum_main_diag = sum([board[i][i] for i in range(3)])\n sum_opp_diag = sum([board[i][2 - i] for i in range(3)])\n\n sums.append(sum_main_diag)\n sums.append(sum_opp_diag)\n\n if 3 in sums:\n return 1\n elif -3 in sums:\n return -1\n else:\n n_empty = sum([1 for row in range(3) for cell in board[row] if cell == 0])\n if n_empty == 0:\n return 0\n else:\n return None\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"122317035","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 10 18:36:56 2015\n\n@author: ajaver\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.stats.multitest as smm\nimport matplotlib.pylab as plt\n#rank sum test\nfrom scipy.stats import ranksums, ttest_ind\n\ndef getPValues(feat_mean, strain_list, feat_list): \n strain_groups = feat_mean.groupby('Strain');\n features_N2 = strain_groups.get_group('N2');\n \n pvalue_table = pd.DataFrame(np.nan, index = feat_list, columns = strain_list, dtype = np.float64)\n for strain in pvalue_table.columns.values:\n features_S = strain_groups.get_group(strain);\n for feat in pvalue_table.index.values:\n x, y = features_N2[feat].values, features_S[feat].values\n dd, p_value = ttest_ind(x,y, equal_var=False)\n #dd, p_value = ranksums(x,y)\n \n #p_value positive if N2 is larger than the strain\n pvalue_table.loc[feat, strain] = p_value\n \n good = ~np.isnan(pvalue_table[strain])\n #correct for false discovery rate using 2-stage Benjamini-Krieger-Yekutieli\n reject, pvals_corrected, alphacSidak, alphacBonf = \\\n smm.multipletests(pvalue_table.loc[good,strain].values, method = 'fdr_tsbky')\n pvalue_table.loc[good,strain] = pvals_corrected\n return pvalue_table\n\ndef getZStats(feat_mean, strain, strain_ref = 'N2'):\n #%%\n strain_index = 
(feat_mean['Strain']==strain)\n    N2_index = (feat_mean['Strain']==strain_ref);\n    \n    good = strain_index | N2_index\n    subfeat = feat_mean[good]\n    strain_names = subfeat['Strain'].copy()\n    \n    del subfeat['Strain']\n    nan_feat = subfeat.isnull().any()\n    nan_feat = nan_feat | (subfeat==0).all()\n    subfeat = subfeat.loc[:,~nan_feat]\n    \n    subfeat_z = (subfeat - subfeat.mean())/subfeat.std()\n    subfeat_z['Strain'] = strain_names\n    \n    z_stats = subfeat_z.groupby('Strain').agg([np.mean, np.std, 'count'])\n    z_stats = z_stats.transpose()\n    \n    idx = pd.IndexSlice\n    \n    z_mean = z_stats.loc[(idx[:,'mean']), :]\n    z_mean.index = z_mean.index.droplevel(1)\n\n    z_std = z_stats.loc[(idx[:,'std']), :]\n    z_std.index = z_std.index.droplevel(1)\n    \n    z_count = z_stats.loc[(idx[:,'count']), :]\n    z_count.index = z_count.index.droplevel(1)\n    \n    z_err = z_std/np.sqrt(z_count)\n    #%%\n    dd = {'mean':z_mean[strain], 'mean_ref':z_mean[strain_ref],\n          'err':z_err[strain], 'err_ref':z_err[strain_ref]}\n    z_stats = pd.DataFrame(data=dd)\n    #%%\n    z_values = {}\n\n    dd = subfeat_z[subfeat_z['Strain'] == strain]\n    dd = dd.drop('Strain', axis = 1)\n    z_values['feat'] = np.tile(dd.columns.values, (len(dd), 1)).flatten()\n    z_values['values'] = dd.values.flatten()\n    \n    dd = subfeat_z[subfeat_z['Strain'] == strain_ref]\n    dd = dd.drop('Strain', axis = 1)\n    z_values['feat_ref'] = np.tile(dd.columns.values, (len(dd), 1)).flatten()\n    z_values['values_ref'] = dd.values.flatten()\n    #%%\n    return z_stats, nan_feat, z_values\n\ndef plotZStats(z_stats, p_values, z_values, save_name):\n    #%%\n    \n    ord_p = np.abs(np.log(p_values))\n    ord_p[z_stats['mean'] < 0] = -ord_p[z_stats['mean'] < 0]\n    ind_ord = np.argsort(ord_p.values)\n    feat_names = p_values.index.values[ind_ord]\n    x = np.arange(len(feat_names))\n    plt.figure(figsize=(8, 12))\n    plt.errorbar(z_stats['mean'].values[ind_ord], x, xerr=z_stats['err'].values[ind_ord], fmt='.b')\n    plt.errorbar(z_stats['mean_ref'].values[ind_ord], x, xerr=z_stats['err_ref'].values[ind_ord], fmt='.g')\n    bad_p = np.where(p_values.values[ind_ord]>0.05)[0]\n    lim1_p = bad_p[0]-1\n    lim2_p = bad_p[-1]+1\n    limX = plt.xlim()\n    plt.plot(limX, [lim1_p, lim1_p], '--r')\n    plt.plot(limX, [lim2_p, lim2_p], '--r')\n    \n    \n    plt.yticks(x, feat_names)\n    ax = plt.gca()\n    ax.tick_params(axis='y', which='major', labelsize=5, gridOn=True)\n    plt.tight_layout()\n    plt.savefig(save_name, dpi=1200)\n    \n    plt.close()\n#%%\n    \nif __name__ == '__main__':\n    #plates_file = '/Users/ajaver/Desktop/Gecko_compressed/Results/PlateFeatures.hdf5'\n    plates_file = '/Volumes/behavgenom$/GeckoVideo/Ana_Strains/PlateFeatures.hdf5'\n    #plates_file = '/Volumes/behavgenom$/GeckoVideo/Ana_Strains/PlateFeatures_MED.hdf5'\n    with pd.HDFStore(plates_file, 'r') as plates_fid:\n        feat_mean = plates_fid['/avg_feat_per_plate']\n        video_feat = plates_fid['/video_features']\n    \n    feat_mean.index = feat_mean['Base_Name'].values\n    del feat_mean['Base_Name']\n    \n    video_feat.index = video_feat['Base_Name'].values\n    del video_feat['Base_Name']\n    \n    dates = [x.split('_')[2] for x in video_feat.index]\n    dates = [x if x != '16062015' else '15062015' for x in dates ]\n    video_feat['Dates'] = dates\n    feat_mean['Dates'] = dates\n    \n    strain_list = [x for x in feat_mean['Strain'].unique() if x != 'N2']\n    \n    \n    #filter for only the main feat_mean.columns (no subdivision)\n    feat_list = [feat for feat in feat_mean.columns if not any(x in feat \\\n        for x in ['_Pos', '_Neg', '_Abs', '_Backward', '_Foward', '_Paused'])]\n    feat_mean = feat_mean.loc[:, feat_list]\n    \n    feat_list = [feat for feat in feat_mean.columns if not any(x in feat for x in ['Strain', 'Dates'])]\n    feat_list = sorted(feat_list)\n    \n    #%%\n    tot_rows = len(feat_list)*len(feat_mean)\n    feat_col = np.zeros(tot_rows, np.dtype(('U',50)))\n    date_col = np.zeros(tot_rows, np.dtype(('U',6)))\n    strain_col = np.zeros(tot_rows, np.dtype(('U',20)))\n    value_col = np.zeros(tot_rows, np.float)\n    z_col = np.zeros(tot_rows, np.float)\n    \n    tot_plates = len(feat_mean.index)\n    for ii, 
feat in enumerate(feat_list):\n bot = ii*tot_plates\n top = (ii+1)*tot_plates\n feat_col[bot:top] = feat\n date_col[bot:top] = feat_mean['Dates']\n strain_col[bot:top] = feat_mean['Strain']\n value_col[bot:top] = feat_mean[feat] \n \n dd = feat_mean[feat]\n dd = (dd - np.nanmean(dd))/np.nanstd(dd)\n z_col[bot:top] = dd \n \n dat_lab = pd.DataFrame({'Features': feat_col, 'Dates': date_col, 'Values': value_col, 'Z_Values': z_col, 'Strain' : strain_col})\n #%%\n import seaborn as sns\n \n for strain in feat_mean['Strain'].unique(): \n strain_data = dat_lab[dat_lab['Strain']==strain] \n \n sns.set_context(\"paper\")\n plt.figure(figsize=(6, 36))\n \n sns.boxplot(y = 'Features', x = 'Z_Values', hue = 'Dates', data = strain_data)\n plt.title(strain)\n #%%\n #feat_mean.boxplot('Primary_Wavelength_Foward', by='Strain')\n# \n# #%%\n#\n# good = (video_feat['Total_Frames']>170000) & (video_feat['Total_Frames']<200000)\n# #good = good & (video_feat['Dates'] != '19062015')&\n# video_feat = video_feat[good]\n# feat_mean = feat_mean[good]\n#\n# \n#\n# all_pvalues = {}\n# all_pvaluesC = {}\n# for strain in strain_list:\n# all_pvalues[strain] = pd.DataFrame()\n# all_pvaluesC[strain] = pd.DataFrame()\n# #%%\n# date_list = list(video_feat['Dates'].unique()) + ['All']\n# for ii_date, date in enumerate(date_list):\n# if date != 'All': \n# good = (video_feat['Dates'] == date)\n# video_feat_sub = video_feat[good]\n# feat_mean_sub = feat_mean[good]\n# else:\n# video_feat_sub = video_feat\n# feat_mean_sub = feat_mean\n# \n# tot_samples = feat_mean_sub['Strain'].value_counts()\n# print(date)\n# print(tot_samples)\n# #%%\n# pvalue_table = getPValues(feat_mean_sub, strain_list, feat_list)\n# \n# save_prefix = 'zstat_'\n# for ii_strain, strain in enumerate(strain_list):\n# #%%\n# z_stats, nan_feat, z_values = getZStats(feat_mean_sub, strain, strain_ref = 'N2')\n# \n# p_values = pvalue_table.loc[~nan_feat, strain]\n# all_pvalues[strain][date] = p_values\n# \n# # correct for false discovery rate using 2-stage Benjamini-Krieger-Yekutieli\n# #reject, pvals_corrected, alphacSidak, alphacBonf = \\\n# #smm.multipletests(p_values.values, method = 'fdr_tsbky')\n# #p_values = pd.Series(data = pvals_corrected, index = p_values.index)\n# #all_pvaluesC[strain][date] = p_values\n# \n# save_name = strain +'_' + date + '.pdf'\n# plotZStats(z_stats, p_values, z_values, save_name)\n# #print(z_stats['ord_p'])\n# \n# plt.figure(ii_strain)\n# strC = 'brgk'\n# #strC = 'b' if date != 'All' else 'k'\n# plt.plot(np.sort(all_pvalues[strain][date]),'.'+strC[ii_date], label = date)\n# #plt.plot(np.log10(np.sort(all_pvaluesC[strain][date])),'.g')\n# #yy = np.log10(0.05)\n# plt.plot(plt.xlim(), [0.05, 0.05], 'k:')\n# plt.gca().set_yscale('log')\n# plt.title(strain)\n# plt.xlabel('Features')\n# plt.ylabel('p-values')\n# plt.gca().legend(loc=4)\n# \n# for ii_strain, strain in enumerate(strain_list):\n# plt.figure(ii_strain)\n# plt.savefig('p-values_%s.pdf' % strain)\n# plt.close()\n# \n# #%%\n# for strain in strain_list: \n# strain_pvalues = all_pvalues[strain];\n# aa = (strain_pvalues<0.05).sum();\n# print(strain)\n# print(aa)\n# \n# \n# #%%\n# pd.set_option('display.max_rows', len(feat_list))\n# for strain in strain_list: \n# strain_pvalues = all_pvalues[strain]\n# max_pvalue = strain_pvalues.drop(['All'], axis=1).max(axis=1);\n# #max_pvalue = (strain_pvalues.drop(['All'], axis=1)<0.05).sum(axis=1)>=2;\n# max_pvalue = max_pvalue[max_pvalue<0.05]\n# max_pvalue.sort()\n# print(strain)\n# print(max_pvalue)\n# pd.reset_option('display.max_rows')\n# 
\n##%%\n##import seaborn as sns\n# \n##%%\n## pd.set_option('display.max_rows', len(feat_list))\n## for strain in strain_list: \n## strain_pvalues = all_pvalues[strain]['All']\n## strain_pvalues = strain_pvalues[strain_pvalues<0.05]\n## strain_pvalues.sort()\n## print(strain)\n## print(strain_pvalues)\n## pd.reset_option('display.max_rows')\n#\n##aa = pvalue_table[strain].sort(inplace=False) \n#\n#\n##%%\n##import matplotlib.pylab as plt\n##plt.plot(np.sort(np.log10(pvalue_table['ZR1'].values)))\n##plt.plot(np.sort(np.log10(pvalue_table['BR1941'].values)))","sub_path":"work_in_progress/_old/Features_analysis/plates_analysis/daily_variation.py","file_name":"daily_variation.py","file_ext":"py","file_size_in_byte":11002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142925646","text":"import numpy as np # For working with arrays\nimport random \nimport os # For interacting with the operating system\nimport cv2 # Open source computer vision library for computer vision tasks\nfrom keras.models import Sequential # To group linear stack of layers into a keras Model\nfrom keras.layers import Flatten, Dense, Conv2D, MaxPooling2D\nfrom keras import backend as K\nfrom keras.preprocessing.image import load_img, img_to_array\n\ndef euclidean_distance(vectors): # Function to compute the Euclidean distance between two vectors\n vector1, vector2 = vectors\n sum_square = K.sum(K.square(vector1 - vector2), axis=1, keepdims=True) # Calculate sum of squared differences\n return K.sqrt(K.maximum(sum_square, K.epsilon())) # Square root of sum of squared differences\n\ndef contrastive_loss(Y_true, D): # Function for calculating the contrastive loss\n margin = 1\n return K.mean(Y_true * K.square(D) + (1 - Y_true) * K.maximum((margin-D),0))\n\ndef accuracy(y_true, y_pred):\n return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))\n\ndef create_pairs(X,Y, num_classes): # Generate negative (different subject) and positive (same subject) \n # pairs of images for training a Siamese neural network\n pairs, labels = [], []\n # index of images in X and Y for each class\n class_idx = [np.where(Y==i)[0] for i in range(num_classes)]\n \n # The minimum number of images across all classes\n min_images = min(len(class_idx[i]) for i in range(num_classes)) - 1\n \n for c in range(num_classes):\n for n in range(min_images):\n \n # create positive pair\n img1 = X[class_idx[c][n]] # img1 from c class\n img2 = X[class_idx[c][n+1]] # img2 from c class\n pairs.append((img1, img2)) # Appending pair of images in pair list\n labels.append(1) # Appending label in label list (1: Same subject)\n \n # create negative pair\n # first, create list of classes that are different from the current class\n neg_list = list(range(num_classes))\n neg_list.remove(c)\n # select a random class from the negative list. 
\n            # this class will be used to form the negative pair\n            neg_c = random.sample(neg_list,1)[0]\n            img1 = X[class_idx[c][n]]\n            img2 = X[class_idx[neg_c][n]]\n            pairs.append((img1,img2))\n            labels.append(0)\n\n    return np.array(pairs), np.array(labels)\n\ndef create_shared_network(input_shape): # Used to create a Siamese neural network in Keras\n    model = Sequential(name='Shared_Conv_Network')\n    model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu', input_shape=input_shape)) # 2D convolution layer \n    model.add(MaxPooling2D()) # MaxPooling to select higher intensity values\n    model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu')) # 2D convolution layer \n    model.add(Flatten()) # Convert output of conv layer into 1D feature vector\n    # Dense layer expects data in one dimension\n    model.add(Dense(units=128, activation='sigmoid')) \n    return model\n\ndef get_data(dir): # To load the respective raw images into NumPy arrays\n    X_train, Y_train = [], [] # Training data\n    X_test, Y_test = [], [] # Testing data\n    subfolders = sorted([file.path for file in os.scandir(dir) if file.is_dir()]) #Sorted list of folders\n    for idx, folder in enumerate(subfolders):\n        for file in sorted(os.listdir(folder)):\n            img = load_img(folder+\"/\"+file, color_mode='grayscale') # Load image from folder\n            img = img_to_array(img).astype('float32')/255 # Normalize image\n            img = img.reshape(img.shape[0], img.shape[1],1) # Reshape image array\n            if idx < 35: # First 35 data in train\n                X_train.append(img)\n                Y_train.append(idx)\n            else: # From 35 to 40 in test\n                X_test.append(img)\n                Y_test.append(idx-35)\n    # Convert into numpy array\n    X_train = np.array(X_train) \n    X_test = np.array(X_test)\n    Y_train = np.array(Y_train)\n    Y_test = np.array(Y_test)\n    return (X_train, Y_train), (X_test, Y_test)\n\ndef write_on_frame(frame, text, text_x, text_y): # To write on frames\n    (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)[0]\n    box_coords = ((text_x, text_y), (text_x+text_width+20, text_y-text_height-20))\n    cv2.rectangle(frame, box_coords[0], box_coords[1], (255, 255, 255), cv2.FILLED) # Draw rectangle\n    cv2.putText(frame, text, (text_x, text_y-10), cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0,0,0), thickness=2) # Put text\n    return frame\n\n\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"499528385","text":"Dictionary={}\r\nwith open(\"Dictionary.txt\") as Dictionary_File:\r\n    for word in Dictionary_File:\r\n        #Number of character dials\r\n        if len(word.strip())==4:\r\n            Dictionary[word.strip().upper()]= True\r\n\r\n#All letters available\r\na='cgflbptsmd'\r\nb='earthiuoyl'\r\nc='ealtoinsrm'\r\nd='plyamdkesx'\r\n\r\nD2={}\r\nfor letter1 in a:\r\n    for letter2 in b:\r\n        for letter3 in c:\r\n            for letter4 in d:\r\n                word=(letter1+letter2+letter3+letter4).upper()\r\n                if word in Dictionary:\r\n                    D2[word]=True\r\n\r\nprint(D2)\r\nwhile True:\r\n    x = input(\"Type a word and see if it can be spelled: \").upper()\r\n    if x in D2:\r\n        print(x in D2)\r\n\r\n\r\n","sub_path":"Lock Generator.py","file_name":"Lock Generator.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"43510890","text":"#!/usr/bin/env python\n\n# Copyright 2017 The LUCI Authors. 
All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\n\"\"\"Bootstrap script to clone and forward to the recipe engine tool.\n\n*******************\n** DO NOT MODIFY **\n*******************\n\nThis is a copy of https://chromium.googlesource.com/infra/luci/recipes-py/+/master/doc/recipes.py.\nTo fix bugs, fix in the googlesource repo then run the autoroller.\n\"\"\"\n\nimport argparse\nimport json\nimport logging\nimport os\nimport random\nimport subprocess\nimport sys\nimport time\nimport urlparse\n\nfrom collections import namedtuple\n\nfrom cStringIO import StringIO\n\n# The dependency entry for the recipe_engine in the client repo's recipes.cfg\n#\n# url (str) - the url to the engine repo we want to use.\n# revision (str) - the git revision for the engine to get.\n# path_override (str) - the subdirectory in the engine repo we should use to\n# find it's recipes.py entrypoint. This is here for completeness, but will\n# essentially always be empty. It would be used if the recipes-py repo was\n# merged as a subdirectory of some other repo and you depended on that\n# subdirectory.\n# branch (str) - the branch to fetch for the engine as an absolute ref (e.g.\n# refs/heads/master)\n# repo_type (\"GIT\"|\"GITILES\") - An ignored enum which will be removed soon.\nEngineDep = namedtuple('EngineDep',\n 'url revision path_override branch repo_type')\n\n\nclass MalformedRecipesCfg(Exception):\n def __init__(self, msg, path):\n super(MalformedRecipesCfg, self).__init__('malformed recipes.cfg: %s: %r'\n % (msg, path))\n\n\ndef parse(repo_root, recipes_cfg_path):\n \"\"\"Parse is a lightweight a recipes.cfg file parser.\n\n Args:\n repo_root (str) - native path to the root of the repo we're trying to run\n recipes for.\n recipes_cfg_path (str) - native path to the recipes.cfg file to process.\n\n Returns (as tuple):\n engine_dep (EngineDep|None): The recipe_engine dependency, or None, if the\n current repo IS the recipe_engine.\n recipes_path (str) - native path to where the recipes live inside of the\n current repo (i.e. 
the folder containing `recipes/` and/or\n `recipe_modules`)\n \"\"\"\n with open(recipes_cfg_path, 'rU') as fh:\n pb = json.load(fh)\n\n try:\n if pb['api_version'] != 2:\n raise MalformedRecipesCfg('unknown version %d' % pb['api_version'],\n recipes_cfg_path)\n\n # If we're running ./doc/recipes.py from the recipe_engine repo itself, then\n # return None to signal that there's no EngineDep.\n if pb['project_id'] == 'recipe_engine':\n return None, pb.get('recipes_path', '')\n\n engine = pb['deps']['recipe_engine']\n\n if 'url' not in engine:\n raise MalformedRecipesCfg(\n 'Required field \"url\" in dependency \"recipe_engine\" not found',\n recipes_cfg_path)\n\n engine.setdefault('revision', '')\n engine.setdefault('path_override', '')\n engine.setdefault('branch', 'refs/heads/master')\n recipes_path = pb.get('recipes_path', '')\n\n # TODO(iannucci): only support absolute refs\n if not engine['branch'].startswith('refs/'):\n engine['branch'] = 'refs/heads/' + engine['branch']\n\n engine.setdefault('repo_type', 'GIT')\n if engine['repo_type'] not in ('GIT', 'GITILES'):\n raise MalformedRecipesCfg(\n 'Unsupported \"repo_type\" value in dependency \"recipe_engine\"',\n recipes_cfg_path)\n\n recipes_path = os.path.join(\n repo_root, recipes_path.replace('/', os.path.sep))\n return EngineDep(**engine), recipes_path\n except KeyError as ex:\n raise MalformedRecipesCfg(ex.message, recipes_cfg_path)\n\n\nGIT = 'git.bat' if sys.platform.startswith(('win', 'cygwin')) else 'git'\n\n\ndef _subprocess_call(argv, **kwargs):\n logging.info('Running %r', argv)\n return subprocess.call(argv, **kwargs)\n\n\ndef _git_check_call(argv, **kwargs):\n argv = [GIT]+argv\n logging.info('Running %r', argv)\n subprocess.check_call(argv, **kwargs)\n\n\ndef _git_output(argv, **kwargs):\n argv = [GIT]+argv\n logging.info('Running %r', argv)\n return subprocess.check_output(argv, **kwargs)\n\n\ndef parse_args(argv):\n \"\"\"This extracts a subset of the arguments that this bootstrap script cares\n about. 
Currently this consists of:\n * an override for the recipe engine in the form of `-O recipe_engin=/path`\n * the --package option.\n \"\"\"\n PREFIX = 'recipe_engine='\n\n p = argparse.ArgumentParser(add_help=False)\n p.add_argument('-O', '--project-override', action='append')\n p.add_argument('--package', type=os.path.abspath)\n args, _ = p.parse_known_args(argv)\n for override in args.project_override or ():\n if override.startswith(PREFIX):\n return override[len(PREFIX):], args.package\n return None, args.package\n\n\ndef checkout_engine(engine_path, repo_root, recipes_cfg_path):\n dep, recipes_path = parse(repo_root, recipes_cfg_path)\n if dep is None:\n # we're running from the engine repo already!\n return os.path.join(repo_root, recipes_path)\n\n url = dep.url\n\n if not engine_path and url.startswith('file://'):\n engine_path = urlparse.urlparse(url).path\n\n if not engine_path:\n revision = dep.revision\n subpath = dep.path_override\n branch = dep.branch\n\n # Ensure that we have the recipe engine cloned.\n engine = os.path.join(recipes_path, '.recipe_deps', 'recipe_engine')\n engine_path = os.path.join(engine, subpath)\n\n with open(os.devnull, 'w') as NUL:\n # Note: this logic mirrors the logic in recipe_engine/fetch.py\n _git_check_call(['init', engine], stdout=NUL)\n\n try:\n _git_check_call(['rev-parse', '--verify', '%s^{commit}' % revision],\n cwd=engine, stdout=NUL, stderr=NUL)\n except subprocess.CalledProcessError:\n _git_check_call(['fetch', url, branch], cwd=engine, stdout=NUL,\n stderr=NUL)\n\n try:\n _git_check_call(['diff', '--quiet', revision], cwd=engine)\n except subprocess.CalledProcessError:\n _git_check_call(['reset', '-q', '--hard', revision], cwd=engine)\n\n return engine_path\n\n\ndef main():\n if '--verbose' in sys.argv:\n logging.getLogger().setLevel(logging.INFO)\n\n args = sys.argv[1:]\n engine_override, recipes_cfg_path = parse_args(args)\n\n if recipes_cfg_path:\n # calculate repo_root from recipes_cfg_path\n repo_root = os.path.dirname(\n os.path.dirname(\n os.path.dirname(recipes_cfg_path)))\n else:\n # find repo_root with git and calculate recipes_cfg_path\n repo_root = (_git_output(\n ['rev-parse', '--show-toplevel'],\n cwd=os.path.abspath(os.path.dirname(__file__))).strip())\n repo_root = os.path.abspath(repo_root)\n recipes_cfg_path = os.path.join(repo_root, 'infra', 'config', 'recipes.cfg')\n args = ['--package', recipes_cfg_path] + args\n\n engine_path = checkout_engine(engine_override, repo_root, recipes_cfg_path)\n\n return _subprocess_call([\n sys.executable, '-u',\n os.path.join(engine_path, 'recipes.py')] + args)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"infra/bots/recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477930961","text":"import os\nfrom sys import stdin\ncwd = os.path.dirname(__file__)\nos.chdir(cwd)\ndef write_input(file_name):\n list_of_files = os.listdir()\n if file_name in list_of_files:\n myfile = open(file_name,'w')\n for line in stdin:\n if line == '\\n':\n break\n myfile.write(line) \n \n else:\n with open(file_name,'w') as myfile:\n pass\n for line in stdin:\n if line == '\\n':\n break\n myfile.write(line)\n myfile.close()\n\ndef read_input(file_name):\n myfile = open(file_name,'r')\n temp_inst = myfile.readlines()\n for i in range(len(temp_inst)):\n temp_inst[i] = temp_inst[i].strip('\\n')\n temp_inst[i] = temp_inst[i].replace('\\t',' ')\n\n myfile.close()\n 
return temp_inst\n\n","sub_path":"SimpleSimulator/inp.py","file_name":"inp.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454471084","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#\n# This script, when run, parses the file \"swagger.yaml\" and strips it down to only\n# include those paths and methods specified in the included variable.\n#\n# As of now, it is called with every \"jekyll build\" - see jekyll-freme/_plugins/jekyll-pages-directory.rb\n# line: \"exec(python swagger/yamlscript.py)\"\n#\n# To be able to import yaml, on linux, run \"sudo pip install PyYAML\"\n#\n# Author: Jonathan Sauder (jonathan_paul.sauder@dfki.de)\n#\n\n\ndef main():\n\timport yaml\n\ttry:\n\t\twith open(\"swagger/swagger.yaml\",\"r\") as f:\n\t\t\tfull=yaml.load(f.read())\n\texcept IOError:\n\t\tprint(\"swagger/swagger.yaml could not be found. A simple-API-Doc was not created\")\n\t\treturn 0\n\t\t\n\tincluded={\n\t\"/e-entity/freme-ner/documents\": [\"post\"],\n\t\"/e-entity/dbpedia-spotlight/documents\": [\"post\"],\n\t\"/e-publishing/html\": [\"post\"],\n\t\"/e-link/documents/\": [\"post\"],\n\t\"/e-translation/tilde\": [\"post\"],\t\n\t}\n\t\n\tfor path in full[\"paths\"].keys():\n\t\tif path not in included:\n\t\t\tdel full[\"paths\"][path]\n\t\telse:\n\t\t\tfor method in included[path]:\n\t\t\t\tif method not in full[\"paths\"][path].keys():\n\t\t\t\t\tdel full[\"paths\"][path][method]\n\t\n\twith open(\"swagger/simple.yaml\",'w') as f:\n\t\tf.write(yaml.dump(full))\n\treturn 0\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"swagger/yamlscript.py","file_name":"yamlscript.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"131597255","text":"from bcc import BPF\nimport glob\nimport os\nimport re\nimport time\nimport argparse\nfrom time import sleep\nimport signal\n\ndebug = 0\n\ndef signal_ignore(signal, frame):\n    print()\n\nclass SignalInterrupt(Exception):\n    def __init__(self, message):\n        super(SignalInterrupt, self).__init__(message)\n\ndef signal_stop(signal, frame):\n    raise SignalInterrupt(\"Interrupted!\")\n\nbpf_text = \"\"\"\n#include <uapi/linux/ptrace.h>\n#include <linux/blkdev.h>\n#include <linux/blk_types.h>\n#include <linux/genhd.h>\n#include <linux/kdev_t.h>\n\ntypedef struct dev_key_s {\n    u64 dev;\n    u64 slot;\n} dev_key_t;\n\nBPF_HISTOGRAM(reads, dev_key_t);\nBPF_HISTOGRAM(writes, dev_key_t);\nBPF_HISTOGRAM(discards, dev_key_t);\n\n// This sucks, but we have no better solution\nstatic dev_t get_devt(struct request *req)\n{\n    struct gendisk *disk = req->rq_disk;\n    return disk->part0.__dev.devt;\n}\n\n// time block I/O\nint trace_req_start(struct pt_regs *ctx, struct request *req)\n{\n    dev_t device = get_devt(req);\n    int major = MAJOR(device);\n    int minor = MINOR(device);\n\n    if (req->__data_len == 0)\n        return 0;\n\n    if (!(CONDITIONALS))\n        return 0;\n\n    dev_key_t key = {\n        .dev = device,\n        .slot = bpf_log2l(req->__data_len),\n    };\n\n    if (req->cmd_flags & REQ_DISCARD)\n        discards.increment(key);\n    else if ((req->cmd_flags & 1) != 0)\n        writes.increment(key);\n    else\n        reads.increment(key);\n    return 0;\n}\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-d\", \"--device\",\n                    help=\"Trace this device only\")\nargs = parser.parse_args()\n\ndisks = []\nif args.device:\n    disks.append({'name': os.path.basename(args.device)})\nelse:\n    dev_patterns = ['sd.*', 'nvme.*', 'nbd.*', 'md.*', \"fio*\", \"etherd*\"]\n    for device in 
glob.glob(\"/sys/block/*\"):\n for pattern in dev_patterns:\n if re.compile(pattern).match(os.path.basename(device)):\n if pattern == \"etherd*\":\n disks.append({'name': os.path.basename(device).replace('!', '/')})\n else:\n disks.append({'name': os.path.basename(device)})\nif debug:\n print(disks)\n\nfirst = True\nconditional_template = \"(major == MAJOR && minor == MINOR)\"\nconditionals = \"\"\nfor disk in disks:\n stinfo = os.stat('/dev/{}'.format(disk['name']))\n disk['major'] = os.major(stinfo.st_rdev)\n disk['minor'] = os.minor(stinfo.st_rdev)\n tmp = conditional_template.replace('MAJOR', \"{}\".format(disk['major']))\n tmp = tmp.replace('MINOR', \"{}\".format(disk['minor']))\n if not first:\n conditionals += \" || \"\n first = False\n conditionals += tmp\n\nif conditionals == \"\":\n conditionals = \"1\"\nbpf_text = bpf_text.replace('CONDITIONALS', conditionals)\n\nif debug:\n print(bpf_text)\n\n# load BPF program\nb = BPF(text=bpf_text)\nb.attach_kprobe(event=\"blk_start_request\", fn_name=\"trace_req_start\")\nb.attach_kprobe(event=\"blk_mq_start_request\", fn_name=\"trace_req_start\")\n\nreads = b.get_table(\"reads\")\nwrites = b.get_table(\"writes\")\ndiscards= b.get_table(\"discards\")\n\nprint(\"Tracing, hit Ctrl+C to exit\")\nsignal.signal(signal.SIGINT, signal_stop)\ntry:\n sleep(99999999)\nexcept SignalInterrupt:\n signal.signal(signal.SIGINT, signal_ignore)\nexcept KeyboardInterrupt:\n signal.signal(signal.SIGINT, signal_ignore)\n\ndef print_device(dev):\n MINORBITS = 20\n MINORMASK = (1 << MINORBITS) - 1\n major = dev >> MINORBITS\n minor = dev & MINORMASK\n for disk in disks:\n if disk['major'] == major and disk['minor'] == minor:\n return disk['name']\n return \"%d-%d\" % (major, minor)\n\nreads.print_log2_hist(\"Reads\", \"dev\", section_print_fn=print_device)\nwrites.print_log2_hist(\"Writes\", \"dev\", section_print_fn=print_device)\ndiscards.print_log2_hist(\"Discards\", \"dev\", section_print_fn=print_device)\n","sub_path":"blk-request-sizes.py","file_name":"blk-request-sizes.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572491020","text":"import unittest\nimport mock\nfrom mapek_framework.utils import Ticker\n\n\nclass UtilsTestCase(unittest.TestCase):\n\n def test_ticker_init(self):\n node_name = 'foo_name'\n topic = 'foo_topic'\n rate = 42\n\n ticker_instance = Ticker(node_name, topic, rate)\n self.assertEqual(ticker_instance._node_name, node_name)\n self.assertEqual(ticker_instance._topic, topic)\n self.assertEqual(ticker_instance._rate, rate)\n\n def test_ticker_spin(self):\n node_name = 'foo_name'\n topic = 'foo_topic'\n rate = 42\n\n def is_shutdown():\n if not hasattr(is_shutdown, 'already_executed'):\n is_shutdown.already_executed = True\n return False\n\n return True\n\n ticker_instance = Ticker(node_name, topic, rate)\n\n with \\\n mock.patch('rospy.is_shutdown', side_effect=is_shutdown) as mock_is_shutdown, \\\n mock.patch('rospy.Rate') as mock_rate, \\\n mock.patch('rospy.init_node') as mock_init_node, \\\n mock.patch('rospy.Publisher'):\n\n ticker_instance.spin()\n self.assertEqual(mock_init_node.call_count, 1)\n mock_init_node.assert_called_with(node_name)\n self.assertEqual(mock_rate.call_count, 1)\n mock_rate.assert_called_with(rate)\n self.assertEqual(mock_is_shutdown.call_count, 
2)\n","sub_path":"mapek_framework/src/test_mapek_framework/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278425439","text":"from rh_renderer.tilespec_renderer import TilespecRenderer\nfrom rh_renderer.models import AffineModel, Transforms\nfrom .datasource import DataSource\nfrom urllib.error import HTTPError\nfrom copy import deepcopy\nimport numpy as np\nimport dataspec\nimport logging\nimport json\nimport glob\nimport os\nfrom bfly.logic import core\n\nclass Tilespecs(DataSource):\n\n def __init__(self, core, datapath):\n '''\n @override\n '''\n\n if not dataspec.can_load(datapath):\n raise HTTPError(\n None, 404,\n \"Failed to load %s as multibeam data source\" % datapath,\n [], None)\n\n super(Tilespecs, self).__init__(core, datapath)\n\n def index(self):\n '''\n @override\n '''\n\n self.layer_ts = {}\n self.layer_renderer = {}\n self.min_x = np.inf\n self.max_x = - np.inf\n self.min_y = np.inf\n self.max_y = - np.inf\n self.min_z = np.inf\n self.max_z = - np.inf\n ts_fnames = glob.glob(os.path.join(self._datapath, '*.json'))\n for ts_fname in ts_fnames:\n # Load the tilespecs from the file\n tilespecs = None\n with open(ts_fname, 'r') as data:\n tilespecs = json.load(data)\n\n\n\n layer = tilespecs[0][\"layer\"]\n self.min_z = min(self.min_z, layer)\n self.max_z = max(self.max_z, layer)\n self.layer_ts[layer] = tilespecs\n for ts in tilespecs:\n x_min, x_max, y_min, y_max = ts[\"bbox\"]\n self.min_x = min(self.min_x, x_min)\n self.max_x = max(self.max_x, x_max)\n self.min_y = min(self.min_y, y_min)\n self.max_y = max(self.max_y, y_max)\n\n self.layer_renderer[layer] = TilespecRenderer(tilespecs, self.dtype)\n\n self.tile_width = ts[\"width\"]\n self.tile_height = ts[\"height\"]\n self.blocksize = np.array((4096, 4096))\n\n\n super(Tilespecs, self).index()\n\n def get_type(self):\n '''\n @override\n '''\n return self.load(0,0,0,0).single_tiles[0].render()[0].dtype\n\n def load_cutout(self, x0, x1, y0, y1, z, w):\n '''\n @override\n '''\n cutout_bounds = np.array([x0, y0, x1, y1])/(2.0 ** w)\n cutout_bounds = cutout_bounds.astype(np.uint32)-(0,0,1,1)\n img = self.load(0,0,z,w).single_tiles[0].crop(*cutout_bounds)[0]\n return img\n\n def load(self, x, y, z, w):\n '''\n @override\n '''\n plane_rendered = deepcopy(self.layer_renderer[z])\n if w > 0:\n model = AffineModel(m=np.eye(3) / 2.0 ** w)\n plane_rendered.add_transformation(model)\n\n return plane_rendered\n\n def get_boundaries(self):\n\n return self.max_x - self.min_x, self.max_y - self.min_y, self.max_z\n","sub_path":"bfly/input/tilespecs.py","file_name":"tilespecs.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598027496","text":"\nimport csv\n\ndef convert_to_dict(filename):\n \"\"\"\n Convert a CSV file to a list of Python dictionaries.\n \"\"\"\n datafile = open(filename, newline='')\n\n my_reader = csv.DictReader(datafile)\n\n list_of_dicts = []\n for row in my_reader:\n list_of_dicts.append( dict(row) )\n\n datafile.close()\n return list_of_dicts\n","sub_path":"parks/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"234075663","text":"#!/usr/bin/python\n# -*-coding:utf-8-*-\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageOps\n\n\nclass 
ImageConverter:\n @staticmethod\n def image2bmp(path):\n img = Image.open(path)\n img.thumbnail((384, 9999), Image.ANTIALIAS) # Reduce to fit and maintain aspect ratio.\n img = ImageOps.invert(img) # Invert image for printing\n img = img.convert('1') # Convert to 1-bit with dither\n return img.tobytes()\n\n\nclass TextConverter:\n @staticmethod\n def text2bmp(text, font_path, font_size=40):\n img = Image.new('1', (384, 100), 0) # Black background\n draw = ImageDraw.Draw(img)\n font = ImageFont.truetype(font_path, font_size)\n draw.text((0, 0), text, 1, font=font) # White text\n return img.tobytes()\n","sub_path":"image_process.py","file_name":"image_process.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306352322","text":"from subprocess import Popen, PIPE\nfrom collections import deque\n\n\np = Popen([\"ls\", \"-lR\", \"/usr/local/lib\"], stdout=PIPE, universal_newlines=True)\n\n#for line in p.stdout:\n# if \"inet \" in line:\n# print(line.split()[1])\n\n#lines = p.stdout.readlines()\n#print(lines[-5:])\n\nclass RingBuffer:\n def __init__(self, size):\n self.queue = deque()\n self.size = size\n self.index = 0\n\n def push(self, data):\n self.queue.append(data)\n self.index += 1\n if self.index >= self.size:\n self.queue.popleft()\n\n def __str__(self):\n return \"\".join(self.queue)\n\nqueue = RingBuffer(5)\n\nfor line in p.stdout:\n queue.push(line)\n\nprint(queue)\nret = p.wait()\n","sub_path":"Learning/Network_process_WA/Day1/2020_Jul23/subprocess/get_output_ringbuffer_old.py","file_name":"get_output_ringbuffer_old.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"107937103","text":"\n\nfrom xai.brain.wordbase.nouns._consolation import _CONSOLATION\n\n#calss header\nclass _CONSOLATIONS(_CONSOLATION, ):\n\tdef __init__(self,): \n\t\t_CONSOLATION.__init__(self)\n\t\tself.name = \"CONSOLATIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"consolation\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_consolations.py","file_name":"_consolations.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73425344","text":"print('Please think a number between 0 and 100!')\n\nhigh = 100\nlow = 0\nans = (high + low) / 2.0\n\nwhile True:\n ans = (high + low) // 2\n print('Is your secret number ' + str(ans) + '?')\n print('Enter \\'h\\' to indicate the guess is too high.', end='')\n print(' Enter \\'l\\' to indicate the guess is too low.', end='')\n print('Enter \\'c\\' to indicate I guessed correctly. ', end='')\n char = input('')\n \n if char == 'c':\n break\n elif char == 'h':\n high = ans\n elif char == 'l':\n low = ans\n else:\n print('Sorry, I did not understand your input.')\n continue\n\nprint('Game over. 
Your secret number was:', ans)","sub_path":"guess_my_number.py","file_name":"guess_my_number.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595936662","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 24 21:39:36 2021\n\n@author: subject F\n\"\"\"\nimport numpy as np\nfrom scipy import ndimage\nfrom skimage import color, io\nimport glob\nimport logging\nimport time\nfrom tqdm import tqdm\n\ndef compute_eight_dimensional_feature(image):\n \"\"\" A function to compute the 8-dimensional features for an image as used in:\n O. Tuzel, F. Porikli and P. Meer,\n \"Pedestrian Detection via Classification on Riemannian Manifolds\",\n in IEEE Transactions on Pattern Analysis and Machine Intelligence,\n vol. 30, no. 10, pp. 1713-1727, Oct. 2008.\n doi: 10.1109/TPAMI.2008.75,\n\n at eq. (11) p. 1716.\n\n Usage: image_features = compute_eight_dimensional_feature(image)\n Inputs:\n * image = a numpy array of shape (h, w) corresponding to the image.\n Outputs:\n * image_features = a numpy array of shape (h, w, 8) corresponding\n to the tensor of image features.\"\"\"\n\n x = np.arange(image.shape[1])\n y = np.arange(image.shape[0])\n X, Y = np.meshgrid(x,y)\n Ix = ndimage.sobel(image,axis=1,mode='constant')\n Ixx = ndimage.sobel(Ix,axis=1,mode='constant')\n Iy = ndimage.sobel(image,axis=0,mode='constant')\n Iyy = ndimage.sobel(Iy,axis=0,mode='constant')\n I_abs = np.hypot(np.abs(Ix), np.abs(Iy))\n A = np.arctan2(np.abs(Iy), np.abs(Ix))\n\n return np.dstack([X, Y, np.abs(Ix), np.abs(Iy),\n I_abs, np.abs(Ixx), np.abs(Iyy), A])\n\ndef vech(mat):\n # Gets Fortran-order\n return mat.T.take(_triu_indices(len(mat)))\n\ndef _triu_indices(n):\n rows, cols = np.triu_indices(n)\n return rows * n + cols\n\ndef get_features(dataset_path):\n image_paths = glob.glob(dataset_path)\n N = len(image_paths)\n cov_list = np.zeros((N, 8, 8))\n euc_list = np.zeros((N, 36))\n for i in range(N):\n im = color.rgb2gray(io.imread(image_paths[i]))\n im_features = compute_eight_dimensional_feature(im)\n w, h, d = im_features.shape\n cov_matrix = np.cov(im_features.reshape(w*h, d).T)\n cov_list[i, :, :] = cov_matrix\n euc_list[i, :] = vech(cov_matrix)\n\n return euc_list, cov_list\n\ndef process_classes_list(list_of_classes):\n samples = []\n samples_cov = []\n labels = []\n \n for i in range(len(list_of_classes)):\n print('Processing class ', i)\n data, data_cov = get_features(list_of_classes[i])\n samples.append(data)\n samples_cov.append(data_cov)\n labels.append(np.ones(data.shape[0]) * i)\n \n return samples, samples_cov, labels\n\ndef compute_overlap_percent(region_1, region_2):\n \"\"\" Function to compute the overlap in percent between two regions.\n\n Usage: overlap_percent = compute_overlap_percent(region_1, region_2)\n Inputs:\n * region_1 = an list of ints corresponding to x_1, y_1, w_1, h_1\n * region_2 = an list of ints corresponding to x_2, y_2, w_2, h_2\n Outputs:\n * overlap_percent = a float corresponding to the overlap in percent.\n \"\"\"\n x_1, y_1, w_1, h_1 = region_1\n x_2, y_2, w_2, h_2 = region_2\n\n if x_1 < x_2:\n w_j_tilde = w_1\n else:\n w_j_tilde = w_2\n\n if y_1 < y_2:\n h_j_hat = h_1\n else:\n h_j_hat = h_2\n\n if (np.abs(x_1-x_2) < w_j_tilde) and ( np.abs(y_1-y_2) overlap_threshold:\n is_not_overlapping = False\n break\n if is_not_overlapping:\n sub_regions_list.append( (x_j, y_j, w_j, h_j) )\n j = j + 1\n if progress:\n pbar.update(1)\n if j < N_R:\n logging.warning(\"Timed out after %.2f 
minutes and %d sub_regions\", (time.time()-t_beginning)/60, j)\n logging.warning(\"The remainder of generated sub-regions won't have the overlap constraint\")\n for index in range(j, N_R):\n x_j = rng.randint(0, w - int(np.ceil(w/n_w)))\n y_j = rng.randint(0, h - int(np.ceil(h/n_h)))\n w_j = rng.randint(int(np.ceil(w/n_w)), w - x_j)\n h_j = rng.randint(int(np.ceil(h/n_h)), h - y_j)\n sub_regions_list.append( (x_j, y_j, w_j, h_j) )\n if progress:\n pbar.update(1)\n\n return sub_regions_list\n\ndef generate_sub_regions_random(N_R, h, w, n_h, n_w, seed=None, progress=False):\n \"\"\" Function to obtain a uniformly random sampling for the sub_regions.\n\n Usage: sub_regions_list = generate_sub_regions_random(N_R, h, w, n_h, n_w, seed, progress)\n Inputs:\n * N_R = an int corresponding to number of regions to generate.\n * h = an int corresponding to height of the image.\n * w = an int corresponding to width of the image.\n * n_w = an int so that the sub_regions are of width that is minimum \\ceil{w/n_w}.\n * n_h = an int so that the sub_regions are of height that is minimum \\ceil{h/n_h}.\n * seed = an int which is the seed for rng so that it is reproducible.\n * progress = a boolean to show or not a progress bar for the generation.\n Outputs:\n * sub_regions_list = a list of ints [x_j, y_j, w_j, h_j] where:\n - (x_j, y_j) are the coordinates of the left corner of the region\n - (w_j, h_j) are the width and heigth of the region\n \"\"\"\n\n\n if seed is None:\n rng = np.random.RandomState(seed)\n else:\n rng = seed\n\n sub_regions_list = []\n if progress:\n pbar = tqdm(total=N_R)\n for j in range(N_R):\n x_j = rng.randint(0, w - int(np.ceil(w/n_w)))\n y_j = rng.randint(0, h - int(np.ceil(h/n_h)))\n w_j = rng.randint(int(np.ceil(w/n_w)), w - x_j)\n h_j = rng.randint(int(np.ceil(h/n_h)), h - y_j)\n sub_regions_list.append( (x_j, y_j, w_j, h_j) )\n if progress:\n pbar.update(1)\n return sub_regions_list","sub_path":"summer_project/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70178789","text":"import File_Control as Control\r\nimport time\r\nimport copy\r\nimport re\r\n\r\n\r\n# 创建数据块,一个块存放8长度数据\r\ndef creat_memory():\r\n for i in range(1000):\r\n block = Control.Message_Block(i)\r\n memory.append(block)\r\n\r\n\r\n# 获取栈中空闲内存块数量\r\ndef get_control_stack_free_size():\r\n return block_control_stack[0]\r\n\r\n\r\n# 块进行分组\r\ndef grouping_memory():\r\n group_member = []\r\n count = 0\r\n for number in range(len(memory)):\r\n group_member.append(memory[number].num)\r\n count += 1\r\n # 100个一组\r\n if count == stack_size:\r\n # 索引块存入改组索引\r\n memory[number].set_group(group_member)\r\n memory[number].group = copy.copy(group_member)\r\n memory[number].group.reverse()\r\n memory_group_leader.append(memory[number].num)\r\n group_member.clear()\r\n count = 0\r\n\r\n\r\ndef get_next_block_group(last_block):\r\n next_num = last_block.next_group\r\n next_block = memory[next_num]\r\n next_group = next_block.group\r\n for block in next_group:\r\n block_control_stack.append(block)\r\n block_control_stack[0] = stack_size\r\n last_block.group.clear()\r\n del memory_group_leader[0]\r\n\r\n\r\ndef write_last_block(last_block, write_message, surplus_size):\r\n text = write_message\r\n textArr = re.findall('.{' + str(surplus_size) + '}', text)\r\n textArr.append(text[(len(textArr) * surplus_size):])\r\n last_message = textArr[0]\r\n del textArr[0]\r\n write_message = ''.join(textArr)\r\n 
last_block.message = last_block.message + last_message\r\n last_block.used_size = last_block.size\r\n textArr = re.findall('.{' + str(last_block.size) + '}', write_message)\r\n textArr.append(write_message[(len(textArr) * last_block.size):])\r\n if len(textArr[len(textArr) - 1]) == 0:\r\n del textArr[len(textArr) - 1]\r\n return textArr\r\n\r\n\r\ndef get_block():\r\n block_number = block_control_stack.pop()\r\n try:\r\n remove_index = memory[block_control_stack[1]].group.index(block_number)\r\n del memory[block_control_stack[1]].group[remove_index]\r\n except:\r\n remove_index = memory[block_number].group.index(block_number)\r\n del memory[block_number].group[remove_index]\r\n block = memory[block_number]\r\n return block\r\n\r\n\r\ndef get_file():\r\n user_name = input('输入用户名\\n')\r\n if user_name not in user_dict.keys():\r\n print('没有该用户\\n')\r\n return None\r\n user = user_dict[user_name]\r\n user_file_dict = user.user_file\r\n file_name = input('输入文件名\\n')\r\n try:\r\n file = user_file_dict[file_name]\r\n except:\r\n print('没有该文件')\r\n return None\r\n return file\r\n\r\n\r\ndef write_block_message(message, front_block, file):\r\n if block_control_stack[0] > 1:\r\n block = get_block()\r\n block.sign = True\r\n block.message = message\r\n front_block.next_block = block.num\r\n if len(message) < block.size:\r\n block.used_size = len(message)\r\n else:\r\n block.used_size = block.size\r\n file.last_block = block.num\r\n block_control_stack[0] = block_control_stack[0] - 1\r\n else:\r\n block = get_block()\r\n get_next_block_group(block)\r\n block.sign = True\r\n block.message = message\r\n front_block.next_block = block.num\r\n if len(message) < block.size:\r\n block.used_size = len(message)\r\n else:\r\n block.used_size = block.size\r\n file.last_block = block.num\r\n return block\r\n\r\n\r\n# 释放内存时修改组\r\ndef change_group_leader(block_num):\r\n # 栈还没有满\r\n if block_control_stack[0] < stack_size:\r\n block_control_stack.append(block_num)\r\n leader = block_control_stack[1]\r\n leader_block = memory[leader]\r\n leader_block.group.append(block_num)\r\n block_control_stack[0] += 1\r\n # 栈满了\r\n else:\r\n block = memory[block_num]\r\n leader = block_control_stack[1]\r\n while block_control_stack[0] != 0:\r\n block_control_stack.pop()\r\n block_control_stack[0] = block_control_stack[0] - 1\r\n block_control_stack.append(block_num)\r\n block.next_group = leader\r\n block.group.append(block_num)\r\n block_control_stack[0] = 1\r\n memory_group_leader.insert(0, block_num)\r\n\r\n\r\ndef show_each_block(file):\r\n block_num = file.first_block\r\n file_memory = {}\r\n # 把文件的所有内存块读入\r\n while block_num != -1:\r\n block = memory[block_num]\r\n file_memory[block_num] = block\r\n block_num = block.next_block\r\n for key in file_memory.keys():\r\n print('Block Number:{0} Block Message:{1}'.format(key, file_memory[key].message))\r\n return file_memory\r\n\r\n\r\ndef create_file():\r\n user_name = input('输入用户名\\n')\r\n if user_name not in user_dict.keys():\r\n print('没有该用户\\n')\r\n return None\r\n user = user_dict[user_name]\r\n file_name = input('输入新建文件名字\\n')\r\n if file_name in user.user_file:\r\n print('文件已存在创建失败')\r\n return None\r\n elif len(file_name) == 0:\r\n print('文件名不能为空')\r\n return None\r\n file = Control.File(file_name)\r\n # 弹出一个块给他存放信息\r\n # 可用块大于一个\r\n if block_control_stack[0] > 1:\r\n block = get_block()\r\n block.sign = True\r\n file.first_block = block.num\r\n file.last_block = block.num\r\n block_control_stack[0] = block_control_stack[0] - 1\r\n user.user_file[file_name] = file\r\n # 
可用块只剩最后一个进行切换下一组块\r\n else:\r\n block = get_block()\r\n get_next_block_group(block)\r\n block.sign = True\r\n file.first_block = block.num\r\n file.last_block = block.num\r\n block_control_stack[0] = stack_size\r\n user.user_file[file_name] = file\r\n\r\n\r\ndef read_file():\r\n file = get_file()\r\n if file is None:\r\n return None\r\n block_num = file.first_block\r\n message = ''\r\n while block_num != -1:\r\n block = memory[block_num]\r\n message = message + block.message\r\n block_num = block.next_block\r\n file.visit = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n print(file.show())\r\n print('File Message:\\n' + message)\r\n print('Block Detail Message:')\r\n show_each_block(file)\r\n\r\n\r\ndef write_file():\r\n file = get_file()\r\n if file is None:\r\n return None\r\n block_num = file.last_block\r\n last_block = memory[block_num]\r\n block_size = last_block.size\r\n surplus_size = block_size - last_block.used_size\r\n write_message = input('输入写入文件的数据\\n')\r\n if len(write_message) > surplus_size:\r\n surplus_message = write_last_block(last_block, write_message, surplus_size)\r\n front_block = last_block\r\n for message in surplus_message:\r\n front_block = write_block_message(message, front_block, file)\r\n file.block_sum += 1\r\n file.modify = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n else:\r\n last_block.message = last_block.message + write_message\r\n file.modify = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n file.block_sum += 1\r\n file.size += len(write_message)\r\n\r\n\r\ndef freely_write():\r\n file = get_file()\r\n if file is None:\r\n return None\r\n file_memory = show_each_block(file)\r\n op = input('选择修改类型:\\n'\r\n '1.change\\n'\r\n '2.insert\\n')\r\n if op == '1':\r\n modify_num = int(input('输入修改的盘块号\\n'))\r\n modify_message = input('输入修改后的数据\\n')\r\n modify_file(file, modify_num, modify_message)\r\n file.modify = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n else:\r\n insert_num = int(input('输入插入数据的盘块位置\\n'))\r\n insert_message = input('输入插入的数据\\n')\r\n insert_file(file, insert_num, insert_message)\r\n file.modify = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n\r\n\r\ndef freely_write_block(message, front_block):\r\n if block_control_stack[0] > 1:\r\n block = get_block()\r\n block.sign = True\r\n block.message = message\r\n front_block.next_block = block.num\r\n if len(message) < block.size:\r\n block.used_size = len(message)\r\n else:\r\n block.used_size = block.size\r\n block_control_stack[0] = block_control_stack[0] - 1\r\n else:\r\n block = get_block()\r\n get_next_block_group(block)\r\n block.sign = True\r\n block.message = message\r\n front_block.next_block = block.num\r\n if len(message) < block.size:\r\n block.used_size = len(message)\r\n else:\r\n block.used_size = block.size\r\n return block\r\n\r\n\r\ndef modify_file(file, block_num, message):\r\n block = memory[block_num]\r\n if len(message) > block.size:\r\n textArr = re.findall('.{' + str(block.size) + '}', message)\r\n textArr.append(message[(len(textArr) * block.size):])\r\n block.message = textArr[0]\r\n del textArr[0]\r\n message = ''.join(textArr)\r\n # 改为插入方式\r\n insert_file(file, block_num, message)\r\n else:\r\n block.message = message\r\n\r\n\r\ndef insert_file(file, block_num, message):\r\n file.size = file.size + len(message)\r\n block = memory[block_num]\r\n last = block.next_block\r\n textArr = re.findall('.{' + str(block.size) + '}', message)\r\n textArr.append(message[(len(textArr) * block.size):])\r\n front_block = 
block\r\n for mess in textArr:\r\n front_block = freely_write_block(mess, front_block)\r\n file.block_sum += 1\r\n front_block.next_block = last\r\n\r\n\r\ndef drop_file():\r\n user_name = input('输入用户名\\n')\r\n if user_name not in user_dict.keys():\r\n print('没有该用户\\n')\r\n return None\r\n user = user_dict[user_name]\r\n user_file_dict = user.user_file\r\n file_name = input('输入文件名\\n')\r\n try:\r\n file = user_file_dict[file_name]\r\n except:\r\n print('没有该文件')\r\n return None\r\n if file.backup_sum == 1:\r\n block_num = file.first_block\r\n while block_num != -1:\r\n block = memory[block_num]\r\n block.used_size = 0\r\n block.message = ''\r\n block.sign = False\r\n change_group_leader(block_num)\r\n block_num = block.next_block\r\n block.next_block = -1\r\n file.block_sum -= 1\r\n else:\r\n file.backup_sum -= 1\r\n del user_file_dict[file_name]\r\n\r\n\r\ndef freely_drop():\r\n file = get_file()\r\n if file is None:\r\n return None\r\n file_memory = show_each_block(file)\r\n indexs = list(file_memory.keys())\r\n drop = input('输入要删除的块:\\n')\r\n drop = drop.split(' ')\r\n for number in drop:\r\n ind = indexs.index(int(number))\r\n if int(number) == file.first_block:\r\n first = memory[int(number)]\r\n # 修改文件信息\r\n file.first_block = first.next_block\r\n file.block_sum -= 1\r\n file.size = file.size - first.used_size\r\n # 处理内存块\r\n first.used_size = 0\r\n first.message = ''\r\n first.sign = False\r\n change_group_leader(first.num)\r\n elif int(number) == file.last_block:\r\n last = memory[int(number)]\r\n # 修改文件信息\r\n front = memory[indexs[ind-1]]\r\n file.last_block = front.num\r\n file.block_sum -= 1\r\n file.size = file.size - last.used_size\r\n # 处理内存块\r\n last.used_size = 0\r\n last.message = ''\r\n last.sign = False\r\n change_group_leader(last.num)\r\n else:\r\n # 获取该块的前后两个块\r\n this = memory[indexs[ind]]\r\n front = memory[indexs[ind - 1]]\r\n back = memory[indexs[ind + 1]]\r\n # 修改文件信息\r\n file.block_sum -= 1\r\n file.size = file.size - this.used_size\r\n # 处理内存块\r\n this.used_size = 0\r\n this.message = ''\r\n this.sign = False\r\n front.next_block = back.num\r\n change_group_leader(this.num)\r\n\r\n\r\ndef creat_backup_file():\r\n user_name = input('输入用户名\\n')\r\n if user_name not in user_dict.keys():\r\n print('没有该用户\\n')\r\n return None\r\n user = user_dict[user_name]\r\n user_file_dict = user.user_file\r\n root_user_name = input('输入源文件用户名\\n')\r\n if root_user_name not in user_dict.keys():\r\n print('没有该用户\\n')\r\n return None\r\n root_user = user_dict[root_user_name]\r\n root_dict = root_user.user_file\r\n file_name = input('输入文件名\\n')\r\n try:\r\n file = root_dict[file_name]\r\n except:\r\n print('没有该文件')\r\n return None\r\n user_file_dict[file_name] = file\r\n file.backup_sum += 1\r\n\r\n\r\ndef create_user():\r\n user_name = input('输入用户名称\\n')\r\n user = Control.User(user_name)\r\n user_dict[user_name] = user\r\n\r\n\r\ndef show_users():\r\n if len(user_dict.keys()) == 0:\r\n print('没有用户')\r\n print(user_dict.keys())\r\n\r\n\r\ndef show_files():\r\n user_name = input('输入用户名\\n')\r\n user = user_dict[user_name]\r\n user.show_file()\r\n\r\n\r\ndef show_stack():\r\n print('Group List: ', memory_group_leader)\r\n print('Block Control Stack:', block_control_stack)\r\n\r\n\r\nif __name__ == '__main__':\r\n # 存放用户\r\n user_dict = {}\r\n stack_size = 3\r\n # 内存\r\n memory = []\r\n creat_memory()\r\n block_control_stack = [stack_size]\r\n # 存放每个组组员的那个块的编号\r\n memory_group_leader = []\r\n grouping_memory()\r\n for num in range(len(memory_group_leader)):\r\n try:\r\n 
memory[memory_group_leader[num]].next_group = memory[memory_group_leader[num + 1]].num\r\n except:\r\n memory[memory_group_leader[num]].next_group = 0\r\n # 初始化栈\r\n for member in memory[memory_group_leader[0]].group:\r\n block_control_stack.append(member)\r\n while True:\r\n print('输入操作:\\n'\r\n '1.create file\\n'\r\n '2.read file\\n'\r\n '3.write file\\n'\r\n '4.freely write\\n'\r\n '5.drop file\\n'\r\n '6.freely drop\\n'\r\n '7.creat backup file\\n'\r\n '8.create user\\n'\r\n '9.show users\\n'\r\n '10.show user`s files\\n'\r\n '11.show stack\\n')\r\n op = input('输入操作编号\\n')\r\n if op == '1':\r\n create_file()\r\n elif op == '2':\r\n read_file()\r\n elif op == '3':\r\n write_file()\r\n elif op == '4':\r\n freely_write()\r\n elif op == '5':\r\n drop_file()\r\n elif op == '6':\r\n freely_drop()\r\n elif op == '7':\r\n creat_backup_file()\r\n elif op == '8':\r\n create_user()\r\n elif op == '9':\r\n show_users()\r\n elif op == '10':\r\n show_files()\r\n elif op == '11':\r\n show_stack()\r\n else:\r\n print('输入操作有误')\r\n","sub_path":"File_Memory_Control.py","file_name":"File_Memory_Control.py","file_ext":"py","file_size_in_byte":15339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125280318","text":"from mpl_toolkits.mplot3d import Axes3D\nimport time\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nparams = {'figure.subplot.left': 0.0,\n 'figure.subplot.right': 1.0,\n 'figure.subplot.bottom': 0.0,\n 'figure.subplot.top': 1.0}\nplt.rcParams.update(params)\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nimage = mpimg.imread('./mailru.jpg')\ndata = image.reshape((image.shape[0] * image.shape[1], 3))\n\nclusters = 64\nd_min = np.min(data)\nd_max = np.max(data)\nt0 = time.time()\n\n\ndef init_list_of_objects(size):\n list_of_objects = list()\n for i in range(0, size):\n list_of_objects.append(list())\n return list_of_objects\n\nd_depth = data.shape[1]\ncntrs = np.random.randint(data.min(), data.max(), [clusters, d_depth])\n\nnew_cnt = 1\nwhile new_cnt > 0:\n t1 = time.time()\n new_cnt = 0\n cent_data = init_list_of_objects(clusters)\n for dot in range(len(data)):\n clust_dist = np.sum(np.abs(cntrs - np.array([data[dot]] * 64)) ** 2, axis=-1) ** (1. 
+{"seq_id":"125280318","text":"from mpl_toolkits.mplot3d import Axes3D\nimport time\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nparams = {'figure.subplot.left': 0.0,\n          'figure.subplot.right': 1.0,\n          'figure.subplot.bottom': 0.0,\n          'figure.subplot.top': 1.0}\nplt.rcParams.update(params)\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nimage = mpimg.imread('./mailru.jpg')\ndata = image.reshape((image.shape[0] * image.shape[1], 3))\n\nclusters = 64\nd_min = np.min(data)\nd_max = np.max(data)\nt0 = time.time()\n\n\ndef init_list_of_objects(size):\n    list_of_objects = list()\n    for i in range(0, size):\n        list_of_objects.append(list())\n    return list_of_objects\n\nd_depth = data.shape[1]\ncntrs = np.random.randint(data.min(), data.max(), [clusters, d_depth])\n\nnew_cnt = 1\nwhile new_cnt > 0:\n    t1 = time.time()\n    new_cnt = 0\n    cent_data = init_list_of_objects(clusters)\n    for dot in range(len(data)):\n        clust_dist = np.sum(np.abs(cntrs - np.array([data[dot]] * clusters)) ** 2, axis=-1) ** (1. / 2)\n        min_ind = clust_dist.argmin()\n        cent_data[min_ind].append(dot)\n    # ax.scatter(cntrs.T[0], cntrs.T[1], cntrs.T[2])\n    print(time.time() - t1)\n\n    for string_n in range(len(cent_data)):\n        if len(cent_data[string_n]) > 0:\n            new_cen = np.zeros([1, d_depth])\n            for elem_s in range(len(cent_data[string_n])):\n                new_cen = new_cen + data[cent_data[string_n][elem_s]]\n            dif_cen = (new_cen / (len(cent_data[string_n]))).astype(int)\n            # a center has moved if any channel changed\n            if (cntrs[string_n] - dif_cen != np.zeros([1, d_depth])).any():\n                new_cnt += 1\n                cntrs[string_n] = dif_cen\n\nnew_data = np.zeros(data.shape)\nfor row in range(len(cent_data)):\n    print(row, cntrs[row], len(cent_data[row]))\n    for item in range(len(cent_data[row])):\n        # write the quantized color back to the pixel index stored in cent_data\n        new_data[cent_data[row][item]] = cntrs[row]\n\nprint(time.time() - t0)\nprint(cntrs)\n\n# ax.scatter(centers.T[0], centers.T[1], centers.T[2])\n# ax.set_zlim(d_min, d_max)\n# plt.xlabel(\"X\")\n# plt.ylabel(\"Y\")\n# plt.legend()\n# plt.show()\n","sub_path":"SM3_Clusters/KMeansTry.py","file_name":"KMeansTry.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
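The KMeansTry sample above computes the distance from one pixel to all centers inside a Python loop, which dominates its runtime. NumPy broadcasting produces the full point-to-center distance matrix in one call; this is a minimal sketch on synthetic data, not the sample's actual pipeline.

```python
import numpy as np

# Synthetic stand-ins for the sample's `data` (N x 3 pixels) and `cntrs` (K x 3 centers).
rng = np.random.default_rng(0)
data = rng.integers(0, 256, size=(1000, 3)).astype(float)
cntrs = rng.integers(0, 256, size=(64, 3)).astype(float)

# (N, 1, 3) - (1, K, 3) broadcasts to (N, K, 3); the norm over the last axis
# yields an (N, K) distance matrix with no per-pixel Python loop.
dists = np.linalg.norm(data[:, None, :] - cntrs[None, :, :], axis=-1)
assignments = dists.argmin(axis=1)  # nearest center per pixel
assert assignments.shape == (len(data),)
```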
+{"seq_id":"313946209","text":"from __future__ import absolute_import\n\nfrom django.core.urlresolvers import reverse\n\nfrom sentry.assistant.guides import GUIDES\nfrom sentry.testutils import APITestCase\n\n\nclass AssistantActivity(APITestCase):\n    def setUp(self):\n        super(AssistantActivity, self).setUp()\n        self.login_as(user=self.user)\n        self.path = reverse('sentry-api-0-assistant')\n\n    def test_invalid_inputs(self):\n        # Invalid guide id.\n        resp = self.client.put(self.path, {\n            'guide_id': 1938,\n        })\n        assert resp.status_code == 400\n\n        # Invalid status.\n        resp = self.client.put(self.path, {\n            'guide_id': 1,\n            'status': 'whats_my_name_again',\n        })\n        assert resp.status_code == 400\n\n    def test_activity(self):\n        resp = self.client.get(self.path)\n        assert resp.status_code == 200\n        assert resp.data == GUIDES\n\n        # Dismiss the guide and make sure it is not returned again.\n        resp = self.client.put(self.path, {\n            'guide_id': 2,\n            'status': 'dismissed',\n        })\n        assert resp.status_code == 201\n        resp = self.client.get(self.path)\n        assert resp.status_code == 200\n        assert resp.data == {k: v for k, v in GUIDES.items() if v['id'] != 2}\n","sub_path":"tests/sentry/api/endpoints/test_assistant.py","file_name":"test_assistant.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"89952565","text":"#! /Users/xiaotongli/anaconda3/bin/python\n# -*- coding: utf-8 -*-\n# @Time    : 10/6/18 10:49 AM\n# @Author  : Xiaotong Li\n# @School  : University of California, Santa Cruz\n# @FileName: isPalindrome.py\n# @Software: PyCharm\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def isPalindrome(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: bool\n        \"\"\"\n        rev = None\n        slow = fast = head\n        while fast and fast.next:\n            fast = fast.next.next\n            rev, rev.next, slow = slow, rev, slow.next\n        if fast:\n            slow = slow.next\n        while rev and rev.val == slow.val:\n            slow = slow.next\n            rev = rev.next\n        return not rev\n","sub_path":"Python/LinkedList/isPalindrome.py","file_name":"isPalindrome.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
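The isPalindrome sample above reverses the first half of the list in place while a fast pointer locates the middle. A self-contained sanity check of that idea follows, re-declaring a minimal ListNode rather than importing the sample:

```python
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def is_palindrome(head):
    rev, slow, fast = None, head, head
    while fast and fast.next:
        fast = fast.next.next
        rev, rev.next, slow = slow, rev, slow.next  # reverse the first half in place
    if fast:            # odd length: skip the middle node
        slow = slow.next
    while rev and rev.val == slow.val:
        slow, rev = slow.next, rev.next
    return not rev

def build(vals):
    head = None
    for v in reversed(vals):
        node = ListNode(v)
        node.next = head
        head = node
    return head

assert is_palindrome(build([1, 2, 2, 1]))
assert is_palindrome(build([1, 2, 3, 2, 1]))
assert not is_palindrome(build([1, 2]))
```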
+{"seq_id":"317775444","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author: Gillett Hernandez\n# @Date:   2016-07-14 17:06:40\n# @Last Modified by:   Gillett Hernandez\n# @Last Modified time: 2017-08-10 12:39:06\n\nfrom euler_funcs import timed\n\ndef count_routes(n):\n    result = 1\n\n    for i in range(1, n+1):\n        result = ((n+i) * result) // i\n\n    return result\n\n@timed\ndef main():\n    print(count_routes(20))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Python/problem_15.py","file_name":"problem_15.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"34027765","text":"## A function that checks for winning bingo cards. A crossed off number will be\n#  defined as the number 0, and a winning bingo card will have a line of five\n#  zeros, either vertical, horizontal or diagonal. Parameters: A dictionary\n#  simulating a bingo card. Returns: True or False (for a win).\n#  The main() program demonstrates the function by creating several bingo cards,\n#  displaying them and indicating whether or not they contain a winning line;\n#  at least one of the cards does not contain a winning line. ##\n\n\ndef bingoCheck(card):\n    win = False\n\n    # Vertical line check.\n    for letter in card:\n        if sum(card[letter]) == 0:\n            win = True\n\n    # horizontal line check\n    if not win:\n        index = 0\n        while index < 5:\n            count = 0\n            for letter in card:\n                count += card[letter][index]\n            if count == 0:\n                win = True\n            index += 1\n\n    # diagonal line check:\n    if not win:\n        index = 0\n        tl_br = 0\n        bl_tr = 0\n        for letter in card:  # first checking top-left to bottom-right\n            tl_br += card[letter][index]\n            index += 1\n        index -= 1  # setting index to 4\n        for letter in card:  # then checking bottom left to top-right\n            bl_tr += card[letter][index]\n            index -= 1\n        if tl_br == 0 or bl_tr == 0:\n            win = True\n\n    return win\n\ndef main():\n    from Ex_138 import genBingoCard\n    from Ex_138 import displayBingoCard\n    from random import randint\n\n    # Test a vertical win\n    vert_win = genBingoCard()\n    bingo = \"BINGO\"\n    letter = bingo[randint(0,4)]\n    vert_win.update({letter:[0,0,0,0,0]})\n    print(bingoCheck(vert_win))\n    displayBingoCard(vert_win)\n\n    # Test a horizontal win\n    horz_win = genBingoCard()\n    index = randint(0,4)\n    for letter in horz_win:\n        horz_win[letter][index] = 0\n    print(bingoCheck(horz_win))\n    displayBingoCard(horz_win)\n\n    # Test a diagonal win\n    diag_win = genBingoCard()\n    tl_br = 0\n    bl_tr = 0\n    choice = randint(0,1)\n    if choice == 0:\n        index = 0\n        for letter in diag_win:\n            diag_win[letter][index] = 0\n            index += 1\n    else:\n        index = 4\n        for letter in diag_win:\n            diag_win[letter][index] = 0\n            index -= 1\n    print(bingoCheck(diag_win))\n    displayBingoCard(diag_win)\n\n    # Test a no win\n    no_win = genBingoCard()\n    print(bingoCheck(no_win))\n    displayBingoCard(no_win)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"6 Dictionary exercises/Ex_139.py","file_name":"Ex_139.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
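The count_routes loop in the problem_15 sample above builds the central binomial coefficient C(2n, n) incrementally. On Python 3.8+ math.comb computes the same value directly, which makes a handy cross-check:

```python
import math

def count_routes(n):
    # Same iterative product as the sample: after step i the result is C(n+i, i).
    result = 1
    for i in range(1, n + 1):
        result = ((n + i) * result) // i
    return result

# The loop is exactly the central binomial coefficient C(2n, n).
for n in (1, 2, 5, 20):
    assert count_routes(n) == math.comb(2 * n, n)
print(count_routes(20))  # 137846528820
```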
+{"seq_id":"67995912","text":"class LinkList:\n    # write your __init__ method here; it should store a 'head' value, which is the first Node in the LinkedList, and a 'size' value, which is the total number of Nodes in the LinkedList\n    def __init__(self, head = None):\n        self.head = head\n        self.size = 0\n\n    def get_size(self):\n        return self.size\n\n    # Remove the first node holding the given value\n    def pop(self, data):\n        this_node = self.head\n        prev_node = None\n        while this_node:\n            if this_node.get_data() == data:\n                if prev_node:\n                    prev_node.set_next_node(this_node.get_next_node())\n                else:\n                    self.head = this_node.get_next_node()\n                self.size -= 1\n                return True\n            else:\n                prev_node = this_node\n                this_node = this_node.get_next_node()\n        return False\n\n    # Add a new node at the head\n    def push(self, data):\n        new_node = Node(data, self.head)\n        self.head = new_node\n        self.size += 1\n\n    def remove(self, data):\n        this_node = self.head\n        prev_node = None\n        while this_node:\n            if this_node.get_data() == data:\n                if prev_node:\n                    prev_node.set_next_node(this_node.get_next_node())\n                else:\n                    self.head = this_node.get_next_node()\n                self.size -= 1\n                return True\n            else:\n                prev_node = this_node\n                this_node = this_node.get_next_node()\n        return False\n\n    def get(self, element_to_get):\n        this_node = self.head\n        while this_node:\n            if this_node.get_data() == element_to_get:\n                return element_to_get\n            else:\n                this_node = this_node.get_next_node()\n        return None\n\n\n# ----- Node ------\n\nclass Node:\n    def __init__(self, data, next_node = None):\n        self.data = data\n        self.next_node = next_node\n    def get_next_node(self):\n        return self.next_node\n    def set_next_node(self, next_node):\n        self.next_node = next_node\n    def get_data(self):\n        return self.data\n    def set_data(self, data):\n        self.data = data\n\n\n\nx = LinkList()\nx.push(1)\nx.push(2)\nx.push(3)\nprint(x.remove(2))\n","sub_path":"python/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
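After the fixes to linked_list.py above, a compact standalone restatement of the push/remove semantics doubles as a quick sanity test. This mirrors the class above rather than importing it:

```python
# Compact re-statement of the sample's push/remove semantics (standalone).
class Node:
    def __init__(self, data, next_node=None):
        self.data, self.next_node = data, next_node

class LinkList:
    def __init__(self):
        self.head, self.size = None, 0

    def push(self, data):              # insert at the head
        self.head = Node(data, self.head)
        self.size += 1

    def remove(self, data):            # remove the first match by value
        prev, node = None, self.head
        while node:
            if node.data == data:
                if prev:
                    prev.next_node = node.next_node
                else:
                    self.head = node.next_node
                self.size -= 1
                return True
            prev, node = node, node.next_node
        return False

lst = LinkList()
for v in (1, 2, 3):
    lst.push(v)
assert lst.size == 3
assert lst.remove(2) and lst.size == 2
assert not lst.remove(99)
```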
+{"seq_id":"98509352","text":"import re\nfrom typing import Iterable, Iterator, MutableMapping\nfrom urllib.parse import urlparse\n\n# from tartley/colorama\nANSI_CSI_RE = re.compile(\"\\001?\\033\\\\[((?:\\\\d|;)*)([a-zA-Z])\\002?\")\n\nODOO_LOG_RE = re.compile(\n    r\"^\"\n    r\"(?P<asctime>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}) \"\n    r\"(?P<pid>\\d+) \"\n    r\"(?P<levelname>\\w+) \"\n    r\"(?P<dbname>\\S+) \"\n    r\"(?P<logger>\\S+): \"\n    r\"(?P<message>.*)\"\n    r\"$\"\n)\n\n\ndef parse_stream(\n    stream: Iterable[str], include_raw: bool = False\n) -> Iterator[MutableMapping[str, str]]:\n    \"\"\"Parse a stream of Odoo log lines and return an iterator of log records.\n\n    Log records have the following keys:\n    - asctime: timestamp\n    - pid: process or thread id\n    - dbname: database name\n    - logger: python logger name\n    - levelname: python logging level name\n    - message: the rest of the line\n    \"\"\"\n    record = None\n    for line in stream:\n        line = ANSI_CSI_RE.sub(\"\", line)\n        mo = ODOO_LOG_RE.match(line)\n        if mo:\n            # we got a match, yield previous record and create a new one\n            if record:\n                yield record\n            record = mo.groupdict()\n            if include_raw:\n                record[\"raw\"] = line\n        else:\n            if record:\n                # irregular line in the middle of the log file: assume\n                # it is a continuation of the current record (a typical\n                # example is a multi-line stack trace)\n                record[\"message\"] += \"\\n\" + line.strip()\n                if include_raw:\n                    record[\"raw\"] += line\n            else:\n                # irregular lines at the beginning, yield them independently\n                r = {\"message\": line.strip()}\n                if include_raw:\n                    r[\"raw\"] = line\n                yield r\n    if record:\n        yield record\n\n\nODOO_WERKZEUG_RE = re.compile(\n    r\"^(?P<remote_addr>\\S+)\"\n    r\" .+? .+? \\[.*?\\]\"\n    r\" \\\"(?P<method>\\S+) (?P<request_uri>\\S+) .*?\\\"\"\n    r\" (?P<status>\\S+) \\S+\"\n    r\"( (?P<perf>\"\n    r\"(?P<sql_count>\\d+) \"\n    r\"(?P<sql_time>\\d*\\.\\d+) \"\n    r\"(?P<other_time>\\d*\\.\\d+)\"\n    r\"))?\"\n    r\".*$\"\n)\n\n\ndef _convert_field(d, k, converter):\n    if k in d:\n        try:\n            d[k] = converter(d[k])\n        except Exception:\n            del d[k]\n\n\ndef enrich_werkzeug(\n    records: Iterable[MutableMapping[str, str]]\n) -> Iterator[MutableMapping[str, str]]:\n    \"\"\"Enrich werkzeug (http requests) log records\"\"\"\n    for record in records:\n        if record.get(\"logger\") == \"werkzeug\":\n            mo = ODOO_WERKZEUG_RE.match(record.get(\"message\", \"\"))\n            if mo:\n                record.update(\n                    (k, v) for k, v in mo.groupdict().items() if v is not None\n                )\n                record[\"request_path\"] = urlparse(record[\"request_uri\"]).path\n                _convert_field(record, \"sql_count\", int)\n                _convert_field(record, \"sql_time\", float)\n                _convert_field(record, \"other_time\", float)\n                if \"sql_time\" in record and \"other_time\" in record:\n                    record[\"total_time\"] = record[\"sql_time\"] + record[\"other_time\"]\n        yield record\n\n\ndef enrich(\n    records: Iterable[MutableMapping[str, str]]\n) -> Iterator[MutableMapping[str, str]]:\n    return enrich_werkzeug(records)\n","sub_path":"src/loog/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"263571253","text":"from PyQt4.QtCore import SIGNAL\nfrom PyQt4.QtGui import QAction, QIcon\n\nclass SfAction(QAction):\n    \n    def __init__(self, text, parent, icon=None, slot=None, shortcut=None,\n                 tip=None, checkable=False, signal=\"triggered()\", actionGroup = None, checked=False):\n        super(SfAction, self).__init__(text, parent)\n        \n        if icon is not None:\n            self.setIcon(QIcon(\":\" + icon))\n        if shortcut is not None:\n            self.setShortcut(shortcut)\n        if tip is not None:\n            self.setToolTip(tip)\n            self.setStatusTip(tip)\n        if slot is not None:\n            self.connect(self, SIGNAL(signal), slot)\n        if checkable:\n            self.setCheckable(True)\n        if actionGroup is not None:\n            self.setActionGroup(actionGroup)\n        if checked:\n            self.setChecked(True)\n        \nclass SfSeparator(QAction):\n    \n    def __init__(self, parent):\n        super(SfSeparator, self).__init__(parent)\n        self.setSeparator(True)\n    \n","sub_path":"frontend/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"54591242","text":"# Create a text file (by hand, not in code) and write employee surnames and their salaries line by line (at least 10 lines).\n# Determine which employees have a salary below 20 thousand and print their surnames.\n# Compute the average income of the employees.\n# Example file:\n# Ivanov 23543.12\n# Petrov 13749.32\n\ntry:\n    my_f = open(\"text_3.txt\", \"r\", encoding=\"utf-8\")\n    lst = my_f.read().split(\"\\n\")\n    my_dict = dict()\n    for line in lst:\n        lst2 = line.split(' ')\n        my_dict.update({lst2[0]: float(lst2[1])})\n    print(f\"Average income = {sum(my_dict.values())/len(my_dict.values())}\")\n    print('Employees with a salary below 20000: ', [i for i in my_dict if my_dict.get(i) < 20000])\nfinally:\n    my_f.close()","sub_path":"Lesson-5/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"333082840","text":"\"\"\"\nsecurity.py\nContains code which manages access to API routes\n\"\"\"\nimport logging\nfrom functools 
import wraps\nfrom flask import request, g\n\nlogger = logging.getLogger(__name__)\n\ndef secured(f):\n \"\"\"\n Decorator to check that a request has valid headers and matches the requirements\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n \"\"\"\n Function to check headers are present and check their validity\n \"\"\"\n logger.info(\"Secured decorator has started\")\n logger.debug(\"Details passed to the secured decorator\", extra={\"headers\": request.headers})\n if \"x-remote-user\" in request.headers:\n username = request.headers[\"x-remote-user\"]\n logger.info(f\"User is {username}\")\n if \"x-remote-user-groups\" in request.headers:\n groups = request.headers[\"x-remote-user-groups\"]\n logger.info(f\"Groups from header: {groups}\")\n groups = groups.split(\",\")\n return f(username, groups, *args, **kwargs)\n else:\n logger.info(\"X-Remote-User-Groups header is missing from request\")\n return f(username, [], *args, **kwargs)\n else:\n logger.info(\"X-Remote-User header is missing from request\")\n return f(\"anonymous\", [], *args, **kwargs)\n \n return decorated_function","sub_path":"security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140375761","text":"import os, sys\nimport subprocess\nimport pandas as pd\nfrom Bio.Seq import Seq\nfrom Bio import SeqIO\n\n# custom libraries\nsystem = str(input('\\n' + 'Local or Server (L or S):'))\n\nif system == 'S':\n sys.path.insert(0, '/home/strachan/master/')\nelse:\n sys.path.insert(0, '/Users/cameronstrachan/master/')\n\nfrom modules import seq_core_lin as sc\nfrom modules import seq_gen_lin as sg\n\n# This extracted contigs for the gene duplication analysis (Supp. Fig. 
3)\n\ngenes_df = pd.read_csv('dataflow/00-meta/genomes_with_ant6_duplication.csv', low_memory=False)\ngenes = genes_df['Accession'].tolist()\n\nfile_obj = sc.Fasta('fig1_fig3_ncbi_nucl_hits.fasta', 'dataflow/01-nucl/')\nfile_obj.setOutputName('pathogens_duplicates.fasta')\nfile_obj.setOutputLocation('dataflow/01-nucl/')\nfile_obj.subsetfasta(seqlist = genes, headertag='_duplicate')\n\n# From the above file, the regions with 50kB of AadE-Ia or AadE-Ib were trimmed\n# out and extracted in genious, then the ORFs were predicted\n\nfile = \"duplicate_gene_diagrams_trimmed.fasta\"\n\nfile_obj = sc.Fasta(file, 'dataflow/01-nucl/')\nfile_obj.setOutputName(file)\nfile_obj.setOutputLocation('dataflow/01-prot/')\nfile_obj.runprodigal()\n\n# All ORFs were then blasted against each other to do the synteny analysis (sup fig 3)\n\nfile = \"duplicate_gene_diagrams_trimmed.fasta\"\nblastdbdir = 'dataflow/02-blast-db/'\n\nfile_obj = sc.Fasta(file, 'dataflow/01-prot/')\nfile_obj.setOutputName(file)\nfile_obj.setOutputLocation(blastdbdir)\nfile_obj.runmakeblastdb(dbtype='prot')\n\nindir = 'dataflow/01-prot/'\nblastdir = 'dataflow/02-blast/'\nfile = \"duplicate_gene_diagrams_trimmed.fasta\"\n\nfile_obj = sc.Fasta(file, indir)\nfile_obj.setOutputName(file)\nfile_obj.setOutputLocation(blastdir)\noutputfilename = \"duplicate_gene_diagrams_trimmed.txt\"\nfile_obj.setOutputName(outputfilename)\n\nblastdb = \"duplicate_gene_diagrams_trimmed.fasta\"\n\nfile_obj.runblast(blast='blastp', db=blastdb, dblocation=blastdbdir, max_target_seqs=5000, evalue=1e-3, num_threads = 40, max_hsps = 1)\n","sub_path":"ProjectCode/P6_duplication_diagrams.py","file_name":"P6_duplication_diagrams.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"260195677","text":"from django.core.management import BaseCommand\n\nfrom business_register.converter.declaration import DeclarationConverter\nfrom business_register.models.declaration_models import Vehicle\nfrom business_register.pep_scoring.declarations_fixes import DeclarationsFixSet\n\n\nclass Command(BaseCommand):\n help = '---'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.converter = DeclarationConverter()\n\n def add_arguments(self, parser):\n parser.add_argument('--declaration_nacp_id', nargs='?', type=str)\n parser.add_argument('--pep_id', nargs='?', type=int)\n\n def is_luxury_cars(self, cars):\n i = 0\n count = cars.count()\n for car in cars:\n i += 1\n self.stdout.write(f'\\rProgress: {i} of {count}', ending='')\n self.stdout.flush()\n self.converter.current_declaration = car.declaration\n car.is_luxury = self.converter.is_vehicle_luxury(car)\n car.save()\n self.stdout.write()\n self.stdout.write('Done!')\n\n def handle(self, *args, **options):\n declaration_nacp_id = options['declaration_nacp_id']\n pep_id = options['pep_id']\n if pep_id:\n cars = Vehicle.objects.filter(type=Vehicle.CAR, declaration__pep_id=pep_id)\n self.is_luxury_cars(cars)\n elif declaration_nacp_id:\n cars = Vehicle.objects.filter(\n type=Vehicle.CAR,\n declaration__nacp_declaration_id=declaration_nacp_id,\n )\n self.is_luxury_cars(cars)\n else:\n all_cars = Vehicle.objects.filter(type=Vehicle.CAR)\n self.is_luxury_cars(all_cars)\n\n self.stdout.write('Resave done. 
Start running all fixes')\n        DeclarationsFixSet().run_all_fixes()\n        self.stdout.write('All fixes applied')\n","sub_path":"business_register/management/commands/is_vehicle_luxury.py","file_name":"is_vehicle_luxury.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"492439941","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n# Represents status of the analysis result of a Chromium waterfall compile/test\n# failure or a Chrome crash.\nUNSPECIFIED = -1\nFOUND_CORRECT = 0\nFOUND_INCORRECT = 10\nNOT_FOUND_INCORRECT = 20\nFOUND_UNTRIAGED = 30\nNOT_FOUND_UNTRIAGED = 40\nNOT_FOUND_CORRECT = 50\nPARTIALLY_CORRECT_FOUND = 60\nFLAKY = 70\nUNSUPPORTED = 80\nFOUND_CORRECT_DUPLICATE = 1000\nFOUND_INCORRECT_DUPLICATE = 1010\n\nRESULT_STATUS_TO_DESCRIPTION = {\n    FOUND_CORRECT: 'Correct - Found',\n    FOUND_INCORRECT: 'Incorrect - Found',\n    NOT_FOUND_INCORRECT: 'Incorrect - Not Found',\n    FOUND_UNTRIAGED: 'Untriaged - Found',\n    NOT_FOUND_UNTRIAGED: 'Untriaged - Not Found',\n    NOT_FOUND_CORRECT: 'Correct - Not Found',\n    PARTIALLY_CORRECT_FOUND: 'Partially Correct - Found',\n    FLAKY: 'Flaky',\n    UNSUPPORTED: 'Unsupported',\n    FOUND_CORRECT_DUPLICATE: 'Correct(duplicate) - Found',\n    FOUND_INCORRECT_DUPLICATE: 'Incorrect(duplicate) - Found'\n}\n","sub_path":"appengine/findit/model/result_status.py","file_name":"result_status.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"546436677","text":"'''\n@Time    : 2019/8/19 11:11\n@Author  : mjh\n@File    : decisiontree.py\n'''\n\nimport math\n\nclass TreeNode(object):\n    def __init__(self):\n        '''tree node'''\n        self.isleaf = False  # whether this node is a leaf\n        self.kids = {}  # all child nodes, feature value: child node\n        self.label = None  # class label of a leaf node\n        self.feature = None  # feature chosen at a non-leaf node\n\nclass DecisionTree(object):\n    def __init__(self, method = None, epsilon = 0):\n        self.method = method  # feature selection method: ID3 or C4.5\n        self.epsilon = epsilon  # information gain threshold\n        self.__tree = None  # the trained decision tree\n        self.__treedic = {}  # maps feature names to column indices\n\n    def fit(self, dataset):\n        # build the decision tree from the dataset\n        self.__tree = self.__buildTree(dataset)\n        feature = dataset[0][0:-1]\n        for key in feature:\n            self.__treedic[key] = feature.index(key)\n\n    def predict(self, feature):\n        # predict a label for each feature vector\n        result = []\n        for data in feature:\n            label = self.__searchTree(data)\n            result.append(label)\n        return result\n\n    def __informationgain(self, feature, label):\n        # compute information gain; feature: one feature column, label: the label column\n        totalentropy = self.__entropy(label)\n        condentro = 0\n        mydic = {}\n        for i in feature:\n            if i in mydic.keys():\n                mydic.update({i:mydic[i]+1})\n            else:\n                mydic.update({i:1})\n        for key in mydic.keys():\n            childfeature = [label[i] for i in range(0, len(feature)) if feature[i] == key]\n            condentro = condentro + (mydic[key]/len(feature))*self.__entropy(childfeature)\n        return totalentropy-condentro\n\n    def __infogainration(self, feature, label):\n        # gain ratio: information gain divided by the split information (entropy of the feature)\n        a = self.__informationgain(feature, label)\n        return a/self.__entropy(feature)\n\n    def __entropy(self, feature):\n        # compute the empirical entropy\n        mydic = {}\n        entro = 0\n        for i in feature:\n            if i in mydic.keys():\n                mydic.update({i:mydic[i]+1})\n            else:\n                mydic.update({i:1})\n        total = len(feature)\n        for key in mydic.keys():\n            num = mydic[key]\n            entro = entro - (num/total)*math.log2(num/total)\n        return entro\n\n    def __buildTree(self, dataset):\n        # build the tree recursively\n        category = dataset[1][-1]  # check whether all labels belong to the same class\n        flag = False\n        for i in range(1, len(dataset)):\n            if category != dataset[i][-1]:\n                flag = True\n                break\n        if flag == False:\n            node = TreeNode()\n            node.isleaf = True\n            node.label = category\n            return node\n\n        if len(dataset[0]) == 1:  # the feature vector is empty\n            node = TreeNode()\n            node.isleaf = True\n            node.label = self.__findmaxlabel([row[-1] for row in dataset[1:]])\n            return node\n\n        node = TreeNode()\n        infogain = []\n        for i in range(0, len(dataset[0])-1):  # compute the information gain of every feature\n            feature = [dataset[j][i] for j in range(1, len(dataset))]\n            label = [dataset[j][-1] for j in range(1, len(dataset))]\n            infogain.append(self.__informationgain(feature, label))\n        chooseindex = infogain.index(max(infogain))\n        node.feature = dataset[0][chooseindex]  # pick the feature with the largest information gain\n        categorial = []  # collect the values to split on\n        for i in range(1, len(dataset)):\n            if dataset[i][chooseindex] not in categorial:\n                categorial.append(dataset[i][chooseindex])\n        for key in categorial:  # recursively grow the subtrees\n            newdataset = []\n            line = [i for i in dataset[0] if i != dataset[0][chooseindex]]\n            newdataset.append(line)\n            for i in range(1, len(dataset)):\n                if dataset[i][chooseindex] == key:\n                    line = [dataset[i][j] for j in range(0, len(dataset[0])) if j != chooseindex]\n                    newdataset.append(line)\n            node.kids[key] = self.__buildTree(newdataset)\n\n        return node\n\n    def __findmaxlabel(self, dataset):\n        mydic = {}\n        for i in dataset:\n            if i in mydic.keys():\n                mydic.update({i:mydic[i]+1})\n            else:\n                mydic.update({i:1})\n        num = mydic[dataset[0]]\n        label = dataset[0]\n        for key in mydic.keys():\n            if mydic[key] > num:\n                num = mydic[key]\n                label = key\n        return label\n\n    def __searchTree(self, data):\n        '''walk the tree to find the class label'''\n        plook = self.__tree\n        while plook.isleaf == False:\n            plook = plook.kids[data[self.__treedic[plook.feature]]]\n        return plook.label\n\nif __name__=='__main__':\n    ''''''\n    data = [[\"年龄\", \"有工作\", \"有自己的房子\", \"信贷情况\", \"类别\"],\n            [\"青年\", \"否\", \"否\", \"一般\", \"否\"],\n            [\"青年\", \"否\", \"否\", \"好\", \"否\"],\n            [\"青年\", \"是\", \"否\", \"好\", \"是\"],\n            [\"青年\", \"是\", \"是\", \"一般\", \"是\"],\n            [\"青年\", \"否\", \"否\", \"一般\", \"否\"],\n            [\"中年\", \"否\", \"否\", \"一般\", \"否\"],\n            [\"中年\", \"否\", \"否\", \"好\", \"否\"],\n            [\"中年\", \"是\", \"是\", \"好\", \"是\"],\n            [\"中年\", \"否\", \"是\", \"非常好\", \"是\"],\n            [\"中年\", \"否\", \"是\", \"非常好\", \"是\"],\n            [\"老年\", \"否\", \"是\", \"非常好\", \"是\"],\n            [\"老年\", \"否\", \"是\", \"好\", \"是\"],\n            [\"老年\", \"是\", \"否\", \"好\", \"是\"],\n            [\"老年\", \"是\", \"否\", \"非常好\", \"是\"],\n            [\"老年\", \"否\", \"否\", \"一般\", \"否\"]\n            ]\n    dt = DecisionTree()\n    dt.fit(data)\n    a = dt.predict([[\"老年\", \"否\", \"否\", \"一般\"]])\n    print(a)","sub_path":"code/decisiontree.py","file_name":"decisiontree.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
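The decisiontree sample above implements the empirical entropy H(D) = -sum_k p_k log2 p_k and the information gain g(D, A) = H(D) - sum_v |D_v|/|D| H(D_v). Below is a standalone sketch of the same two formulas on a toy split, useful for cross-checking the class's numbers:

```python
import math
from collections import Counter

def entropy(labels):
    # Empirical entropy H(D) = -sum p_k * log2(p_k), as in the sample.
    n = len(labels)
    return -sum((c / n) * math.log2(c / n) for c in Counter(labels).values())

def information_gain(feature, labels):
    # g(D, A) = H(D) - sum_v |D_v|/|D| * H(D_v)
    n = len(labels)
    cond = 0.0
    for v in set(feature):
        subset = [l for f, l in zip(feature, labels) if f == v]
        cond += (len(subset) / n) * entropy(subset)
    return entropy(labels) - cond

labels  = ["no", "no", "yes", "yes", "no", "no"]
feature = ["a",  "a",  "b",   "b",   "a",  "b"]
print(round(entropy(labels), 4))                    # 0.9183
print(round(information_gain(feature, labels), 4))  # gain from splitting on the feature
```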
+{"seq_id":"231721499","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport cv2\nimport numpy as np\nfrom preprocessing import parse_annotation\nfrom utils import get_annoboxes, draw_boxes\nfrom frontend import YOLO\nimport json\nfrom timeit import default_timer as timer\nfrom PIL import Image\nfrom my_scripts.convert_to_xml import save_anno_xml\n\nclass predictor:\n\n    def __init__(self, config_path, weights_path):\n        with open(config_path) as config_buffer:\n            config = json.loads(config_buffer.read())\n\n        self.labels = config['model']['labels']\n\n        self.yolo = YOLO(architecture        = config['model']['architecture'],\n                         input_size          = config['model']['input_size'],\n                         labels              = self.labels,\n                         max_box_per_image   = config['model']['max_box_per_image'],\n                         anchors             = config['model']['anchors'])\n\n        
self.yolo.load_weights(weights_path)\n        self.timing = [0, 0.]\n\n    def _predict_one(self, image, threshold, decimals, draw_bboxes=True):\n\n        t = timer()\n        boxes = self.yolo.predict(image, threshold=threshold)\n        image = draw_boxes(image, boxes, self.labels, decimals=decimals)\n        t = timer() - t\n        self.timing[0] += 1\n        self.timing[1] += t\n        print('{} boxes are found for {} s'.format(len(boxes), t))\n        return image, boxes\n\n    def predict_from_dir(self, path_to_dir, image_format, path_to_outputs = None, threshold=0.5, decimals=8, save_anno=False, draw_truth=False):\n        if path_to_outputs and not os.path.exists(path_to_outputs):\n            print('Creating output path {}'.format(path_to_outputs))\n            os.mkdir(path_to_outputs)\n\n        for image_filename in os.listdir(path_to_dir):\n            # TODO: add a proper check here that this file is actually an image\n            if image_filename.endswith(image_format):\n                image = cv2.imread(os.path.join(path_to_dir, image_filename), cv2.IMREAD_COLOR)\n                image_h = image.shape[0]\n                image_w = image.shape[1]\n\n                curr_time = timer()\n\n                image, boxes = self._predict_one(image, threshold=threshold, decimals=decimals)\n\n                curr_time = timer() - curr_time\n                print(curr_time)\n\n                boxes = get_annoboxes(image_w=image_w, image_h=image_h, boxes = boxes, labels=self.labels)\n\n                if path_to_outputs:\n\n                    if save_anno:\n                        #\n                        save_anno_xml(dir=path_to_outputs + 'annotations/',\n                                      img_name=image_filename[:-len(image_format) - 1],\n                                      img_format=image_format,\n                                      img_w=image.shape[1],\n                                      img_h=image.shape[0],\n                                      img_d=image.shape[2],\n                                      boxes=boxes,\n                                      quiet=False,\n                                      minConf=threshold)\n\n                    retval = cv2.imwrite(path_to_outputs + 'images/' + image_filename, image)\n                    if retval:\n                        print('Image {} saved to folder {}'.format(image_filename, path_to_outputs))\n            else:\n                print('The folder contains files that are not images - {}'.format(image_filename))\n\n        print('All images have been processed')\n        print('Number of images {}, total time {}, average time per image {}'.format(self.timing[0], self.timing[1], self.timing[1]/self.timing[0]))\n\n\n    def predict_from_webcam(self, threshold=0.5, fps=False, decimals=8):\n        vid = cv2.VideoCapture(1)\n        if not vid.isOpened():\n            raise IOError((\"Couldn't open webcam. 
If you're trying to open a webcam, \"\n \"make sure you video_path is an integer!\"))\n\n # Compute aspect ratio of video\n vidw = vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n vidh = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n vidar = vidw / vidh\n\n\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n\n while True:\n retval, orig_image = vid.read()\n if not retval:\n print(\"Done!\")\n return\n\n res_image = self._predict_one(orig_image, threshold=threshold, decimals=2)\n\n # Calculate FPS\n # This computes FPS for everything, not just the model's execution\n # which may or may not be what you want\n if fps:\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n\n # Draw FPS in top left corner\n cv2.rectangle(res_image, (0, 0), (50, 17), (255, 255, 255), -1)\n cv2.putText(res_image, fps, (3, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 0), 1)\n\n cv2.imshow(\"YOLOv2 result\", res_image)\n pressedKey = cv2.waitKey(10)\n if pressedKey == 27: # ESC key\n break\n\n def predict_from_video(self, path_to_video, threshold=0.5, decimals=8, output_file='', crop=True, writeFPS=False, show=False):\n vid = cv2.VideoCapture(path_to_video)\n if not vid.isOpened():\n raise IOError((\"Couldn't open webcam. Make sure you video_path is an integer!\"))\n\n # Compute aspect ratio of video\n vidw = vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n vidh = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n n = 0\n if crop:\n n = int((vidw - vidh) * 0.5)\n vidw = vidh\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(output_file, fourcc, 20.0, (int(vidw), int(vidh)))\n\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n\n while True:\n retval, orig_image = vid.read()\n if not retval:\n print(\"Done!\")\n return\n\n if crop:\n orig_image = orig_image[:, n:int(n + vidh), :]\n\n res_image, boxes = self._predict_one(orig_image, threshold=threshold, decimals=decimals)\n\n # Calculate FPS\n # This computes FPS for everything, not just the model's execution\n # which may or may not be what you want\n if writeFPS:\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n\n # Draw FPS in top left corner\n cv2.rectangle(res_image, (0, 0), (50, 17), (255, 255, 255), -1)\n cv2.putText(res_image, fps, (3, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 0), 1)\n\n if show:\n cv2.imshow(\"YOLOv2 result\", res_image)\n\n if output_file:\n out.write(res_image)\n\n pressedKey = cv2.waitKey(10)\n if pressedKey == 27: # ESC key\n break\n\n out.release()\n\n\n\n# A simple example how to use.\n # Create an object-predictor with certain parameters (in config file) and weights\n\n'''\nlogdirs = (20, 21, 22)\nthres = 0.5\nvideodir = '/media/data/ObjectDetectionExperiments/Datasets/9_TestNeuroMobile/orig/ЗНАКИ/'\noutdir = '/media/data/ObjectDetectionExperiments/Results/NeuroMobile/Signs/'\nnames = os.listdir(videodir)\n\nfor logdir in logdirs:\n\n config = '../logs/{}/config2.json'.format(logdir)\n weights = '../logs/{}/weights.hdf5'.format(logdir)\n pred = predictor(config_path=config,\n weights_path=weights)\n\n for name in names:\n outvideo = outdir + 'log{}_'.format(logdir) + name\n 
pred.predict_from_video(path_to_video=videodir+name,\n threshold=thres,\n output_file=outvideo,\n decimals=2,\n writeFPS=False)\n'''\n\n'''\nlogdir = '15_signs_3'\nconfig = '../logs/{}/config.json'.format(logdir)\nweights = '../logs/{}/weights.hdf5'.format(logdir)\nthress = (0.25, 0.3, 0.35, 0.4)\nl = len(thress)\n\npred = predictor(config_path=config,\n weights_path=weights)\n\ndir = '/home/user/Desktop/VideosNew/sign/'\nout = '/home/user/Desktop/VideosNew/sign_res/'\nflist = os.listdir(dir)\nL = len(flist)\n\nfor i, f in enumerate(flist):\n videopath = dir + f\n\n for t, thres in enumerate(thress):\n resultpath = out + 'res_{}_{}.avi'.format(f[:f.rfind('.')], int(thres * 100))\n pred.predict_from_video(path_to_video=videopath,\n threshold=thres,\n output_file=resultpath,\n crop=False,\n decimals=2)\n print(\"{}/{}\\t{}/{}\".format(i+1, L, t+1, l))\n\n'''\n\n#main_path=\"/media/ivan/Seagate Backup Plus Drive/check/nissan_weights\"\n#configs = ['nissan_finetune_full', 'nissan_finetune_mobilenet', 'nissan_tiny_finetune', 'nissan_tiny_scratch']\n\n#c_dir = configs[0]\n\n#config = '{0}/{1}/config.json'.format(main_path, c_dir)\n#weights = '{0}/{1}/weights_last.hdf5'.format(main_path, c_dir)\n#thress = (0.25, 0.3, 0.35, 0.4)\n#l = len(thress)\n\n'''\ndirs = ['15_signs_0', '15_signs_01', '15_signs_2', '15_signs_02', '15_signs_3', '15_signs_04', '15_signs_5']\nwght = ['weights100.hdf5', 'weights100.hdf5', 'weights060.hdf5', 'weights100.hdf5', 'weights100.hdf5', 'weights500.hdf5', 'weights080.hdf5']\n\nfor ind in range(0, 7):\n config = '/media/ivan/Debian 7.8.0 i386 1/SvetaSigns/16_signs/{0}/config.json'.format(dirs[ind])\n weights = '/media/ivan/Debian 7.8.0 i386 1/SvetaSigns/16_signs/{0}/{1}'.format(dirs[ind], wght[ind])\n\n pred = predictor(config_path=config,\n weights_path=weights)\n\n img_for_pred = '/media/ivan/Debian 7.8.0 i386 1/SvetaSigns/tsyba/signs_2/images'\n img_results = '/media/ivan/Debian 7.8.0 i386 1/SvetaSigns/tsyba/signs2_out/{0}/'.format(dirs[ind])\n\n if not os.path.exists(img_results):\n os.makedirs(img_results)\n\n if not os.path.exists(img_results+'/annotations'):\n os.makedirs(img_results+'/annotations')\n\n if not os.path.exists(img_results + '/images'):\n os.makedirs(img_results + '/images')\n'''\n\nconfig = \"/home/ivan/RTV-VideoAnalytics/Database/Weights/Detection/yolo2-keras-docking-full/config_yolo_full.json\"\nweights = \"/home/ivan/RTV-VideoAnalytics/Database/Weights/Detection/yolo2-keras-docking-full/weights_yolo_full.hdf5\"\n\npred = predictor(config_path=config,\n weights_path=weights)\n\nimg_for_pred = \"/home/ivan/RTV-VideoAnalytics/Database/ImageSequences/Docking/node4\"\nimg_results = \"/home/ivan/4presentation/node4/\"\n\nif not os.path.exists(img_results):\n os.makedirs(img_results)\n\nif not os.path.exists(img_results + '/annotations'):\n os.makedirs(img_results + '/annotations')\n\nif not os.path.exists(img_results + '/images'):\n os.makedirs(img_results + '/images')\n\npred.predict_from_dir(path_to_dir=img_for_pred,\n path_to_outputs=img_results,\n image_format='jpeg',\n threshold=0.1,\n save_anno=True,\n decimals=2)\n\n\n\n'''\nimg_for_pred = '/media/data/ObjectDetectionExperiments/Datasets/5_RTSD/ORIG/testimages/'\nimg_results = '../logs/{}/out/'.format(logdir)\n\n#img_for_pred = '/media/data/ObjectDetectionExperiments/Datasets/10_Helmet/images/val/'.format(dataset, d)\n#img_results = '/media/data/ObjectDetectionExperiments/Projects/2_YOLOs/YOLOv2_Orlova/Experiencor/Results/{}/'.format(logdir)\n\npred.predict_from_dir(path_to_dir=img_for_pred,\n 
path_to_outputs=img_results,\n image_format='jpg',\n threshold=thres,\n save_anno=False,\n decimals=2)\n'''\nprint('Done!')\n\n","sub_path":"my_scripts/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":12626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391293178","text":"#library imports\nimport uuid\nfrom decimal import Decimal\nfrom datetime import datetime\nfrom flask import request\nfrom sqlalchemy.sql.sqltypes import Integer\nfrom flask_restplus import Resource, reqparse, inputs\nfrom sqlalchemy_filters import apply_sort, apply_pagination, apply_filters\nfrom werkzeug.exceptions import BadRequest, NotFound\n\n#app imports\nfrom app.extensions import api, cache\nfrom app.api.utils.access_decorators import requires_role_mine_edit, requires_any_of, VIEW_ALL, MINESPACE_PROPONENT, is_minespace_user, MINE_EDIT\nfrom app.api.utils.resources_mixins import UserMixin\nfrom app.api.constants import MINE_MAP_CACHE\n\n#namespace imports\nfrom app.api.mines.response_models import MINE_LIST_MODEL, MINE_MODEL, MINE_SEARCH_MODEL\nfrom app.api.mines.permits.permit.models.permit import Permit\nfrom app.api.mines.permits.permit.models.mine_permit_xref import MinePermitXref\n\nfrom app.api.mines.mine.models.mine import Mine\nfrom app.api.mines.mine.models.mine_type import MineType\nfrom app.api.mines.mine.models.mine_type_detail import MineTypeDetail\nfrom app.api.mines.mine.models.mine_verified_status import MineVerifiedStatus\n\nfrom app.api.mines.status.models.mine_status import MineStatus\nfrom app.api.mines.status.models.mine_status_xref import MineStatusXref\n\nfrom .mine_map import MineMapResource\n\n\nclass MineListResource(Resource, UserMixin):\n parser = reqparse.RequestParser()\n parser.add_argument(\n 'mine_name', type=str, help='Name of the mine.', trim=True, required=True, location='json')\n parser.add_argument(\n 'mine_note',\n type=str,\n help='Any additional notes to be added to the mine.',\n trim=True,\n location='json')\n parser.add_argument(\n 'longitude',\n type=lambda x: Decimal(x) if x else None,\n help='Longitude point for the mine.',\n location='json')\n parser.add_argument(\n 'latitude',\n type=lambda x: Decimal(x) if x else None,\n help='Latitude point for the mine.',\n location='json')\n parser.add_argument(\n 'mine_status',\n action='split',\n help=\n 'Status of the mine, to be given as a comma separated string value. Ex: status_code, status_reason_code, status_sub_reason_code ',\n required=True,\n location='json')\n parser.add_argument(\n 'status_date', help='The date when the current status took effect', location='json')\n parser.add_argument(\n 'major_mine_ind',\n type=inputs.boolean,\n help='Indication if mine is major_mine_ind or regional. 
Accepts \"true\", \"false\", \"1\", \"0\".',\n location='json')\n parser.add_argument(\n 'mine_region',\n type=str,\n help='Region for the mine.',\n trim=True,\n required=True,\n location='json')\n parser.add_argument(\n 'ohsc_ind', type=bool, store_missing=False, help='Indicates if the mine has an OHSC.')\n parser.add_argument(\n 'union_ind', type=bool, store_missing=False, help='Indicates if the mine has a union.')\n parser.add_argument(\n 'government_agency_type_code',\n type=str,\n help='Government agency the mine belongs to.',\n trim=True,\n store_missing=True,\n location='json')\n parser.add_argument(\n 'exemption_fee_status_code',\n type=str,\n help='Exemption fee status code.',\n trim=True,\n store_missing=True,\n location='json')\n parser.add_argument(\n 'exemption_fee_status_note',\n type=str,\n help='Exemption fee status note.',\n trim=True,\n store_missing=True,\n location='json')\n parser.add_argument(\n 'work_status',\n action='split',\n help='Work status for the mine.',\n store_missing=False,\n location='json')\n\n @api.doc(\n params={\n 'per_page': 'The number of results to be returned per page.',\n 'page': 'The current page number to be displayed.',\n 'search': 'The search term.',\n 'commodity': 'A specific commodity to filter the mine list on.',\n 'status': 'A specific mine status to filter the mine list on.',\n 'work_status': 'A specific mine work status to filter the mine list on.',\n 'tenure': 'A specific mine tenure type to filter the mine list on.',\n 'region': 'A specific mine region to filter the mine list on.',\n 'major': 'Filters the mine list by major mines or regional mines.',\n 'tsf': 'Filters the mine list by mines with or without a TSF.',\n 'verified': 'Filters the mine list by verified mines.',\n 'sort_field':\n 'enum[mine_name, mine_no, mine_operation_status_code, mine_region] Default: mine_name',\n 'sort_dir': 'enum[asc, desc] Default: asc'\n },\n description='Returns a list of filtered mines.')\n @api.marshal_with(MINE_LIST_MODEL, code=200)\n @requires_any_of([VIEW_ALL, MINESPACE_PROPONENT])\n def get(self):\n\n paginated_mine_query, pagination_details = self.apply_filter_and_search(request.args)\n mines = paginated_mine_query.all()\n return {\n 'mines': mines,\n 'current_page': pagination_details.page_number,\n 'total_pages': pagination_details.num_pages,\n 'items_per_page': pagination_details.page_size,\n 'total': pagination_details.total_results,\n }\n\n @api.expect(parser)\n @api.doc(description='Creates a new mine.')\n @api.marshal_with(MINE_MODEL, code=201)\n @requires_role_mine_edit\n def post(self):\n data = self.parser.parse_args()\n lat = data.get('latitude')\n lon = data.get('longitude')\n if (lat and not lon) or (not lat and lon):\n raise BadRequest('latitude and longitude must both be empty, or both provided')\n\n # query the mine tables and check if that mine name exists\n _throw_error_if_mine_exists(data.get('mine_name'))\n mine = Mine(\n mine_name=data.get('mine_name'),\n mine_note=data.get('mine_note'),\n major_mine_ind=data.get('major_mine_ind'),\n mine_region=data.get('mine_region'),\n ohsc_ind=data.get('ohsc_ind'),\n union_ind=data.get('union_ind'),\n latitude=lat,\n longitude=lon,\n government_agency_type_code=data.get('government_agency_type_code'),\n exemption_fee_status_code=data.get('exemption_fee_status_code'),\n exemption_fee_status_note=data.get('exemption_fee_status_note'))\n\n mine_status = _mine_status_processor(data.get('mine_status'), data.get('status_date'), mine)\n mine.save()\n\n # Clear and rebuild the cache after 
committing changes to db\n if lat and lon:\n cache.delete(MINE_MAP_CACHE)\n MineMapResource.rebuild_map_cache_async()\n\n # generate & set hybrid_properties to include in response payload\n mine.init_on_load()\n return mine\n\n def apply_filter_and_search(self, args):\n sort_models = {'mine_name': 'Mine', 'mine_no': 'Mine', 'mine_region': 'Mine'}\n\n # Handle ListView request\n items_per_page = args.get('per_page', 25, type=int)\n page = args.get('page', 1, type=int)\n sort_field = args.get('sort_field', 'mine_name', type=str)\n sort_dir = args.get('sort_dir', 'asc', type=str)\n sort_model = sort_models.get(sort_field)\n search_term = args.get('search', None, type=str)\n\n # Filters to be applied\n commodity_filter_terms = args.getlist('commodity', type=str)\n status_filter_term = args.getlist('status', type=str)\n work_status_filter_term = args.getlist('work_status', type=str)\n tenure_filter_term = args.getlist('tenure', type=str)\n region_code_filter_term = args.getlist('region', type=str)\n major_mine_filter_term = args.get('major', None, type=str)\n tsf_filter_term = args.get('tsf', None, type=str)\n verified_only_term = args.get('verified', None, type=str)\n\n # Base query:\n mines_query = Mine.query\n\n # Filter by search_term if provided\n if search_term:\n search_term = search_term.strip()\n name_filter = Mine.mine_name.ilike('%{}%'.format(search_term))\n number_filter = Mine.mine_no.ilike('%{}%'.format(search_term))\n permit_filter = Permit.permit_no.ilike('%{}%'.format(search_term))\n mines_name_query = Mine.query.filter(name_filter | number_filter)\n\n permit_query = Mine.query.join(MinePermitXref).join(Permit).filter(\n permit_filter, Permit.deleted_ind == False, MinePermitXref.deleted_ind == False)\n mines_query = mines_name_query.union(permit_query)\n\n # Filter by Major Mine, if provided\n if major_mine_filter_term == \"true\" or major_mine_filter_term == \"false\":\n major_mine_filter = Mine.major_mine_ind.is_(major_mine_filter_term == \"true\")\n major_mine_query = Mine.query.filter(major_mine_filter)\n mines_query = mines_query.intersect(major_mine_query)\n\n # Filter by TSF, if provided\n if tsf_filter_term == \"true\" or tsf_filter_term == \"false\":\n tsf_filter = Mine.mine_tailings_storage_facilities != None if tsf_filter_term == \"true\" else \\\n Mine.mine_tailings_storage_facilities == None\n tsf_query = Mine.query.filter(tsf_filter)\n mines_query = mines_query.intersect(tsf_query)\n\n # Filter by region, if provided\n if region_code_filter_term:\n region_filter = Mine.mine_region.in_(region_code_filter_term)\n region_query = Mine.query.filter(region_filter)\n mines_query = mines_query.intersect(region_query)\n\n # Filter by commodity if provided\n if commodity_filter_terms:\n commodity_filter = MineTypeDetail.mine_commodity_code.in_(commodity_filter_terms)\n mine_type_active_filter = MineType.active_ind.is_(True)\n commodity_query = Mine.query \\\n .join(MineType) \\\n .join(MineTypeDetail) \\\n .filter(commodity_filter, mine_type_active_filter)\n mines_query = mines_query.intersect(commodity_query)\n\n # Create a filter on tenure if one is provided\n if tenure_filter_term:\n tenure_filter = MineType.mine_tenure_type_code.in_(tenure_filter_term)\n mine_type_active_filter = MineType.active_ind.is_(True)\n tenure_query = Mine.query \\\n .join(MineType) \\\n .filter(tenure_filter, mine_type_active_filter)\n mines_query = mines_query.intersect(tenure_query)\n\n # Create a filter on verified mine status\n if verified_only_term == \"true\" or verified_only_term == 
\"false\":\n verified_only_filter = MineVerifiedStatus.healthy_ind.is_(verified_only_term == \"true\")\n verified_only_query = Mine.query.join(MineVerifiedStatus).filter(verified_only_filter)\n mines_query = mines_query.intersect(verified_only_query)\n\n # Create a filter on mine status if one is provided\n if status_filter_term:\n status_filter = MineStatusXref.mine_operation_status_code.in_(status_filter_term)\n status_reason_filter = MineStatusXref.mine_operation_status_reason_code.in_(\n status_filter_term)\n status_subreason_filter = MineStatusXref.mine_operation_status_sub_reason_code.in_(\n status_filter_term)\n all_status_filter = status_filter | status_reason_filter | status_subreason_filter\n status_query = Mine.query \\\n .join(MineStatus) \\\n .join(MineStatusXref) \\\n .filter(all_status_filter, MineStatus.active_ind == True)\n mines_query = mines_query.intersect(status_query)\n\n if work_status_filter_term:\n work_status_query = Mine.query \\\n .filter(Mine.work_status.in_(work_status_filter_term))\n mines_query = mines_query.intersect(work_status_query)\n\n deleted_filter = [{'field': 'deleted_ind', 'op': '==', 'value': 'False'}]\n mines_query = apply_filters(mines_query, deleted_filter)\n\n # Apply sorting\n if sort_model and sort_field and sort_dir:\n sort_criteria = [{'model': sort_model, 'field': sort_field, 'direction': sort_dir}]\n mines_query = apply_sort(mines_query, sort_criteria)\n\n return apply_pagination(mines_query, page, items_per_page)\n\n\nclass MineResource(Resource, UserMixin):\n parser = reqparse.RequestParser()\n parser.add_argument(\n 'mine_name',\n type=str,\n help='Name of the mine.',\n trim=True,\n store_missing=False,\n location='json')\n parser.add_argument(\n 'mine_note',\n type=str,\n help='Any additional notes to be added to the mine.',\n trim=True,\n store_missing=False,\n location='json')\n parser.add_argument(\n 'longitude',\n type=lambda x: Decimal(x) if x else None,\n help='Longitude point for the mine.',\n store_missing=False,\n location='json')\n parser.add_argument(\n 'latitude',\n type=lambda x: Decimal(x) if x else None,\n help='Latitude point for the mine.',\n store_missing=False,\n location='json')\n parser.add_argument(\n 'mine_status',\n action='split',\n help=\n 'Status of the mine, to be given as a comma separated string value. Ex: status_code, status_reason_code, status_sub_reason_code ',\n store_missing=False,\n location='json')\n parser.add_argument(\n 'status_date', help='The date when the current status took effect', location='json')\n parser.add_argument(\n 'major_mine_ind',\n type=inputs.boolean,\n help='Indication if mine is major_mine_ind or regional. 
Accepts \"true\", \"false\", \"1\", \"0\".',\n store_missing=False,\n location='json')\n parser.add_argument(\n 'mine_region',\n type=str,\n help='Region for the mine.',\n trim=True,\n store_missing=False,\n location='json')\n parser.add_argument(\n 'ohsc_ind', type=bool, store_missing=False, help='Indicates if the mine has an OHSC.')\n parser.add_argument(\n 'union_ind', type=bool, store_missing=False, help='Indicates if the mine has a union.')\n parser.add_argument(\n 'exemption_fee_status_code',\n type=str,\n help='Fee exemption status for the mine.',\n store_missing=False,\n trim=True,\n location='json')\n parser.add_argument(\n 'exemption_fee_status_note',\n type=str,\n help='Fee exemption status note for the mine.',\n store_missing=False,\n trim=True,\n location='json')\n parser.add_argument(\n 'government_agency_type_code',\n type=str,\n help='Government agency the mine belongs to.',\n store_missing=False,\n trim=True,\n location='json')\n parser.add_argument(\n 'number_of_contractors',\n type=int,\n help='Number of contractors.',\n location='json')\n parser.add_argument(\n 'number_of_mine_employees',\n type=int,\n help='Number of mine employees.',\n location='json')\n\n @api.doc(description='Returns the specific mine from the mine_guid or mine_no provided.')\n @api.marshal_with(MINE_MODEL, code=200)\n @requires_any_of([VIEW_ALL, MINESPACE_PROPONENT])\n def get(self, mine_no_or_guid):\n\n mine = Mine.find_by_mine_no_or_guid(mine_no_or_guid)\n if not mine:\n raise NotFound('Mine not found.')\n\n return mine\n\n @api.expect(parser)\n @api.marshal_with(MINE_MODEL, code=200)\n @api.doc(description='Updates the specified mine.')\n @requires_any_of([MINE_EDIT, MINESPACE_PROPONENT])\n def put(self, mine_no_or_guid):\n mine = Mine.find_by_mine_no_or_guid(mine_no_or_guid)\n refresh_cache = False\n if not mine:\n raise NotFound(\"Mine not found.\")\n\n data = self.parser.parse_args()\n\n if is_minespace_user() is not True: \n lat = data.get('latitude')\n lon = data.get('longitude')\n if (lat and not lon) or (not lat and lon):\n raise BadRequest('latitude and longitude must both be empty, or both provided')\n\n # Mine Detail\n if 'mine_name' in data and mine.mine_name != data['mine_name']:\n _throw_error_if_mine_exists(data['mine_name'])\n mine.mine_name = data['mine_name']\n refresh_cache = True\n if 'mine_note' in data:\n mine.mine_note = data['mine_note']\n if 'major_mine_ind' in data:\n mine.major_mine_ind = data['major_mine_ind']\n if 'mine_region' in data:\n mine.mine_region = data['mine_region']\n if 'ohsc_ind' in data:\n mine.ohsc_ind = data['ohsc_ind']\n if 'union_ind' in data:\n mine.union_ind = data['union_ind']\n if 'latitude' in data and 'longitude' in data:\n mine.latitude = data['latitude']\n mine.longitude = data['longitude']\n refresh_cache = True\n if 'government_agency_type_code' in data:\n mine.government_agency_type_code = data.get('government_agency_type_code')\n if 'exemption_fee_status_code' in data:\n mine.exemption_fee_status_code = data.get('exemption_fee_status_code')\n if 'exemption_fee_status_note' in data:\n mine.exemption_fee_status_note = data.get('exemption_fee_status_note')\n if 'number_of_contractors' in data:\n mine.number_of_contractors = data.get('number_of_contractors')\n if 'number_of_mine_employees' in data:\n mine.number_of_mine_employees = data.get('number_of_mine_employees')\n\n mine.save()\n\n if 'mine_status' in data:\n _mine_status_processor(data.get('mine_status'), data.get('status_date'), mine)\n\n # refresh cache will need to be called for all 
supported fields, should more be added in the future\n        if refresh_cache:\n            cache.delete(MINE_MAP_CACHE)\n            MineMapResource.rebuild_map_cache_async()\n\n        return mine\n\n\nclass MineListSearch(Resource):\n    @api.doc(\n        params={\n            'name': 'Search term in mine name.',\n            'term': 'Search term in mine name, mine number, and permit.'\n        })\n    @requires_any_of([VIEW_ALL, MINESPACE_PROPONENT])\n    @api.marshal_with(MINE_SEARCH_MODEL, code=200, envelope='mines')\n    def get(self):\n        name_search = request.args.get('name')\n        search_term = request.args.get('term')\n        major = None\n        if 'major' in request.args:\n            major = request.args.get('major')\n\n        if search_term:\n            result = Mine.find_by_name_no_permit(search_term, major=major)\n        else:\n            result = Mine.find_by_mine_name(name_search, major=major)\n\n        return result\n\n\n# Functions shared by the MineListResource and the MineResource\ndef _mine_operation_code_processor(mine_status, index):\n    try:\n        return mine_status[index].strip()\n    except IndexError:\n        return None\n\n\ndef _mine_status_processor(mine_status, status_date, mine):\n    if not mine_status:\n        existing_status_date = mine.mine_status[0].status_date if mine.mine_status else None\n        if status_date == existing_status_date:\n            return mine.mine_status\n\n        new_status = MineStatus(status_date=status_date)\n        mine.mine_status.append(new_status)\n        new_status.save()\n        mine.save(commit=False)\n        return new_status\n    mine_status_xref = MineStatusXref.find_by_codes(\n        _mine_operation_code_processor(mine_status, 0),\n        _mine_operation_code_processor(mine_status, 1),\n        _mine_operation_code_processor(mine_status, 2))\n    if not mine_status_xref:\n        raise BadRequest('Invalid status_code, reason_code, and sub_reason_code combination.')\n    existing_status = mine.mine_status[0] if mine.mine_status else None\n    if existing_status:\n        if existing_status.mine_status_xref_guid == mine_status_xref.mine_status_xref_guid \\\n                and str(status_date) == str(existing_status.status_date):\n            return existing_status\n\n        existing_status.expiry_date = datetime.today()\n        existing_status.active_ind = False\n        existing_status.save()\n    if status_date == '':\n        new_status = MineStatus(mine_status_xref_guid=mine_status_xref.mine_status_xref_guid)\n    else:\n        new_status = MineStatus(\n            mine_status_xref_guid=mine_status_xref.mine_status_xref_guid, status_date=status_date)\n    mine.mine_status.append(new_status)\n    new_status.save()\n    mine.save(commit=False)\n    return new_status\n\n\ndef _throw_error_if_mine_exists(mine_name):\n    # query the mine tables and check if that mine name exists\n    if mine_name:\n        name_filter = Mine.mine_name.ilike(mine_name.strip())\n        mines_name_query = Mine.query.filter(name_filter).filter_by(deleted_ind=False)\n        mines_with_name = mines_name_query.all()\n        if len(mines_with_name) > 0:\n            raise BadRequest(f'Mine No: {mines_with_name[0].mine_no} already has that name.')\n","sub_path":"services/core-api/app/api/mines/mine/resources/mine.py","file_name":"mine.py","file_ext":"py","file_size_in_byte":21426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"312332086","text":"'''\nPython solution\nGCC218 - 2019/02\nAssignment: Implementation 2 (Exercise 1)\nGroup:\n    Lucas Neves, 14A, 201720357\n    Davi Horner, 10A, 201720368\n    Thiago Luigi, 10A, 201720364\nDate: 20/09/2019 \n'''\n\nfrom collections import deque\n\n# Class that represents a vertex\nclass Vertice:\n    def __init__(self, i):\n        # Vertex index\n        self.indice = i\n        self.cor = -1\n    \n    def get_indice(self):\n        return self.indice\n\n    def get_cor(self):\n        return self.cor\n    \n    def set_cor(self, nova_cor):\n        self.cor = nova_cor\n\n# Class that represents the graph\nclass Grafo:\n    # n is the number of vertices\n    def __init__(self, n):\n        # create the adjacency list\n        self.lista_adjacencia = []\n        for i in range(n):\n            self.lista_adjacencia.append([])\n        # set the number of vertices of the graph\n        self.quantidade_vertices = n\n        # create the vertex list\n        self.lista_vertices = []\n        for i in range(n):\n            self.lista_vertices.append(Vertice(i))\n    \n    def get_quantidade_vertices(self):\n        return self.quantidade_vertices\n\n    def get_lista_adjacencia(self):\n        return self.lista_adjacencia\n    \n    def get_lista_vertices(self):\n        return self.lista_vertices\n\n# Color 0 is blue and color 1 is pink\ndef colore(grafo, vertice_inicial):\n    grafo.get_lista_vertices()[vertice_inicial].set_cor(0)\n\n    fila = deque()\n    \n    fila.append(vertice_inicial)\n\n    while len(fila) > 0:\n        u = fila.popleft()\n\n        for v in grafo.get_lista_adjacencia()[u]:\n            if grafo.get_lista_vertices()[v].get_cor() == -1:\n                # give v the opposite color of u\n                grafo.get_lista_vertices()[v].set_cor(1 - grafo.get_lista_vertices()[u].get_cor())\n\n                # enqueue v for the BFS\n                fila.append(v)\n\ndef checa_bipardido(grafo):\n    for vertice in range(grafo.get_quantidade_vertices()):\n        if grafo.get_lista_vertices()[vertice].get_cor() == -1:\n            colore(grafo, vertice)\n\n    for vertice in range(grafo.get_quantidade_vertices()):\n        for adjacente in grafo.get_lista_adjacencia()[vertice]:\n            if grafo.get_lista_vertices()[vertice].get_cor() == grafo.get_lista_vertices()[adjacente].get_cor():\n                # if two neighbors have the same color, the graph is not bipartite\n                return False\n    \n    # if nothing went wrong, the graph is bipartite\n    return True\n\ndef leitura_arquivo(nome_arquivo):\n    arquivo = open(nome_arquivo, \"r\")\n\n    numero_vertices = int(arquivo.readline())\n    numero_arestas = int(arquivo.readline())\n    \n    grafo = Grafo(numero_vertices)\n    \n    for i in range(numero_arestas):\n        linha = arquivo.readline()\n        valores = linha.split()\n        grafo.get_lista_adjacencia()[int(valores[0])].append(int(valores[1]))\n    \n    return grafo\n\ndef executa(nome_arquivo):\n    grafo = leitura_arquivo(nome_arquivo)\n    if checa_bipardido(grafo):\n        print(\"SIM\")\n    else:\n        print(\"NAO\")\n\n# Main function\ndef main():\n    executa(\"grafo1.txt\")\n    executa(\"grafo2.txt\")\n    executa(\"grafo3.txt\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"exercicios/atividade2/exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
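The exercicio1 sample above 2-colors each component with BFS and then scans every edge for a color conflict. The same check in compact standalone form, on in-memory adjacency lists instead of the sample's grafoN.txt input files:

```python
from collections import deque

def is_bipartite(adj):
    # BFS 2-coloring over an adjacency list {u: [v, ...]}; mirrors the
    # colore()/checa_bipardido() logic in the sample above.
    color = {}
    for start in adj:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            u = queue.popleft()
            for v in adj[u]:
                if v not in color:
                    color[v] = 1 - color[u]
                    queue.append(v)
                elif color[v] == color[u]:
                    return False
    return True

square = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}   # 4-cycle: bipartite
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}            # 3-cycle: not bipartite
print(is_bipartite(square), is_bipartite(triangle))     # True False
```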
+{"seq_id":"641405021","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@Author: yangwenhao\n@Contact: 874681044@qq.com\n@Software: PyCharm\n@File: compute_vad.py\n@Time: 2019/9/20 10:57 AM\n@Overview: Implement VAD using python from kaldi.\n\nThe librosa package loads wav data as float32, while in kaldi and scipy.io it should be int16!!!\n\n\"\"\"\nimport argparse\nimport numpy as np\n# import librosa\n# from python_speech_features import fbank, delta, mfcc\nimport Process_Data.constants as c\n\n# parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')\n# parser.add_argument('--vad-energy-threshold', type=float, default=5.5, metavar='E',\n#                     help='number of epochs to train (default: 10)')\n# parser.add_argument('--vad-energy-mean-scale', type=float, default=0.5, metavar='E',\n#                     help='number of epochs to train (default: 10)')\n# parser.add_argument('--vad-proportion-threshold', type=float, default=0.12, metavar='E',\n#                     help='number of epochs to train (default: 10)')\n# parser.add_argument('--vad-frames-context', type=int, default=2, metavar='E',\n#                     help='number of epochs to train (default: 10)')\n# opts = parser.parse_args()\n\ndef ComputeVadEnergy(feats,\n                     energy_threshold=c.VAD_ENERGY_THRESHOLD,\n                     energy_mean_scale=c.VAD_ENERGY_MEAN_SCALE,\n                     frames_context=c.VAD_FRAMES_CONTEXT,\n                     proportion_threshold=c.VAD_PROPORTION_THRESHOLD):\n\n    output_voiced = []\n    T = len(feats)\n    # output_voiced->Resize(T);\n\n    if (T == 0):\n        print(\"Empty features\")\n        return\n\n    # column zero is log - energy.\n    log_energy = feats[:, 0]\n\n    # CopyColFromMat(feats, 0); // column zero is log-energy.\n    # energy_threshold = opts.vad_energy_threshold\n\n    # if (opts.vad_energy_mean_scale != 0.0):\n    #     assert (opts.vad_energy_mean_scale > 0.0)\n    #     energy_threshold += opts.vad_energy_mean_scale * np.sum(log_energy) / T\n    if ( energy_mean_scale != 0.0):\n        assert(energy_mean_scale > 0.0)\n        energy_threshold += energy_mean_scale * np.sum(log_energy) / T\n\n    # assert (opts.vad_frames_context >= 0);\n    # assert (opts.vad_proportion_threshold > 0.0 and opts.vad_proportion_threshold < 1.0)\n    assert (frames_context >= 0)\n    assert (proportion_threshold > 0.0 and proportion_threshold < 1.0)\n\n    for t in range(0, T):\n        # log_energy_data = log_energy[:][0]\n        num_count = 0\n        den_count = 0\n        # context = opts.vad_frames_context\n\n        # symmetric window [t - frames_context, t + frames_context], as in Kaldi\n        for t2 in range(t - frames_context, t + frames_context + 1):\n            if (t2 >= 0 and t2 < T):\n                den_count += 1\n\n                if (log_energy[t2] > energy_threshold):\n                    num_count += 1\n\n        # if (num_count >= den_count * opts.vad_proportion_threshold):\n        if (num_count >= den_count * proportion_threshold):\n            output_voiced.append(1.0)\n        else:\n            output_voiced.append(0.0)\n\n    return output_voiced\n\n\n# fbank = np.load('Data/dataset/enroll/id10270/5r0dWxy17C8/00001.npy')\n#\n# audio, sr = librosa.load('Data/dataset/enroll/id10270/5r0dWxy17C8/00001.wav', sr=16000, mono=True)\n#\n# from scipy.io import wavfile\n# sample_rate, samples = wavfile.read('Data/dataset/enroll/id10270/5r0dWxy17C8/00001.wav')\n#\n# mfcc1 = mfcc(audio, samplerate=16000, numcep=30, winlen=0.025)\n# mfcc2 = mfcc(samples, samplerate=16000, numcep=30, winlen=0.025)\n#\n# voice1 = []\n# ComputeVadEnergy(mfcc1, voice1)\n#\n# voice2 = []\n# ComputeVadEnergy(mfcc2, voice2)\n\n# print(voice)","sub_path":"Process_Data/Compute_Feat/compute_vad.py","file_name":"compute_vad.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
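ComputeVadEnergy above ports Kaldi's energy VAD: shift the threshold by a fraction of the mean log-energy, then take a proportion vote over a symmetric frame window. Below is a self-contained miniature of that logic with synthetic log-energies and simplified parameter names; it is a sketch of the idea, not the module's actual API:

```python
import numpy as np

def simple_energy_vad(log_energy, threshold=5.0, mean_scale=0.5,
                      context=2, proportion=0.6):
    # Shift the threshold by a fraction of the mean log-energy, then vote
    # over a [t - context, t + context] window, as in the sample above.
    thr = threshold + mean_scale * np.mean(log_energy)
    T = len(log_energy)
    voiced = np.zeros(T)
    for t in range(T):
        window = log_energy[max(0, t - context): min(T, t + context + 1)]
        if np.sum(window > thr) >= proportion * len(window):
            voiced[t] = 1.0
    return voiced

energy = np.array([0.0, 0.0, 10.0, 11.0, 12.0, 0.0, 0.0])
print(simple_energy_vad(energy))  # middle frames are flagged as voiced
```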
default=0.5, metavar='E',\n# help='number of epochs to train (default: 10)')\n# parser.add_argument('--vad-proportion-threshold', type=float, default=0.12, metavar='E',\n# help='number of epochs to train (default: 10)')\n# parser.add_argument('--vad-frames-context', type=int, default=2, metavar='E',\n# help='number of epochs to train (default: 10)')\n# opts = parser.parse_args()\n\ndef ComputeVadEnergy(feats,\n energy_threshold=c.VAD_ENERGY_THRESHOLD,\n energy_mean_scale=c.VAD_ENERGY_MEAN_SCALE,\n frames_context=c.VAD_FRAMES_CONTEXT,\n proportion_threshold=c.VAD_PROPORTION_THRESHOLD):\n\n output_voiced = []\n T = len(feats)\n # output_voiced->Resize(T);\n\n if (T == 0):\n print(\"Empty features\")\n return\n\n # column zero is log - energy.\n log_energy = feats[:, 0]\n\n # CopyColFromMat(feats, 0); // column zero is log-energy.\n # energy_threshold = opts.vad_energy_threshold\n\n # if (opts.vad_energy_mean_scale != 0.0):\n # assert (opts.vad_energy_mean_scale > 0.0)\n # energy_threshold += opts.vad_energy_mean_scale * np.sum(log_energy) / T\n if ( energy_mean_scale != 0.0):\n assert(energy_mean_scale > 0.0)\n energy_threshold += energy_mean_scale * np.sum(log_energy) / T\n\n # assert (opts.vad_frames_context >= 0);\n # assert (opts.vad_proportion_threshold > 0.0 and opts.vad_proportion_threshold < 1.0)\n assert (frames_context >= 0)\n assert (proportion_threshold > 0.0 and proportion_threshold < 1.0)\n\n for t in range(0, T):\n # log_energy_data = log_energy[:][0]\n num_count = 0\n den_count = 0\n # context = opts.vad_frames_context\n\n for t2 in range(t-frames_context-1, t + frames_context):\n if (t2 >= 0 and t2 < T):\n den_count+=1\n\n if (log_energy[t2] > energy_threshold):\n num_count+=1\n\n # if (num_count >= den_count * opts.vad_proportion_threshold):\n if (num_count >= den_count * proportion_threshold):\n output_voiced.append(1.0)\n else:\n output_voiced.append(0.0)\n\n return output_voiced\n\n\n# fbank = np.load('Data/dataset/enroll/id10270/5r0dWxy17C8/00001.npy')\n#\n# audio, sr = librosa.load('Data/dataset/enroll/id10270/5r0dWxy17C8/00001.wav', sr=16000, mono=True)\n#\n# from scipy.io import wavfile\n# sample_rate, samples = wavfile.read('Data/dataset/enroll/id10270/5r0dWxy17C8/00001.wav')\n#\n# mfcc1 = mfcc(audio, samplerate=16000, numcep=30, winlen=0.025)\n# mfcc2 = mfcc(samples, samplerate=16000, numcep=30, winlen=0.025)\n#\n# voice1 = []\n# ComputeVadEnergy(mfcc1, voice1)\n#\n# voice2 = []\n# ComputeVadEnergy(mfcc2, voice2)\n\n# print(voice)","sub_path":"Process_Data/Compute_Feat/compute_vad.py","file_name":"compute_vad.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"100159432","text":"c = get_config()\n\nc.TerminalIPythonApp.display_banner = False\nc.TerminalInteractiveShell.confirm_exit = False\n\n\ndef hexon_ipython():\n \"\"\"Enable hex output formatting.\"\"\"\n formatter = get_ipython().display_formatter.formatters['text/plain']\n formatter.for_type(int, lambda n, p, cycle: p.text(\"0x%x\" % n))\n\n\ndef hexoff_ipython():\n \"\"\"Disable hex output formatting.\"\"\"\n formatter = get_ipython().display_formatter.formatters['text/plain']\n formatter.for_type(int, lambda n, p, cycle: p.text(\"%d\" % n))\n\n\nhexon = hexon_ipython\nhexoff = 
hexoff_ipython\n\n","sub_path":"python/.ipython/profile_default/ipython_config.py","file_name":"ipython_config.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"463818144","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n# __author__ = HaiShenMing\n# 2017/2/9 16:07\n\nfrom socket import *\nimport time\n\nip_port = ('10.104.70.145', 8000)\n\nudp_server = socket(AF_INET, SOCK_DGRAM)\n\nudp_server.bind(ip_port)\n\nwhile True:\n    print('Waiting to receive data...')\n    data, addr = udp_server.recvfrom(1024)\n    print('Data received successfully',data,addr)\n    if not data:\n        fmt = '%Y-%m-%d %X'\n    else:\n        fmt = data.decode('utf-8')\n\n    print('Getting the time...')\n    udp_server.sendto('Getting the time...'.encode('utf-8'),addr)\n    re_data = time.strftime(fmt)\n    print('Sending the time...')\n    udp_server.sendto('Sending the time...'.encode('utf-8'), addr)\n    udp_server.sendto(re_data.encode('utf-8'), addr)\n\nudp_server.close()","sub_path":"m5/d30/UDP_server.py","file_name":"UDP_server.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"69097262","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('social', '0037_auto_20150812_1130'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='notification',\n            name='notif_type',\n            field=models.CharField(choices=[('Wall Post', 'Wall Post'), ('Comment', 'Comment')], max_length=10, default='Wall Post'),\n        ),\n    ]\n","sub_path":"social/migrations/0038_notification_notif_type.py","file_name":"0038_notification_notif_type.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"623283485","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr  2 14:29:41 2018\n\n@author: Patrick Lawlor \n\nMonitoring and validating tool for MS Access DB - compare source data CSV to relevant SQL \ninstruction output to ensure congruence. 
This procedure generates a CSV file output containing \ndiscrepancies at the show/agent/ticket price level, for further investigation by the \ndatabase administrator.\n\n\"\"\"\n\nimport pandas as pd\nimport pyodbc\nimport datetime\n\n#to do - implement graphical user interface to enable file selection at runtime (tkinter?)\n\n#get source data filepath\nfile_path1 = r'C:\\Users\\trevo\\Dropbox\\My projects\\git\\EOS database\\\\'\nfile_path2 = r'M:\\2018 FINANCE FOLDER\\Transaction Processing\\Ticketing\\DB\\Admin\\130818\\\\'\ncsv_date = '110818.csv'\n\n# DB_path1 = (\n# r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n# r'DBQ=C:\\Users\\trevo\\Dropbox\\My projects\\EOS database Access\\TicketingDB.accdb;'\n# )\n\n\n#DB filepath\nDB_path2 = (\n r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n r'DBQ=M:\\2018 FINANCE FOLDER\\Transaction Processing\\Ticketing\\DB\\TicketingDB.accdb;'\n )\n\nfile_path = file_path2\nconn_str = DB_path2\n\n\n# Function: Strip apostrophes from input - all apostrophes are removed from data on DB update\ndef stripAposts(inputStr):\n strList = list(str(inputStr))\n while \"'\" in strList:\n strList.remove(\"'\")\n newStr = \"\".join(strList)\n return newStr\n\n# Function: Convert datetime to date type with no time element\ndef dtConvert(inputDate):\n return datetime.date(inputDate.year, inputDate.month, inputDate.day)\n\n# Function: Convert string date to date object\ndef dtStringConvert(inputString):\n try:\n dt = datetime.datetime.strptime(inputString, '%d/%m/%Y')\n return dtConvert(dt)\n except:\n return inputString\n\n#initialise pyodbc DB Connection \ncon = pyodbc.connect(conn_str)\nrs = con.cursor()\n\n\n#output figures for inactive shows and total shows onscreen for validating final figures\n# 'Eos_Inactive = True' records have dropped out of EOS Financial Summary report (over 2 years old)\ninactive = list(rs.execute(\"SELECT SUM(Sales_Incr), SUM(Gross_Revenue_Incr), SUM(Total_Allocation_Incr) \" \n \"FROM Shows INNER JOIN Ticketing ON Shows.Show_ID = Ticketing.Show_ID \"\n \"WHERE Eos_Inactive = True\"))\n\ntotal = list(rs.execute(\"SELECT SUM(Sales_Incr), SUM(Gross_Revenue_Incr), SUM(Total_Allocation_Incr) \"\n \"FROM Shows INNER JOIN Ticketing ON Shows.Show_ID = Ticketing.Show_ID \"))\n\nInactiveTickets, InactiveRevenue, InactiveAllocation = inactive[0]\nTotalTickets, TotalRevenue, TotalAllocation = total[0] \n\n\n# Query aggregate ticketing and revenue figures grouped by artist, show/venue, \n#agent and ticket price into DataFrame\n\nSQLjoin = (\n \"((((Shows INNER JOIN Artists ON Shows.Artist_ID = Artists.Artist_ID) \"\n \"INNER JOIN Venues ON Shows.Venue_ID = Venues.Venue_ID) \"\n \"INNER JOIN Ticketing ON Shows.Show_ID = Ticketing.Show_ID) \"\n \"INNER JOIN Agents ON Agents.Agent_ID = Ticketing.Agent_ID) \"\n )\n\nSQL = list(rs.execute(\"SELECT Artist_Master, Show_Date, Venue_Master, Agents_Holds_Master, Ticket_Price, \"\n \"SUM(Sales_Incr), SUM(Gross_Revenue_Incr), SUM(Total_Allocation_Incr) \"\n \"FROM \" + SQLjoin + \" \"\n \"WHERE Eos_Inactive <> True \"\n \"GROUP BY Artist_Master, Show_Date, Venue_Master, Agents_Holds_Master, Ticket_Price\"))\n\npdSQL = pd.DataFrame.from_records(SQL, columns=['Artist_Master', 'Show_Date', 'Venue_Master', \n 'Agents_Holds_Master', 'Ticket_Price', 'Total Sales', \n 'Total Gross Revenue', 'Total Allocation']).fillna(0)\n\n#format Show Date to Date object and standardise upper/lower case of Agent \npdSQL['Show_Date'] = pdSQL['Show_Date'].apply(dtConvert)\npdSQL['Agents_Holds_Master'] = 
pdSQL['Agents_Holds_Master'].apply(lambda x: x.title())\npdSQL['Artist_Master'] = pdSQL['Artist_Master'].apply(lambda x: x.strip())\n\n#aggregate as sum by Artist/Show Date/Venue/Agent/Ticket Price\npdSQL2 = pdSQL.groupby(['Artist_Master','Show_Date', 'Venue_Master', 'Agents_Holds_Master', 'Ticket_Price']).sum()\n \n# Venue names in source data need to be remapped onto standardised DB entries\nVenueRemap = list(rs.execute(\"SELECT Venue, Venue_Master FROM Venues INNER JOIN VenueAlt ON Venues.Venue_ID = VenueAlt.Venue_ID\"))\nVenueRemap = {x: y for (x,y) in VenueRemap}\n\n# Bring in source data from csv\ncsvSource = pd.read_csv(file_path + csv_date, header = 5, encoding='ISO-8859-1')\n\n#clean/standardise csv data - merge Ticket Agent and Holds Description fields into one, apply Venue remap,\n# apply apostrophe strip function to relevant string fields, replace N/A's with 0, convert dates to Date object\ncsvSource = csvSource.dropna(how='all') # dropna returns a new frame, so it must be assigned\ncsvSource['Artist'] = csvSource['Artist'].apply(stripAposts).apply(lambda x: x.strip())\ncsvSource['Ticket Agent'] = csvSource['Ticket Agent'].fillna(csvSource['Holds Description'])\ncsvSource['Ticket Agent'] = csvSource['Ticket Agent'].apply(stripAposts).apply(lambda x: x.title())\ncsvSource['Venue'] = csvSource['Venue'].apply(stripAposts)\ncsvSource['Venue'] = csvSource['Venue'].map(VenueRemap)\ncsvSource = csvSource.fillna(0) # fillna also returns a new frame\ncsvSource['Show Date'] = csvSource['Show Date'].apply(str).apply(dtStringConvert)\n\n#extract/rename/aggregate required columns from csv same as for DB output\ncsvSource2 = pd.DataFrame(csvSource[['Artist', 'Show Date', 'Venue', 'Ticket Agent', 'Ticket Price', 'Sales', 'Gross Revenue', 'Total Allocation']])\ncsvSource2.columns=['Artist_Master','Show_Date','Venue_Master','Agents_Holds_Master','Ticket_Price','Total Sales', 'Total Gross Revenue', 'Total Allocation']\ncsvSource2 = csvSource2.groupby(['Artist_Master','Show_Date','Venue_Master','Agents_Holds_Master','Ticket_Price']).sum()\n\n#get sum totals of Total Sales, Gross Revenue and Total Allocation from source data for later\nCSVTickets = csvSource2['Total Sales'].sum()\nCSVGrossRevenue = csvSource2['Total Gross Revenue'].sum()\nCSVTotalAllocation = csvSource2['Total Allocation'].sum()\n\n\n#output csv of DB minus Source Data matching by Artist/Date/Venue/Agent/Price\n# with nulls replaced with 0, values rounded and output restricted to just rows with differences\nrec = csvSource2.sub(pdSQL2, fill_value=0)\nrec = rec.fillna(0)\nrec['Total Sales'] = rec['Total Sales'].apply(lambda x: round(x, 0))\nrec['Total Gross Revenue'] = rec['Total Gross Revenue'].apply(lambda x: round(x, 2))\nrec['Total Allocation'] = rec['Total Allocation'].apply(lambda x: round(x, 0))\n\nrec['Output'] = (rec['Total Sales'] != 0) | (rec['Total Gross Revenue'] != 0) | (rec['Total Allocation'] != 0)\nrec = rec[rec['Output'] == True]\n\n#get sum totals of Total Sales, Gross Revenue and Total Allocation from output data\nrecTickets = rec['Total Sales'].sum()\nrecGrossRevenue = rec['Total Gross Revenue'].sum()\nrecTotalAllocation = rec['Total Allocation'].sum()\n\n#print extracted totals from earlier with input CSV totals/output CSV totals and cross-check\nprint(\"\\nInput CSV: \")\nprint(\"\\tTickets:\\t\" + str(round(CSVTickets, 0)))\nprint(\"\\tRevenue:\\t\" + str(round(CSVGrossRevenue, 2)))\nprint(\"\\tAllocation:\\t\" + str(round(CSVTotalAllocation, 2)))\nprint(\"\\nInactive: \")\nprint(\"\\tTickets:\\t\" + str(round(InactiveTickets, 0)))\nprint(\"\\tRevenue:\\t\" + str(round(InactiveRevenue, 
2)))\nprint(\"\\tAllocation:\\t\" + str(round(InactiveAllocation, 0)))\nprint(\"\\nOutput CSV: \")\nprint(\"\\tTickets:\\t\" + str(round(recTickets, 0)))\nprint(\"\\tRevenue:\\t\" + str(round(recGrossRevenue, 2)))\nprint(\"\\tAllocation:\\t\" + str(round(recTotalAllocation, 0)))\nprint(\"\\nCalculated Total:\")\nprint(\"\\tTickets:\\t\" + str(round(CSVTickets + InactiveTickets - recTickets, 0)))\nprint(\"\\tRevenue:\\t\" + str(round(CSVGrossRevenue + InactiveRevenue - recGrossRevenue, 2)))\nprint(\"\\tAllocation:\\t\" + str(round(CSVTotalAllocation + InactiveAllocation - recTotalAllocation, 0)))\nprint(\"\\nActual Total: \")\nprint(\"\\tTickets:\\t\" + str(round(TotalTickets, 0)))\nprint(\"\\tRevenue:\\t\" + str(round(TotalRevenue, 2)))\nprint(\"\\tAllocation:\\t\" + str(round(TotalAllocation, 0)))\nprint(\"\\nDifference:\")\nprint(\"\\tTickets:\\t\" + str(round(CSVTickets + InactiveTickets - recTickets - TotalTickets, 0)))\nprint(\"\\tRevenue:\\t\" + str(round(CSVGrossRevenue + InactiveRevenue - recGrossRevenue - TotalRevenue, 2)))\nprint(\"\\tAllocation:\\t\" + str(round(CSVTotalAllocation + InactiveAllocation - recTotalAllocation - TotalAllocation, 0)))\n\nrec.to_csv(file_path + 'rec_out.csv')\n\ncon.close()\n\n\n\n","sub_path":"EOS_DB_validate.py","file_name":"EOS_DB_validate.py","file_ext":"py","file_size_in_byte":8636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493560331","text":"# Time Complexity: O(N*M) where N and M are rows and columns of the matrix\n# Space Complexity: O(1)\n# Did this code successfully run on Leetcode : Yes\n# Three line explanation of solution in plain english: traverse through the matrix using direction upwards\n# and downwards. Move along, making sure that the next element encountered is within array bounds. 
After traversing\n# one diagonal in a given direction, flip the direction.\n\nclass Solution:\n\n    def findDiagonalOrder(self, matrix):\n\n        # edge case - empty array\n        if not matrix or not matrix[0]:\n            return []\n\n        # The dimensions of the matrix\n        i = len(matrix)\n        j = len(matrix[0])\n\n        row = 0\n        column = 0\n        # to understand the direction of the traversal\n        direction = 1\n        result_array = []\n\n        while row < i and column < j:\n            # add the current element to the result\n            result_array.append(matrix[row][column])\n\n            new_row = row + (-1 if direction == 1 else 1)\n            new_column = column + (1 if direction == 1 else -1)\n\n            # if the next element would fall out of bounds,\n            # step to the start of the next diagonal instead\n            if new_row < 0 or new_row == i or new_column < 0 or new_column == j:\n\n                # direction 1 means we were moving up-right\n                if direction:\n\n                    row += (column == j - 1)\n                    column += (column < j - 1)\n                else:\n                    column += (row == i - 1)\n                    row += (row < i - 1)\n\n                # Change the direction of traversal\n                direction = 1 - direction\n            else:\n                row = new_row\n                column = new_column\n\n        return result_array\n\n\nr = Solution()\ninput_array = [\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9]\n]\nprint(r.findDiagonalOrder(input_array))\n","sub_path":"arrayDiagonalTraverse.py","file_name":"arrayDiagonalTraverse.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"429300558","text":"\"\"\" =================================================================\nFile: SentimentDetector.py\nThis file contains code for the SentimentDetector class that can detect\nsentiment from text input using TextBlob.\nAuthors: Anh Nguyen, Lily Irvin, Ryan Specht\n ===================================================================\"\"\"\n\nfrom textblob import TextBlob\n\n\nclass SentimentDetector:\n    \"\"\"Represents a sentiment detector object\"\"\"\n\n    def getSentiment(self, sentence):\n        \"\"\"Detects the sentiment of a sentence using TextBlob\"\"\"\n        txt = TextBlob(sentence)\n        sentiment = \"\"\n        polarity = txt.sentiment.polarity\n\n        # The 5 sentiments available are very positive, somewhat positive,\n        # neutral, somewhat negative, and very negative\n        if -0.2 <= polarity <= 0.2:\n            sentiment = \"neutral\"\n        elif 0.2 < polarity <= 0.6:\n            sentiment = \"somewhat positive\"\n        elif 0.6 < polarity <= 1.0:\n            sentiment = \"very positive\"\n        elif -0.6 <= polarity < -0.2:\n            sentiment = \"somewhat negative\"\n        elif -1 <= polarity < -0.6:\n            sentiment = \"very negative\"\n\n        return sentiment\n","sub_path":"SentimentDetector.py","file_name":"SentimentDetector.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"595987218","text":"import unittest\nfrom constants import CONDITIONS_PATH\nfrom cond.template_loader import (\n    TemplateLoader,\n    TypeTemplateLoader,\n    StrHeader,\n    IntHeader,\n    ListHeader,\n)\n\n\nclass TestTemplateLoader(unittest.TestCase):\n\n    def setUp(self):\n        self.e = [\n            'number_code',\n            'title',\n            'stage',\n            'calc_type',\n            'activation_flags',\n            'shc_places',\n            'substates',\n        ]\n        self.t = TemplateLoader(CONDITIONS_PATH)\n        self.t.add_headers(self.e)\n\n    def tearDown(self):\n        self.t.clear_headers()\n\n    def test_add_header(self):\n        t = TemplateLoader(CONDITIONS_PATH)\n        self.assertFalse(t.headers)\n        t.add_header('number_code')\n        self.assertListEqual(['number_code'], t.get_headers())\n\n    def test_add_headers(self):\n        t = TemplateLoader(CONDITIONS_PATH)\n        self.assertFalse(t.headers)\n        t.add_headers(self.e)\n        self.assertListEqual(self.e, t.headers)\n\n    def test_load(self):\n        
self.assertTrue(self.t)\n self.assertTrue(self.t.wb)\n self.assertTrue(self.t.ws_conditions)\n\n def test_get_raw_headers(self):\n self.assertListEqual(self.e, self.t.get_raw_headers())\n\n def test_is_valid_headers(self):\n self.assertTrue(self.t.is_valid_headers())\n self.t.headers = ['XXX', 'YYY']\n self.assertEqual(self.e, self.t.get_raw_headers())\n self.assertFalse(self.t.is_valid_headers())\n\n\nclass TestTypeTemplateLoader(unittest.TestCase):\n\n def setUp(self):\n self.loader = TypeTemplateLoader(CONDITIONS_PATH)\n\n def tearDown(self):\n self.loader.clear_headers()\n\n def test_str_header(self):\n number_code = StrHeader('number_code')\n self.assertEqual('T1_000', number_code.get_value('T1_000'))\n\n self.loader.add_header(number_code)\n self.assertDictEqual({\n 'number_code': 'T1_000',\n }, self.loader.get_row_dict(('T1_000', )))\n\n def test_int_header(self):\n stage = IntHeader('stage')\n self.assertEqual(1, stage.get_value('1'))\n self.assertEqual(0, stage.get_value('_23_'))\n\n self.loader.add_header(stage)\n self.assertEquals({\n 'stage': 1\n }, self.loader.get_row_dict(('1', )))\n\n def test_list_header(self):\n expected_list = [\n 'tab_buses',\n 'single'\n ]\n activation_flags = ListHeader('activation_flags')\n self.assertEqual(expected_list, activation_flags.get_value('tab_buses, single'))\n self.assertEqual(expected_list, activation_flags.get_value('\\ttab_buses, \\nsingle\\n'))\n self.assertEqual([], activation_flags.get_value(''))\n\n self.loader.add_header(activation_flags)\n self.assertEquals({\n 'activation_flags': [\n 'tab_buses',\n 'single'\n ]\n }, self.loader.get_row_dict(('tab_buses, single', )))\n\n def test_construct_template(self):\n self.loader.add_headers([\n StrHeader('number_code'),\n StrHeader('title'),\n IntHeader('stage'),\n StrHeader('calc_type'),\n ListHeader('activation_flags'),\n ListHeader('shc_places'),\n StrHeader('substates'),\n ])\n\n expected = {\n 'number_code': 'D1_000',\n 'title': 'Отстройка 1',\n 'stage': 1,\n 'calc_type': 'detuning',\n 'activation_flags': [\n 'tab_buses',\n 'single',\n ],\n 'shc_places': [\n 'tab_buses',\n 'single',\n ],\n 'substates': 'empty',\n }\n\n self.assertDictEqual(expected, self.loader.load_template(1))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tests/cond/test_template_loader.py","file_name":"test_template_loader.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419689022","text":"from os import getcwd, listdir, path, remove\nfrom re import split as resplit\n\nfrom cv2 import (COLOR_BGR2GRAY, TM_CCOEFF_NORMED, Canny, copyTo, cvtColor,\n fillConvexPoly, imdecode, imshow, matchTemplate, minMaxLoc, resize, imshow, waitKey)\nfrom cv2 import split as cvsplit\nfrom cv2 import waitKey\nfrom numpy import array, fromfile, zeros, ndarray\n\n\ndef imreadCH(filename):\n if isinstance(filename, str):\n return imdecode(fromfile(filename,dtype=\"uint8\"),-1)\n elif isinstance(filename, ndarray):\n return filename\n\ndef find_template(im_source, im_search, threshold=0.5, rgb=False, bgremove=False):\n '''\n Locate image position with cv2.templateFind\n\n Use pixel match to find pictures.\n\n Args:\n im_source(string): 图像、素材\n im_search(string): 需要查找的图片\n threshold: 阈值,当相识度小于该阈值的时候,就忽略掉\n\n Returns:\n A tuple of found [(point, score), ...]\n\n Raises:\n IOError: when file read error\n '''\n #本函数来自于 https://github.com/NetEaseGame/aircv ,做了一定修改\n\n method = TM_CCOEFF_NORMED\n\n if rgb:\n s_bgr = cvsplit(im_search) # Blue Green 
Red\n i_bgr = cvsplit(im_source)\n weight = (0.3, 0.3, 0.4)\n resbgr = [0, 0, 0]\n for i in range(3): # bgr\n resbgr[i] = matchTemplate(i_bgr[i], s_bgr[i], method)\n res = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]\n else:\n s_gray = cvtColor(im_search, COLOR_BGR2GRAY)\n i_gray = cvtColor(im_source, COLOR_BGR2GRAY)\n # 边界提取(来实现背景去除的功能)\n if bgremove:\n s_gray = Canny(s_gray, 100, 200)\n i_gray = Canny(i_gray, 100, 200)\n\n res = matchTemplate(i_gray, s_gray, method)\n w, h = im_search.shape[1], im_search.shape[0]\n\n min_val, max_val, min_loc, max_loc = minMaxLoc(res)\n top_left = max_loc\n if max_val < threshold:\n return None\n # calculator middle point\n middle_point = (top_left[0]+w/2, top_left[1]+h/2)\n result = dict(\n result=middle_point,\n rectangle=(top_left, (top_left[0], top_left[1] + h), (top_left[0] + w, top_left[1]), (top_left[0] + w, top_left[1] + h)),\n confidence=max_val)\n return result\n\ndef picRead(pics):\n temp = []\n tempDict = dict()\n if isinstance(pics,list):\n for eachPic in pics:\n tempDict = dict()\n tempDict['pic'] = imreadCH(eachPic)\n tempDict['obj'] = resplit(r'[\\\\ /]', eachPic)[-1]\n temp.append(tempDict)\n return temp\n else:\n tempDict['pic'] = imreadCH(pics)\n tempDict['obj'] = resplit(r'[\\\\ /]', pics)[-1]\n return tempDict\n\ndef matchImg_roi(imgsrc, imgobj, roi, confidencevalue=0.8,targetSize=(1440, 810)):\n '返回roi中的位置'\n if isinstance(imgsrc,str):\n imsrc = imreadCH(imgsrc)\n else:\n imsrc = imgsrc\n\n if targetSize != (0,0):\n imsrc = resize(imsrc, targetSize)\n\n x0 = int(roi[0])\n x1 = int(roi[0] + roi[2])\n y0 = int(roi[1])\n y1 = int(roi[1] + roi[3])\n\n if x0 > imsrc.shape[1] or y0 > imsrc.shape[0]:\n return None\n if x1 > imsrc.shape[1]:\n x1 = imsrc.shape[1]\n if y1 > imsrc.shape[0]:\n y1 = imsrc.shape[0]\n\n imsrc = imsrc[y0:y1, x0:x1]\n return matchImg(imsrc, imgobj, confidencevalue, targetSize = (0, 0))\n\ndef matchMultiImg_roi(imgsrc, imgobj, roi, confidencevalue=0.8, targetSize=(1440, 810)):\n '返回真实位置'\n if isinstance(imgsrc,str):\n imsrc = imreadCH(imgsrc)\n else:\n imsrc = imgsrc\n\n if targetSize != (0,0):\n imsrc = resize(imsrc, targetSize)\n\n x0 = int(roi[0])\n x1 = int(roi[0] + roi[2])\n y0 = int(roi[1])\n y1 = int(roi[1] + roi[3])\n\n if x0 > imsrc.shape[1] or y0 > imsrc.shape[0]:\n return None\n if x1 > imsrc.shape[1]:\n x1 = imsrc.shape[1]\n if y1 > imsrc.shape[0]:\n y1 = imsrc.shape[0]\n\n imsrc = imsrc[y0:y1, x0:x1]\n ans = matchMultiImg(imsrc, imgobj, confidencevalue = confidencevalue, isResize = False)\n ansReal = None\n if ans != None:\n ans = ans[0]\n if ans != None:\n ansReal = []\n for eachPos in ans:\n ansReal.append([eachPos[0] + x0, eachPos[1] + y0])\n return ansReal\n\ndef matchImg(imgsrc,imgobj,confidencevalue=0.8,targetSize=(1440, 810)): #imgsrc=原始图像,imgobj=待查找的图片\n '用于查找原始图片中的单一目标图片,如果原始图片中可找到多个目标图片,则随机返回一个匹配的结果,返回值为一个字典'\n try:\n if isinstance(imgsrc,str):\n imsrc = imreadCH(imgsrc)\n else:\n imsrc = imgsrc\n except RuntimeError:\n return None\n #imobj = imread(imgobj)\n if isinstance(imgobj,str):\n imobj = imreadCH(imgobj)\n elif isinstance(imgobj, dict):\n imobj = imgobj['pic'] #现在此情况传入的一定是字典\n else:\n imobj = imgobj\n\n if targetSize != (0,0):\n imsrc = resize(imsrc, targetSize)\n\n if isinstance(confidencevalue, list):\n for i in confidencevalue:\n match_result = find_template(imsrc,imobj,i)\n if match_result != None:\n break\n else:\n match_result = find_template(imsrc,imobj,confidencevalue)\n #match_result = None\n if match_result != None:\n if isinstance(imgobj, str):\n 
match_result['obj'] = resplit(r'[\\\\ /]', imgobj)[-1]\n elif isinstance(imgobj, dict):\n match_result['obj'] = imgobj['obj']\n else:\n match_result['obj'] = 'numpy'\n\n return match_result\n\n\ndef matchMultiImg(imgsrc, imgobj, confidencevalue=0.8, targetSize = (1440, 810), maxReturn=-1, isResize = True, colorSpace = (0,0,0), debugMode = False):\n '用于查找原始图片中的多个目标图片,若不存在图片则返回None,否则返回一个目标图片坐标构成的元组;imgsrc为原始图片路径,imgobj为目标图片路径,confidencevalue为置信度,maxReturn在非负的情况下只会返回相应数值的坐标,为0则永远返回None]'\n maxReturn = int(maxReturn)\n if isinstance(imgsrc,str):\n try:\n imsrc = imreadCH(imgsrc)\n except RuntimeError:\n return None\n else:\n imsrc = imgsrc\n if isResize:\n imsrc = resize(imsrc, targetSize)\n if isinstance(imgobj,str):\n imobj = imreadCH(imgobj)\n elif isinstance(imgobj, dict):\n imobj = imgobj['pic'] #现在此情况传入的一定是字典\n else:\n imobj = imgobj\n matchRect = []\n matchPositionXY = []\n while True:\n match_result = find_template(imsrc,imobj,confidencevalue) \n #match_result = None\n if match_result != None and maxReturn != 0:\n matchPositionXY.append(list(match_result['result']))\n maxReturn -= 1\n matchRect.append(match_result['rectangle'])\n rect = array([match_result['rectangle'][0],match_result['rectangle'][1],match_result['rectangle'][3],match_result['rectangle'][2]])\n fillConvexPoly(imsrc,rect,0)\n else:\n break\n if debugMode:\n imshow('img', imsrc)\n waitKey(0)\n return [matchPositionXY,imsrc,matchRect] if matchPositionXY != [] else [None,imsrc,None]\n \ndef levelOcr(imgsrc):\n allNumList = []\n confidence = 0.88 #调试时使用相似度\n\n mask = zeros((810,1440),dtype = \"uint8\")\n nowLevel = imreadCH(imgsrc)\n nowLevel = resize(nowLevel, (1440, 810))\n operationList = matchMultiImg(nowLevel, cwd + \"/res/fontLibrary/other/OPERATION.png\", colorSpace=0)\n if operationList[2] != None:\n for eachRect in operationList[2]:\n opRect = array([(eachRect[0][0]-45,eachRect[0][1]),(eachRect[0][0]+106,eachRect[0][1]),\n (eachRect[0][0]+106,eachRect[0][1]+50),(eachRect[0][0]-45,eachRect[0][1]+50)])\n fillConvexPoly(mask,opRect,(255,255,255))\n operationWGList = matchMultiImg(nowLevel, cwd + \"/res/fontLibrary/other/OPERATIONWG.png\", colorSpace=0)\n if operationWGList[2] != None:\n for eachRect in operationWGList[2]:\n opRect = array([(eachRect[0][0]-45,eachRect[0][1]),(eachRect[0][0]+106,eachRect[0][1]),\n (eachRect[0][0]+106,eachRect[0][1]+50),(eachRect[0][0]-45,eachRect[0][1]+50)])\n fillConvexPoly(mask,opRect,(255,255,255))\n #cv2.imshow('mask',mask)\n #cv2.waitKey(0)\n nowLevel = copyTo(nowLevel,mask)\n #imshow('op',nowLevel)\n #waitKey(0)\n for num in fontLibraryB:\n matchResult = matchMultiImg(nowLevel, cwd + \"/res/fontLibrary/B/\" + num, confidencevalue=confidence, colorSpace=0)\n oneNumList = matchResult[0]\n #nowLevel = matchResult[1]\n if oneNumList == None:\n continue\n else:\n for each in oneNumList:\n each.append(path.splitext(num)[0])\n allNumList.extend(oneNumList)\n oneNumList = []\n #cv2.imshow('B',nowLevel)\n #cv2.waitKey(0)\n for num in fontLibraryW:\n oneNumList = matchMultiImg(nowLevel, cwd + \"/res/fontLibrary/W/\" + num, confidencevalue=confidence, colorSpace=0, debugMode=False)[0]\n if oneNumList == None:\n continue\n else:\n for each in oneNumList:\n each.append(path.splitext(num)[0])\n allNumList.extend(oneNumList)\n oneNumList = []\n #cv2.imshow('W',nowLevel)\n #cv2.waitKey(0)\n if allNumList == []:\n return None\n \n return levelAnalyse(allNumList)\n \n\ndef levelAnalyse(levelList):\n if levelList == []:\n return dict()\n levelList.sort(key = lambda x:x[0])\n\n count = 0\n totalx = 
0\n    totaly = 0\n    totalNum = ''\n    eachNum = 0\n    interval = 1\n    beginNum = 0\n    sndList = []\n    dictResult = dict()\n    lastx = levelList[0][0]\n    lasty = levelList[0][1]\n\n    for eachLetter in levelList:\n        \n        if eachLetter[0] - lastx <40:\n            if abs(eachLetter[1] - lasty) < 5:\n                if eachLetter[0] - lastx <25:\n                    totalNum += eachLetter[2]\n                else:\n                    totalNum = totalNum + '-' + eachLetter[2]\n                lastx = eachLetter[0]\n                lasty = eachLetter[1]\n            else:\n                sndList.append(eachLetter)\n                continue\n        else:\n            dictResult[totalNum] = [lastx,lasty]\n            lastx = eachLetter[0]\n            lasty = eachLetter[1]\n            totalNum = eachLetter[2]\n    \n    dictResult[totalNum] = [lastx,lasty]\n    sndrResult = levelAnalyse(sndList)\n    dictResult.update(sndrResult)\n    \n    #print(dictResult) # enable when debugging\n    return dictResult\n\ncwd = getcwd()\nfontLibraryB = listdir(cwd + \"/res/fontLibrary/B\")\nfontLibraryW = listdir(cwd + \"/res/fontLibrary/W\")\n","sub_path":"foo/pictureR/pictureFind.py","file_name":"pictureFind.py","file_ext":"py","file_size_in_byte":10906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"213549123","text":"import pyaudio\nimport random\nfrom oscillator import play_frequencies\n\n# numpy.set_printoptions(threshold=numpy.nan)\n# NOTE: Oscillator and Oscillator2 are never imported, so these aliases are\n# commented out to avoid a NameError at import time.\n# square = Oscillator\n# sine = Oscillator2\n\ndef bassline():\n    frequency = 200\n    volume = .25\n    for i in range(1000000):\n        play_frequencies(\n            stream,\n            .15,\n            volume,\n            300,\n            300,\n            frequency,\n            random.choice([frequency * 2/1, frequency + 5, frequency - 5, frequency, frequency * 3/2])\n        )\n        change = random.choice([-75, -75, -7, 7, 1, 2, 3, 4, 100, -125])\n\n        print ('frequency: ', frequency, 'change: ', change, 'volume: ', volume)\n        if frequency > 150 or not frequency < 40:\n            volume = random.choice([.25, .25, .25, .3, .3, .5, 0, 0])\n        else:\n            volume = random.choice([.4, .5])\n\n        if frequency < 0:\n            frequency = random.choice([50, 100, 200, 300])\n        else:\n            frequency = frequency + change\n\nif __name__ == '__main__':\n    p = pyaudio.PyAudio()\n    stream = p.open(format=pyaudio.paFloat32,\n                    channels=1,\n                    rate=44100,\n                    output=1,\n                    frames_per_buffer=6615)\n    # run inside the guard so stream is defined before bassline() uses it\n    bassline()\n\n\n\n","sub_path":"audio/bassline.py","file_name":"bassline.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"333086761","text":"#!/usr/bin/python\n\n\n# To run this program, use the following command:\n#\n#   python leap_year_no_error_checking.py YEAR\n#\n# where YEAR is a valid integer\n#\n# Example usage:\n#\n#   $ python david_gasper_hw1.py 2012\n#   2012 is a leap year\n\n\nimport sys\n\nif (len(sys.argv) != 2):\n\tprint (\"Incorrect number of arguments supplied!\")\nelse:\n\ttry:\n\t\tyear = int(sys.argv[1])\n\t\t# a year is a leap year if it is divisible by 4, except for century\n\t\t# years, which are leap years only when divisible by 400\n\t\tif ((year % 4 == 0 and not(year % 100 == 0)) or year % 400 == 0):\n\t\t\tprint (str(year) + \" is a leap year\")\n\t\telse:\n\t\t\tprint (str(year) + \" is not a leap year\")\n\texcept ValueError:\n\t\tprint (\"Argument is not a valid year!\")\n","sub_path":"leap_year_no_error_checking.py","file_name":"leap_year_no_error_checking.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"10046126","text":"__author__ = 'jideobs'\n\nimport logging\nfrom datetime import datetime\n\nimport webapp2\nfrom webapp2_extras import sessions\nfrom webapp2_extras import auth\n\nfrom webapp2_extras.auth import InvalidAuthIdError\n\nfrom webapp2_extras.auth import InvalidPasswordError\n\nfrom google.appengine.ext import 
ndb\n\nfrom lib import tools\nfrom bin.models import models\n\n\nclass BaseHandler(webapp2.RequestHandler):\n def __init__(self, request=None, response=None):\n super(BaseHandler, self).__init__(request=request, response=response)\n\n @webapp2.cached_property\n def auth(self):\n return auth.get_auth()\n\n @webapp2.cached_property\n def user_info(self):\n return self.auth.get_user_by_session()\n\n @webapp2.cached_property\n def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None\n\n @webapp2.cached_property\n def user_model(self):\n return self.auth.store.user_model\n\n @webapp2.cached_property\n def session(self):\n return self.session_store.get_session(backend=\"datastore\")\n\n def render_template(self, view_filename, params={}):\n jinja_env = self.app.config.get('template')\n template_environment = jinja_env.get_template(view_filename)\n self.response.write(template_environment.render(params))\n\n def dispatch(self):\n self.session_store = sessions.get_store(request=self.request)\n try:\n webapp2.RequestHandler.dispatch(self)\n finally:\n self.session_store.save_sessions(self.response)\n\n @tools.json_out\n def handle_exception(self, exception, debug):\n\n logging.exception(exception)\n logging.debug(debug)\n\n # Todo: Catching datastore errors\n if isinstance(exception, webapp2.HTTPException):\n self.response.status = exception.code\n return 'error', {'status_code': exception.code, 'message': exception.message}\n # elif isinstance(exception, datastore_errors.BadValueError):\n # self.response.status = 400\n # return 'error', {'message': exception.message}\n elif isinstance(exception, KeyError):\n self.response.status = 400\n return 'error', {'message': \"Uninitialized data '%s'\" % exception.message}\n else:\n self.response.status = 500\n return 'error', {'status_code': 500, 'message': 'Internal server error. 
Try Again'}\n\n def options(self):\n self.response.headers['Access-Control-Allow-Origin'] = '*'\n self.response.headers['Access-Control-Allow-Headers'] = 'Content-Type'\n\n\nclass DefaultHandler(BaseHandler):\n def get(self):\n self.render_template('index.html')\n\n\nclass HomeHandler(BaseHandler):\n # @tools.user_required\n def get(self):\n self.render_template('home.html')\n\n\nclass LoginHandler(BaseHandler):\n @tools.json_in\n @tools.json_out\n def post(self):\n username = self.request.json.get('username')\n password = self.request.json.get('password')\n\n try:\n user = self.auth.get_user_by_password(username, password)\n except InvalidAuthIdError:\n code = 168\n logging.info('%s tried to login:' % username)\n error_msg = 'Invalid username'\n self.response.status = 404\n return 'error', {'code': code, 'message': error_msg}\n except InvalidPasswordError:\n code = 169\n logging.info('%s tried to login with a wrong password:' % username)\n error_msg = 'Invalid password'\n self.response.status = 404\n return 'error', {'code': code, 'message': error_msg}\n else:\n return 'data', user\n\n\nclass UserHandler(BaseHandler):\n @tools.json_in\n @tools.json_out\n def reset_password(self):\n user_data = self.request.json\n user = ndb.Key(models.Users, user_data['unique_id']).get()\n if user:\n # send mail to user as to resetting password\n user_id = user.key.get_id()\n token = self.user.create_reset_token(user_id)\n reset_url = self.uri_for('reset', user_id=user_id, reset_token=token)\n message = 'Please click the link below to reset your password\\n.%s' % reset_url\n tools.send_mail('no-reply@reachforge.io', user.email, 'Password reset', message)\n return 'error', {'message': 'Reset instructions sent to user email'}\n else:\n self.response.status = 404\n return 'error', {'message': 'User does not exist!'}\n\n @tools.json_in\n @tools.json_out\n def reset(self, user_id, reset_token):\n user_data = self.request.json\n user, ts = self.user_model.get_by_auth_token(int(user_id), reset_token, 'reset')\n if user:\n user.password = user_data['password']\n user.put()\n self.user.delete_reset_token(int(user_id), reset_token)\n return 'success', {'message': 'Password successfully changed!'}\n else:\n self.response.status = 404\n return 'error', {'code': 172, 'message': 'User with that reset token does not exist!'}\n\n @tools.user_required\n @tools.json_in\n # @tools.json_out\n def put(self):\n user_data = self.request.json\n for name, value in user_data.iteritems():\n setattr(self.user, name, value)\n logging.info(getattr(self.user, name))\n logging.info(self.user.put())\n # for item in user_data:\n\n def head(self, unique_id):\n self.response.headers['Access-Control-Allow-Origin'] = '*'\n self.response.headers['Access-Control-Allow-Headers'] = 'Content-Type'\n user = ndb.Key(models.Users, unique_id).get()\n if not user:\n self.response.status = 404\n\n\nclass SignUpHandler(BaseHandler):\n @tools.json_in\n @tools.json_out\n def post(self):\n user_data = self.request.json\n ward = ndb.Key(urlsafe=user_data['ward']).get()\n if not ward:\n self.response.status = 400\n return 'error', {'message': 'Ward sent to server does not exist'}\n\n unique_properties = ['email', 'phone_number']\n username = user_data.get('username')\n user = self.user_model.create_user(username, unique_properties,\n username=username, first_name=user_data['first_name'],\n last_name=user_data['last_name'],\n password_raw=user_data['password'],\n sex=user_data['sex'], email=user_data['email'],\n phone_number=user_data['phone_number'],\n 
address=user_data['address'],\n date_of_birth=datetime.strptime(user_data['dob'], '%Y-%M-%d'),\n ward=ward.ward, verified=False)\n if not user[0]:\n\n if 'auth_id' in user[1][0]:\n reason = 'Username'\n else:\n reason = user[1][0].capitalize()\n return 'error', {'message': '%s has already been registered' % reason}\n else:\n user = user[1]\n user_id = user.get_id()\n token = self.user_model.create_signup_token(user_id)\n verification_url = self.uri_for('verification', user_id=user_id, signup_token=token)\n message = 'Thank you for registering on ReachForge platform, click on the link below to ' \\\n 'verify your account\\n %s' % verification_url\n tools.send_mail('no-reply@reachforge.io', user.email, 'Verify account', message)\n user_data['id'] = user.key.urlsafe()\n return 'data', user_data\n\n\nclass LogoutHandler(BaseHandler):\n def get(self):\n self.auth.unset_session()\n self.redirect(self.uri_for('default'))\n\n\nclass VerificationHandler(BaseHandler):\n def post(self, user_id, signup_token):\n user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token, 'signup')\n if not user:\n self.response.status = 404\n return 'error', {'message': 'User does not exist for verification'}\n\n # self.auth.set_session(self.auth.store.user_to_dict(user))\n self.user_model.delete_signup_token(user.get_id(), signup_token)\n if not user.verified:\n user.verified = True\n user.put()\n return 'success', {'message': 'User has been successfully verified!'}\n\n\nclass ReachHandler(BaseHandler):\n @tools.user_required\n @tools.json_out\n def get(self, reach_id=None):\n if reach_id:\n campaigns = [ndb.Key(urlsafe=reach_id).get()]\n else:\n campaigns = models.Reach.query().fetch()\n if campaigns:\n data = []\n for campaign in campaigns:\n data.append({'name': campaign.name,\n 'id': campaign.key.urlsafe(),\n 'logo': campaign.logo,\n 'description': campaign.description,\n 'due_date': campaign.due_date.strftime('%Y-%m-%d %X'),\n 'limit': campaign.limit,\n 'credit_per_feedback': campaign.credit_per_feedback,\n 'date_added': campaign.date_added.strftime('%Y-%m-%d %X'),\n 'date_updated': campaign.date_updated.strftime('%Y-%m-%d %X')})\n return 'data', data\n else:\n self.response.status = 404\n return 'error', 'No campaigns for now!'\n\n @tools.user_required\n @tools.json_in\n @tools.json_out\n def post(self):\n reach_data = self.request.json\n due_date = datetime.strptime(reach_data.get('due_date', None), '%Y-%M-%d')\n reach = models.Reach(name=reach_data.get('name', None), logo=reach_data.get('logo', None),\n description=reach_data.get('description', None), banner=reach_data.get('banner', None),\n due_date=due_date, limit=int(reach_data.get('limit', None)),\n credit_per_feedback=int(reach_data.get('credit_per_feedback', None)))\n reach_key = reach.put()\n reach_data['id'] = reach_key.urlsafe()\n return 'data', reach_data\n\n\nclass StatesHandler(BaseHandler):\n @tools.json_in\n @tools.json_out\n def post(self):\n state_data = self.request.json\n state = models.States(state=state_data['state'])\n state_key = state.put()\n if state_data['lgas']:\n for lga_data in state_data['lgas']:\n lga = models.LocalGovernments(parent=state_key, local_government=lga_data['local_government'])\n lga_key = lga.put()\n for ward_data in lga_data['wards']:\n ward = models.Wards(parent=lga_key, ward=ward_data)\n ward.put()\n state_data['id'] = state_key.urlsafe()\n return 'data', state_data\n\n @tools.json_out\n def get(self):\n states_data = models.States.query().order(models.States.state).fetch()\n if states_data:\n states 
= []\n for state in states_data:\n states.append({'id': state.key.urlsafe(), 'state': state.state})\n return 'data', states\n else:\n self.response.status = 404\n return 'error', {'message': 'No state found!'}\n\n @tools.json_out\n def get_lgas_wards(self, state_id):\n state = ndb.Key(urlsafe=state_id).get()\n if not state:\n self.response.status = 404\n return 'error', {'message': 'State was not found!'}\n\n lgas = models.LocalGovernments.query(ancestor=state.key).fetch()\n if not lgas:\n self.response.status = 404\n return 'error', {'message': 'No local government found!'}\n\n data = []\n for lga in lgas:\n u = {'id': lga.key.urlsafe(), 'local_government': lga.local_government}\n wards = models.Wards.query(ancestor=lga.key).fetch()\n if wards:\n u['wards'] = [{'id': ward.key.urlsafe(), 'ward': ward.ward} for ward in wards]\n data.append(u)\n return 'data', data\n\n\nclass FeedsHandler(BaseHandler):\n @tools.user_required\n @tools.json_out\n def get(self, reach_id):\n reach = ndb.Key(urlsafe=reach_id).get()\n if not reach:\n self.response.status = 404\n return 'error', {'message': 'The reach selected does not exist'}\n\n feeds = models.Feeds.query(ancestor=reach.key).fetch()\n if feeds:\n data = []\n for feed in feeds:\n data.append({\n 'id': feed.key.urlsafe(),\n 'title': feed.title,\n 'category': feed.category,\n 'text': feed.text,\n 'media_url': feed.media_url,\n 'date_uploaded': feed.date_uploaded.strftime('%Y-%M-%d %X'),\n 'date_changed': feed.date_changed.strftime('%Y-%M-%d %X')\n })\n return 'data', data\n else:\n self.response.status = 404\n return 'error', {'message': ''}\n\n @tools.user_required\n @tools.json_in\n @tools.json_out\n def post(self, reach_id):\n reach = ndb.Key(urlsafe=reach_id).get()\n if reach:\n feed_data = self.request.json\n feed = models.Feeds(parent=reach.key, title=feed_data['title'], category=feed_data['category'],\n text=feed_data['text'], media_url=feed_data['media_url'])\n feed_key = feed.put()\n feed_data['id'] = feed_key.urlsafe()\n return 'data', feed_data\n else:\n self.response.status = 404\n return 'error', {'message': 'The reach selected does not exist'}\n\n\nclass SurveyHandler(BaseHandler):\n @tools.user_required\n @tools.json_out\n def get(self, reach_id, feed_id):\n surveys = models.Surveys.get_user_surveys(self.user.key)\n if surveys:\n data = []\n for survey in surveys:\n data.append({\n 'first_name': survey.first_name,\n 'last_name': survey.last_name,\n 'sex': survey.sex,\n 'date_of_birth': survey.date_of_birth.strftime('%Y-%M-%d'),\n 'location': survey.location,\n 'phone_number': survey.phone_number,\n 'email': survey.email,\n 'survey_media': survey.survey_media,\n 'date_given': survey.date_given.strftime('%Y-%M-%d %X'),\n 'date_changed': survey.date_changed.strftime('%Y-%M-%d %X')\n })\n return 'data', data\n else:\n self.response.status = 404\n return 'error', {'message': 'User does not have any survey!'}\n\n @tools.user_required\n @tools.json_in\n @tools.json_out\n def post(self, reach_id):\n reach = ndb.Key(urlsafe=reach_id).get()\n if not reach:\n self.response.status = 404\n return 'error', 'Reach does not exist!'\n\n survey_data = self.request.json\n survey = models.Surveys(parent=reach.key, first_name=survey_data.get('first_name', None),\n last_name=survey_data.get('last_name', None), sex=survey_data.get('sex', None),\n date_of_birth=survey_data.get('dob', None), location=survey_data.get('location', None),\n phone_number=survey_data.get('phone_number', None),\n email=survey_data.get('email', None),\n 
survey_media=survey_data.get('survey_media', None))\n survey_key = survey.put()\n survey_data['id'] = survey_key.urlsafe()\n return 'data', survey_data\n","sub_path":"bin/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":15997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221605017","text":"# -*- coding: utf-8 -*\n#rid 21 STP\nimport xlsxwriter\nfrom io import BytesIO\nimport datetime\nfrom .. import stp_config\n\ndef form_url(params):\n\tbase_url = str(stp_config.CONST.API_URL_PREFIX) + 'stp_warranty_lists/'\n\tbase_url += str(params[\"wtype\"])\n\treturn base_url\n\n#Warranty Report Deficiency List Details\ndef render(res, params):\n\n\trid = params[\"rid\"]\n\tyear = params[\"year\"]\n\tcon_num = params[\"con_num\"]\n\tassign_num = params[\"assign_num\"]\n\twtype = params[\"wtype\"]\n\n\toutput = BytesIO()\n\tworkbook = xlsxwriter.Workbook(output, {'in_memory': True})\n\tworksheet = workbook.add_worksheet()\n\n\ttype = 'Year 1 Warranty' if wtype == '1' else 'Year 2 Warranty' if wtype == '2' else '12 Month Warranty'\n\ttitle = 'Warranty Report Deficiency List Details ' + type\n\n\t#MAIN DATA FORMATING\n\tformat_text = workbook.add_format(stp_config.CONST.FORMAT_TEXT)\n\tformat_num = workbook.add_format(stp_config.CONST.FORMAT_NUM)\n\tformat_num2 = workbook.add_format(stp_config.CONST.FORMAT_NUM2)\n\titem_header_format = workbook.add_format(stp_config.CONST.ITEM_HEADER_FORMAT)\n\t##Hunter's additional formatting\n\titem_format = workbook.add_format(stp_config.CONST.ITEM_FORMAT)\n\ttitle_format = workbook.add_format(stp_config.CONST.TITLE_FORMAT)\n\titem_format_money = workbook.add_format(stp_config.CONST.ITEM_FORMAT_MONEY)\n\tsubtitle_format = workbook.add_format(stp_config.CONST.SUBTITLE_FORMAT)\n\tsubtotal_format = workbook.add_format(stp_config.CONST.SUBTOTAL_FORMAT)\n\tsubtotal_format_money = workbook.add_format(stp_config.CONST.SUBTOTAL_FORMAT_MONEY)\n\n\t#HEADER\n\t#write general header and format\n\trightmost_idx = 'G'\n\tstp_config.const.write_gen_title(title, workbook, worksheet, rightmost_idx, year, con_num)\n\n\t#additional header image\n\tworksheet.insert_image('F1', stp_config.CONST.ENV_LOGO,{'x_offset':120,'y_offset':18, 'x_scale':0.5,'y_scale':0.5, 'positioning':2})\n\n\tdata = res\n\n\tworksheet.set_column('A:G', 25)\n\tworksheet.set_row(0,36)\n\tworksheet.set_row(1,36)\n\titem_fields = ['Tag Number', 'Tag Colour', 'Item', 'Tree ID', 'Health Rating', 'Deficiency', 'Required Repair']\n\n\t#MAIN DATA\n\tcr = 7\n\tregions = {}\n\n\tfor iid, val in enumerate(data[\"items\"]):\n\t\tif str(data[\"items\"][iid][\"contractyear\"]) == year:\n\t\t\trKey = str(str(data[\"items\"][iid].get(\"municipality\")) + ' - ' + str(data[\"items\"][iid].get(\"contract item\")) + ' - ' + str(data[\"items\"][iid].get(\"road\")) + ' - ' + str(data[\"items\"][iid].get(\"road side\")))\n\t\t\tif not rKey in regions:\n\t\t\t\tregions.update({rKey : [[\n\t\t\t\t\tdata[\"items\"][iid].get(\"municipality\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"contract item\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"road\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"between1\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"between2\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"road side\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"tag number\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"tag colour\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"item\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"tree 
id\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"health\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"deficiency\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"required repair\")\n\t\t\t\t\t]]})\n\t\t\telse:\n\t\t\t\tregions[rKey].append([\n\t\t\t\t\tdata[\"items\"][iid].get(\"municipality\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"contract item\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"road\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"between1\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"between2\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"road side\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"tag number\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"tag colour\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"item\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"tree id\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"health\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"deficiency\"),\n\t\t\t\t\tdata[\"items\"][iid].get(\"required repair\")\n\t\t\t\t\t])\n\t\t\t\t\n\tbreaks = []\n\n\tfor reg_id, reg in enumerate(sorted(regions)):\n\t\tworksheet.merge_range('A{}:C{}'.format(cr, cr), \"Municipality: \" + str(regions[reg][0][0]), item_header_format)\n\t\tworksheet.merge_range('A{}:C{}'.format(cr+1, cr+1), \"Contract Item No.: \" + str(regions[reg][0][1]), item_header_format)\n\t\t(worksheet.merge_range('A{}:C{}'.format(cr+2, cr+2), \"Regional Road: \" + str(regions[reg][0][2]) + \" Between \" + \n\t\t\tstr(regions[reg][0][3]) + \" and \" + str(regions[reg][0][4]), item_header_format))\n\t\tcr += 3\n\n\t\tworksheet.write_row('A{}'.format(cr), item_fields, item_header_format)\n\t\tcr += 1\n\n\t\tfor tree in regions[reg]:\n\t\t\tworksheet.write('A{}'.format(cr), tree[6], format_num2)\n\t\t\tworksheet.write('B{}'.format(cr), tree[7], format_text)\n\t\t\tworksheet.write('C{}'.format(cr), tree[8], format_text)\n\t\t\tworksheet.write('D{}'.format(cr), tree[9], format_num2)\n\t\t\tworksheet.write_row('E{}'.format(cr), tree[10:], format_text)\n\t\t\tcr += 1\n\n\t\tbreaks.append(cr)\n\t\tcr += 1\n\t\n\tworksheet.set_h_pagebreaks(breaks)\n\n\tworkbook.close()\n\n\txlsx_data = output.getvalue()\n\treturn xlsx_data","sub_path":"stp/report_classes/stp_wrdd.py","file_name":"stp_wrdd.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"57163830","text":"#!/usr/bin/env python3\n'''\nModule is used for handling or creating data packets to DNS server\n'''\nimport random\nimport requesttypes\n\nclass DNSresponse:\n    '''\n    Class represents entity of response DNS packet.\n    '''\n    def __init__(self, pid, qr = 0b1,\n                 opcode = 0b0000, aa = 0b0, tc = 0b0,\n                 rd = 0b1, ra = 0b1, z = 0b0,\n                 ad = 0b0, cd = 0b0, rcode = 0b0000):\n        self.bts = pid\n        flags1 = (qr<<7) + (opcode<<3) + (aa<<2) + (tc<<1) + rd # Converting binary data (bits) \n        flags2 = (ra<<7) + (z<<6) + (ad<<5) + (cd<<4) + rcode # to integers\n        self.bts += bytearray([flags1, flags2])\n        self.bts += bytearray([\n            0x00, 0x01, # Questions\n            0x00, 0x00, # Answers\n            0x00, 0x00, # Authority RRs\n            0x00, 0x00 # Additional RRs\n            ])\n\n    def initfrombytes(self, btarray):\n        '''\n        Initialize data packet from bytearray\n        '''\n        self.bts = btarray\n\n    def addquery(self, name, ptype):\n        '''\n        Adds query field to packet\n        '''\n        data = bytearray([])\n        for word in name.split('.'):\n            data.append(len(word))\n            data += word.encode()\n        data += bytearray([0x00])\n        data += requesttypes.ptypes[ptype]\n        data += bytearray([0x00, 0x01])\n        self.bts += data\n\n    def addanswer(self, data, ptype, \n                  name = bytearray([0xc0, 0x0c]),\n                  pclass = bytearray([0x00, 0x01]), \n                  ttl = bytearray([0x00, 0x00, 0x00, 0x71]),\n                  length = bytearray([0x00, 0x04])):\n        '''\n        Adds data with DNS answer to packet\n        '''\n        self.bts[7] = self.bts[7] + 1 # Increase answer counter\n        bindata = bytearray() # initialize before the branches so the A case can append\n        if ptype == \"A\":\n            for octet in data.split('.'):\n                if octet: bindata.append(int(octet))\n        elif ptype == \"PTR\":\n            bindata = str.encode(data)\n        self.bts += name\n        self.bts += requesttypes.ptypes[ptype]\n        self.bts += pclass\n        self.bts += ttl\n        print(\"  Data\",data,\" length \",len(bindata))\n        self.bts += bytearray([0x00, len(bindata)])\n        self.bts += bytearray(bindata)\n\n    def getbytes(self):\n        '''\n        Returns bytearray of data which contains DNS packet\n        '''\n        return self.bts\n\n\nclass DNSrequest:\n    '''\n    Class represents entity of reply packet\n    '''\n    def __init__(self, bts):\n        self.bts = bytearray(bts)\n\n    def getpid(self):\n        '''\n        Returns ID field from request packet\n        '''\n        return self.bts[0:2]\n\n    def getname(self):\n        '''\n        Returns string with requested name\n        '''\n        namebts = self.bts[12:]\n        name = ''\n        bt = iter(namebts)\n        count = next(bt)\n        while count:\n            namepart = bytearray()\n            for y in range(count):\n                namepart.append(next(bt))\n            name += namepart.decode()\n            name += '.'\n            count = next(bt)\n        return name[:len(name) - 1]\n\n    def getaddrfromptr(self):\n        if self.typeofrequest() != \"PTR\": return None\n        ipaddr = self.getname().split(\".\")[0:4]\n        ipaddrstr = \".\".join([ipaddr[3], ipaddr[2], ipaddr[1], ipaddr[0]])\n        return ipaddrstr\n\n    def typeofrequest(self):\n        '''\n        Defines type of request (A, AAAA, PTR)\n        '''\n        type = self.bts[-3]\n        if type == 0x0c: # PTR request\n            return \"PTR\"\n        elif type == 0x01: # A request\n            return \"A\"\n        elif type == 0x1C: # AAAA request\n            return \"AAAA\"\n        else:\n            return None\n\ndef getrequest(name):\n    '''\n    getrequest(name)-> bytearray\n    Makes bytearray of data with request to DNS server for A-record\n    for \"name\" parameter\n    '''\n    data = bytearray([\n        random.randint(0, 255), random.randint(0, 255), # random ID\n        0x01, 0x00,\n        0x00, 0x01,\n        0x00, 0x00,\n        0x00, 0x00,\n        0x00, 0x00])\n    for word in name.split('.'):\n        data.append(len(word))\n        data += word.encode()\n    data += bytearray([\n        0x00,\n        0x00, 0x01,\n        0x00, 0x01])\n    return data\n\ndef getbytesfromstr(string): # do I need it?\n    pass\n\nif __name__ == '__main__':\n    # Self test\n    print(getrequest('google.com'))\n","sub_path":"s53/nameresolver/dnspacket.py","file_name":"dnspacket.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"240968892","text":"import sys\nsys.stdin = open('input.txt', 'r')\nfrom collections import deque\n\ndef find_parent(x, p): # find the taller ones\n    global path\n    queue = deque()\n    visited = [0] * (N + 1)\n    for ii in range(len(p[x])):\n        queue.append(p[x][ii]) # those taller than x\n        path.append(p[x][ii])\n        visited[p[x][ii]] = 1\n    while queue:\n        hing = queue.popleft()\n        for aa in p[hing]: # if it has a parent\n            if aa not in queue and not visited[aa]:\n                queue.append(aa)\n                visited[aa] = 1\n                if aa not in path:\n                    path.append(aa)\n    return\n\nfor tc in range(1, int(input()) + 1):\n    N = int(input())\n    M = int(input())\n    # arr = deque(list(map(int, input().split())) for _ in range(M))\n    pa = deque([]*(N+1) for _ in range(N+1))\n    p_reverse = deque([]*(N+1) for _ in range(N+1))\n    for _ in range(M):\n        a, b = map(int, input().split())\n        pa[a].append(b)\n        p_reverse[b].append(a)\n    print(pa)\n    print(p_reverse)\n    cnt = 0\n    for idx in range(1, N+1):\n        path = deque()\n        if pa:\n            find_parent(idx, pa)\n        if p_reverse:\n            find_parent(idx, 
p_reverse)\n if len(path) == N-1:\n cnt += 1\n print('#{} {}'.format(tc, cnt))\n","sub_path":"1911/191112/키 순서/키순서4.py","file_name":"키순서4.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414411252","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 24 19:53:54 2021\n\n@author: aurelien\n\"\"\"\n\n#%%\nimport os\nimport mne\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\n#import scipy.io as sio\nimport scipy\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom meegkit.utils.matrix import sliding_window\nimport pandas as pd\nimport pdc_dtf, pdc_dtf2\nimport math\n\nmpl.rcParams['axes.spines.right'] = False\nmpl.rcParams['axes.spines.top'] = False\n\n#%%\n\nsfreq = 250\nresultsLoc = '/home/aurelien/Desktop/AURELE/M2_DCAS/GitHub_DCAS/IRL_DividedAttention/analysis/results/'\n\nsrs = np.load(resultsLoc+'srs_file.npy', allow_pickle='TRUE').item()\nfrqs = np.load(resultsLoc+'frqs_file.npy', allow_pickle='TRUE').item() # frequency ranges\nevoked_all = np.load(resultsLoc+'evoked_all.npy', allow_pickle='TRUE').item()\npsds_all = np.load(resultsLoc+'psds_all.npy', allow_pickle='TRUE').item()\nevoked_by_N = np.load(resultsLoc+'evoked_by_N.npy',allow_pickle='TRUE').item()\nepochs_by_N = np.load(resultsLoc+'epochs_by_N.npy',allow_pickle='TRUE').item()\nfreqs = np.load(resultsLoc+'freqs.npy',allow_pickle='TRUE')\n\nn_participants = evoked_all['a1'].shape[0]\nNs = epochs_by_N.keys()\ngroup_name = ['a1_aav1','a1_v1','v1_vav1','aav1_vav1']\nconds = ['a1','v1','aav1','vav1']\n\nsensorsByCluster = {}\nclu_idx = 1\nfor gp in group_name:\n for clu in srs[gp]:\n sensorsByCluster['cluster'+str(clu_idx)]=clu\n clu_idx+=1\n \n\n \n\n##################\n \n # for key in conds:\n # evoked_all[key] = np.transpose(evoked_all[key],(0,2,1))\n \n # DTF Per participant per evoked (all the epochs)\n # evoked_clu = {} # evoked_clu['aav1_vav1'] = list(clusters1 (#participants x #sensors x #samples), cluster2)\n # for key in conds:\n # l = []\n # for gp in group_name:\n # for clu in srs[gp]:\n # epchs = epochs_by_N[N]['a1'][:,clu[:,None],:][:,:,0,:]\n # for e in epchs:\n # p,bic = pdc_dtf2.compute_order(e, p_max=10)\n # A_est, sigma = pdc_dtf2.mvar_fit(e, p)\n # sigma = np.diag(sigma) # DTF + PDC support diagonal noise\n # # sigma = None\n # # compute DTF\n # D, freqs = pdc_dtf2.DTF(A_est, sigma)\n\nnbClusters = 0\nfor gp in group_name:\n nbClusters += len(srs[gp])\n\n# evoked_clu = {}\n# for key in conds:\n# print('Condition: ',key)\n# bigArray = np.zeros((n_participants,nbClusters,64,64))\n# for N_idx in range(n_participants):\n# clu_idx = 0 # clu_idx <= nbCluster\n# for gp in group_name:\n# for i,clu in enumerate(srs[gp]):\n# evk = evoked_all['a1'][N_idx,clu[:,None],:][:,0,:]\n# p,bic = pdc_dtf2.compute_order(evk, p_max=5)\n# A_est, sigma = pdc_dtf2.mvar_fit(evk, p)\n# sigma = np.diag(sigma) # DTF + PDC support diagonal noise\n# # sigma = None\n# # compute DTF\n# D, freqs = pdc_dtf2.DTF(A_est, sigma)\n# matrix = np.zeros((64,64))\n# F = freqs * sfreq\n# lower_f = frqs[gp][i][0]\n# upper_f = frqs[gp][i][-1]\n# f_range = (F >= lower_f) & (F <= upper_f)\n# D_integrated = D[np.ix_(f_range)].mean(axis=0)\n \n# for s1,ch_row in enumerate(clu):\n# for s2,ch_col in enumerate(clu):\n# matrix[ch_row,ch_col] = D_integrated[s1,s2] \n# bigArray[N_idx,clu_idx,:,:] = matrix # 64x64 matrix including DTF\n# clu_idx += 1\n# print('Participant 
{}/{}'.format(N_idx+1,n_participants))\n# evoked_clu[key] = bigArray\n \nDTFbig_all = {}\np_all = {}\nDTF_all = {}\nfor key in conds:\n DTF_all[key]={}\n print('Condition: ',key)\n bigArray = np.zeros((n_participants,nbClusters,64,64))\n pMatrix = np.zeros((n_participants,nbClusters,1))\n for N_idx,N in enumerate(Ns):\n DTF_all[key][N_idx] = {}\n clu_idx = 0 # clu_idx <= nbCluster\n for gp in group_name:\n for i,clu in enumerate(srs[gp]):\n epochs_data = epochs_by_N[N][key][:,clu[:,None],:][:,:,0,:]\n # z-scoring across samples (within each epoch)\n for ii,e in enumerate(epochs_data):\n mean,std = e.mean(axis=1),e.std(axis=1)\n mean,std = mean[:,np.newaxis],std[:,np.newaxis]\n epochs_data[ii] = (e-mean)/std \n # z-scoring across trials \n # mean,std = epochs_data.mean(axis=0),epochs_data.std(axis=0)\n mean,std = epochs_data.mean(axis=(0,2)),epochs_data.std(axis=(0,2))\n mean,std = mean[:,np.newaxis],std[:,np.newaxis]\n for ii,e in enumerate(epochs_data):\n epochs_data[ii] = (e-mean)/std \n evoked = epochs_data.mean(axis=0)\n p,bic = pdc_dtf2.compute_order(evoked, p_max=5)\n A_est, sigma = pdc_dtf2.mvar_fit(evoked, p)\n sigma = np.diag(sigma) # DTF + PDC support diagonal noise\n # sigma = None\n # compute DTF\n D, freqs = pdc_dtf2.DTF(A_est, sigma)\n matrix = np.zeros((64,64))\n F = freqs * sfreq\n lower_f = frqs[gp][i][0]\n upper_f = frqs[gp][i][-1]\n f_range = (F >= lower_f) & (F <= upper_f)\n D_integrated = D[np.ix_(f_range)].mean(axis=0)\n \n for s1,ch_row in enumerate(clu):\n for s2,ch_col in enumerate(clu):\n if ch_row != ch_col:\n matrix[ch_row,ch_col] = D_integrated[s1,s2] \n bigArray[N_idx,clu_idx,:,:] = matrix # 64x64 matrix including DTF\n pMatrix[N_idx,clu_idx,:] = p\n DTF_all[key][N_idx]['cluster'+str(clu_idx+1)] = D\n clu_idx += 1\n print('Participant {}/{}'.format(N_idx+1,n_participants))\n DTFbig_all[key] = bigArray\n \nDTF_all2 = {} \nfor key in conds:\n DTF_all2[key] = {}\n for clu_idx in range(len(sensorsByCluster)):\n clu_name = 'cluster'+str(clu_idx+1)\n n_sensors = len(sensorsByCluster[clu_name])\n DTF_all2[key][clu_name] = np.zeros((n_participants,len(freqs),n_sensors,n_sensors))\n for N_idx in range(n_participants):\n DTF_all2[key][clu_name][N_idx] = DTF_all[key][N_idx][clu_name]\nDTF_all = DTF_all2\n \n\nnp.save(resultsLoc+'DTFbig_all.npy',DTFbig_all)\nnp.save(resultsLoc+'DTF_all.npy',DTF_all)\nnp.save(resultsLoc+'sensorsByCluster.npy',sensorsByCluster)\nnp.save(resultsLoc+'freqs_DTF.npy',F)\n\n\n\n \n#%%\n\n# DTFbig_all = np.load(resultsLoc+'DTFbig_all.npy',allow_pickle=True).item()\n\n\n \n \n \n \n \n \n \n \n \n \n ","sub_path":"analysis/DTF_computingEvoked.py","file_name":"DTF_computingEvoked.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"431549872","text":"# Uma pessoa tem em seu guarda roupas x camisas, y calças e z pares de sapato.\n# Escreva um algoritmo que calcula de quantas maneiras diferentes ele pode se vestir.\n# Seu algoritmo deverá ler o número de camisas, o número de calças e o número de sapatos.\n\n\nnumeroCamisas = int(input(\"Quantidade de camisas: \"))\nnumeroCalcas = int(input(\"Quantidade de calças: \"))\nnumeroSapatos = int(input(\"Quantidade de sapatos: \"))\n\nquantidadeDiferentes = numeroCamisas * numeroCalcas * numeroSapatos\n\nprint(\"Quantidade diferentes de combinações: \", quantidadeDiferentes)\n\n# x -> y -> z\n# y -> z -> x\n# z -> x -> y\n","sub_path":"Beginning with 
Python/02exe/08ExeCalcularManeirasDiferentesDeSeVestir.py","file_name":"08ExeCalcularManeirasDiferentesDeSeVestir.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"298221625","text":"def notas(* n, sit=False):\n '''\n -> a Função notas tem o objetivo de mostrar o calculo das notas do aluno.\n :param n: São as notas dos alunos\n :param sit: Mostra a situação do aluno\n :return: retorna o total, a média, maior e menor nota do aluno.\n '''\n nota = dict()\n nota['nota'] = n\n nota['Total'] = len(n)\n nota['Maior'] = max(n)\n nota['Menor'] = min(n)\n nota['Média'] = sum(n) / len(n)\n if sit:\n if nota['Média'] >= 7:\n nota['Situação'] = 'BOA'\n elif nota['Média'] >= 5:\n nota['Situação'] = 'BOA'\n else:\n nota['Situação'] = 'RUIM'\n return nota\n\nresp = notas(3, 4, 8, 10, 7, 4, sit=True)\nprint(resp)\nhelp(notas)\n","sub_path":"mundo 3/105.py","file_name":"105.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481911145","text":"from tkinter import *\n\nexpression = ''\n\n\nif __name__ == \"__main__\":\n gui = Tk()\n gui.configure(background=\"light green\")\n gui.title(\"Simple Calculator\")\n display = StringVar()\n expression_field = Entry(gui, textvariable=display)\n expression_field.grid(columnspan=4, ipadx=70)\n\n me = Frame(gui)\n\n\ndef press(n):\n global expression\n expression = expression + str(n)\n display.set(expression)\n\n\ndef equalpress():\n try:\n global expression\n total = str(eval(expression))\n display.set(total)\n expression = \"\"\n\n except:\n display.set(\" error \")\n expression = \"\"\n\n\nbutton1 = Button(gui, text=' 1 ', fg='black', bg='red', command=lambda: press(1), height=1, width=7)\nbutton1.grid(row=2, column=0)\n\nequal = Button(gui, text=' = ', fg='black', bg='red', command=equalpress, height=1, width=7)\nequal.grid(row=5, column=2)\n\nminus = Button(gui, text=' - ', fg='black', bg='red',\n command=lambda: press(\"-\"), height=1, width=7)\nminus.grid(row=3, column=3)\n\nbutton6 = Button(gui, text=' 6 ', fg='black', bg='red',\n command=lambda: press(6), height=1, width=7)\nbutton6.grid(row=3, column=2)\n\ngui.mainloop()\n","sub_path":"venv/Lib/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609225627","text":"class QryDataAQH:\n\n def __init__(self,parentForm,FormObj):\n self.form = parentForm\n self.app = parentForm.ClientApplication\n self.FormView = None\n\n def Show(self):\n self.form.GetPanelByName('konklusi').GetControlByName('jumlah').caption =self.uipart.Jml\n self.ShowFixedAssetData()\n self.FormContainer.Show()\n\n def bCariClick(self,sender):\n self.ShowFixedAssetData()\n\n def ShowFixedAssetData(self):\n AQH = self.AQH\n uipart = self.uipart\n\n field = uipart.Field or ''\n textcari = uipart.isian or ''\n\n pencarian = ''\n if field != '' and textcari != '':\n pencarian = \"and %s LLIKE '%s'\" % (field,textcari)\n\n Multi = ''\n if self.uipart.MultiKJKS != 'x':\n Multi = \"and MustahiqId in {%s}\" % self.uipart.MultiKJKS\n\n AddParam = \"[ LProduct.ProductGroupName = 'AQH' %s %s ]\" % (pencarian,Multi)\n AQH.OQLText = \" Select from MustahiqProduct as AQH \\\n %s \\\n ( MustahiqExtNumber, \\\n LMustahiq.LCustomer.CustomerName, \\\n LProduct.ProductName, \\\n Status $, \\\n ProductId, \\\n MustahiqId, \\\n 
LMustahiq.LCustomer.PhoneNumber, \\\n LBranch.BranchName, \\\n self \\\n ) then order by CustomerName;\" % (AddParam)\n AQH.DisplayData()\n\n\n def MultiProgram(self,jenis):\n if jenis == 1:\n self.form.Caption = \"Peragaan Peresta Peserta AL-QORDHUL HASAN yang mengiukuti Multi Program\"\n else:\n self.form.Caption = \"Peragaan Peserta Program AL-QORDHUL HASAN\"\n params = self.app.CreateValues(['Key',jenis])\n self.FormObject.SetDataWithParameters(params)\n self.Show()\n\n\n def NonAktifkanProgram(self,key):\n app = self.app\n if app.ConfirmDialog('Yakin Menonktifkan KJKS Dari keikutsertaan Program AQH!!'):\n pass\n else:\n return 0\n params = self.app.CreateValues(['key',key])\n retval = self.app.ExecuteScript('AQH/NonAktifkanAQH.NonAktifProgram',params)\n\n status = retval.FirstRecord\n self.app.ShowMessage(status.Pesan)\n\n def AktifkanProgram(self,key):\n app = self.app\n if app.ConfirmDialog('Yakin aktifkan KJKS Dari keikutsertaan Program AQH!!'):\n pass\n else:\n return 0\n params = self.app.CreateValues(['key',key])\n retval = self.app.ExecuteScript('AQH/AktifkanAQH.AktifProgram',params)\n\n status = retval.FirstRecord\n self.app.ShowMessage(status.Pesan)\n\n\n\n","sub_path":"dialogs/AQH/QryDataAQH_intr.py","file_name":"QryDataAQH_intr.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"542541493","text":"\n# create a formatted string\ntxt = FormattedString()\n\n# adding some text with some formatting\ntxt.append(\"hello\", font=\"Helvetica\", fontSize=100, fill=(1, 0, 0))\n# adding more text\ntxt.append(\"world\", font=\"Times-Italic\", fontSize=50, fill=(0, 1, 0))\n\n# setting a font\ntxt.font(\"Helvetica-Bold\")\ntxt.fontSize(75)\ntxt += \"hello again\"\n\n# drawing the formatted string\ntext(txt, (10, 10))\n\n\n# create a formatted string\ntxt = FormattedString()\n\n# adding some text with some formatting\ntxt.append(\"hello\", font=\"ACaslonPro-Regular\", fontSize=50)\n# adding more text with an\ntxt.append(\"world\", font=\"ACaslonPro-Regular\", fontSize=50, openTypeFeatures=dict(smcp=True))\n\ntext(txt, (10, 110))","sub_path":"examples/formattedString.py","file_name":"formattedString.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614366524","text":"# -*- coding:utf-8 -*-\r\nclass Solution:\r\n def FindGreatestSumOfSubArray(self, array):\r\n # write code here\r\n if len(array) == 0:\r\n return 0\r\n dp = [array[0]]\r\n for i in range(1,len(array)):\r\n dp.append(max(dp[i-1]+array[i],array[i]))\r\n return max(dp)\r\nif __name__ == \"__main__\":\r\n array = [2,8,1,5,9]\r\n sol = Solution()\r\n ans = sol.FindGreatestSumOfSubArray(array)\r\n print(ans)\r\n\r\n\r\n\r\n","sub_path":"Q42/Q42.py","file_name":"Q42.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71204540","text":"# Unit test verifies that one specific aspect\n# of a function is working...\n# A test case is a collecton of unit tests\n# that together verify the funtion works completely.\n\n# To write a test case, import unittest and the function.\n# Then create a class that inherits from unittest.TestCase,\n# and write a series of methods to test different aspects\n# of your functions behavior.\n\n\n\n# This class must inherit from the unittest.TestCase class\n# so python knows how to run the tests you write.\n# Any method 
that begins with 'test_' will be run automatically\n# when we run this program.\n\nimport unittest\nfrom name_function import get_formatted_name\n\nclass NameTestCase(unittest.TestCase):\n \"\"\" Tests for 'name_function.py' \"\"\"\n\n def test_first_last_name(self):\n \"\"\" Do names like 'Janis Joplin' work? \"\"\"\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')\n\n\n def test_first_middle_last_name(self):\n \"\"\" Do names like 'Wolfgang Amadeus Mozart' work? \"\"\"\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')\n\nunittest.main()\n\n\n# assertEqual(a, b):: Verify that a == b\n# assertNotEqual(a, b):: Verify that a != b\n# assertTrue(x):: Verify that x is True\n# assertFalse(x):: Verify that x is False\n# assertIn(item, list):: Verify that item is in list\n# assertNotIn(item, list):: Verify that item is not in list\n","sub_path":"imgurex/crash_course/10_Unit_Tests/test_name_function.py","file_name":"test_name_function.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"402500803","text":"#!/usr/bin/env python3.7\n# vim: ft=python\n\"\"\"\nRun a series of benchmarks against a particular Bitcoin Core revision(s).\n\nSee bin/runlocal.sh for a sample invocation.\n\n\"\"\"\nimport argparse\nimport atexit\nimport os\nimport datetime\nimport getpass\nimport traceback\nimport sys\nimport pickle\nfrom pathlib import Path\n\nfrom . import (\n output, config, bitcoind, results, slack, benchmarks, logging, git, sh,\n hwinfo)\nfrom .globals import G\nfrom .logging import get_logger\nfrom .sh import run\n\nlogger = get_logger()\n\nassert sys.version_info >= (3, 7), \"Python 3.7 required\"\n\n# Maintain a lockfile that is global across the host to ensure that we're not\n# running more than one instance on a given system.\nLOCKFILE_PATH = Path(\"/tmp/bitcoin_bench.lock\")\n\n\ndef _startup_assertions(cfg):\n \"\"\"\n Ensure the benchmark environment is suitable in various ways.\n \"\"\"\n if run(\"$(which time) -f %M sleep 0.01\",\n check_returncode=False)[2] != 0:\n raise RuntimeError(\"the time package is required\")\n\n def warn(msg):\n if cfg.safety_checks:\n raise RuntimeError(msg)\n else:\n logger.warning(msg)\n\n if run(\"pgrep --list-name bitcoin | grep -v bitcoinperf\",\n check_returncode=False)[2] == 0:\n warn(\"benchmarks shouldn't run concurrently with unrelated bitcoin \"\n \"processes\")\n\n if cfg.safety_checks:\n run('sudo -n swapoff -a')\n\n if run('cat /proc/swaps | grep -v \"^Filename\"',\n check_returncode=False)[2] != 1:\n warn(\"swap should be disabled during benchmarking\")\n\n if not _try_acquire_lockfile():\n raise RuntimeError(\n \"Couldn't acquire lockfile %s; exiting\", LOCKFILE_PATH)\n\n\ndef run_benches(cfg):\n \"\"\"\n Create a tmp directory in which we will clone bitcoin, build it, and run\n various benchmarks.\n \"\"\"\n logger.info(\n \"Running benchmarks %s with compilers %s\",\n [i[0] for i in cfg.benches if i[1]], cfg.compilers)\n\n _startup_assertions(cfg)\n\n for target in cfg.to_bench:\n os.chdir(cfg.workdir)\n if (cfg.workdir / 'bitcoin').exists():\n sh.rm(cfg.workdir / 'bitcoin')\n\n G.gitco = git.checkout_in_dir(\n cfg,\n target,\n cfg.workdir / 'bitcoin',\n # TODO: pass copy_from_path\n )\n\n for compiler in cfg.compilers:\n G.compiler = compiler\n\n maybe_run_bench_some_times(\n target, cfg,\n cfg.benches.build, 
benchmarks.Build, always_run=True)\n\n maybe_run_bench_some_times(\n target, cfg, cfg.benches.unittests, benchmarks.MakeCheck)\n\n maybe_run_bench_some_times(\n target, cfg, cfg.benches.functests, benchmarks.FunctionalTests)\n\n maybe_run_bench_some_times(\n target, cfg, cfg.benches.microbench, benchmarks.Microbench)\n\n # Only do the following for gcc (since they're expensive)\n\n maybe_run_bench_some_times(\n target, cfg, cfg.benches.ibd_from_network, benchmarks.IbdReal)\n\n maybe_run_bench_some_times(\n target, cfg, cfg.benches.ibd_from_local, benchmarks.IbdLocal)\n\n maybe_run_bench_some_times(\n target, cfg,\n cfg.benches.ibd_range_from_local, benchmarks.IbdRangeLocal)\n\n maybe_run_bench_some_times(\n target, cfg, cfg.benches.reindex, benchmarks.Reindex)\n\n maybe_run_bench_some_times(\n target, cfg,\n cfg.benches.reindex_chainstate, benchmarks.ReindexChainstate)\n\n\ndef maybe_run_bench_some_times(\n target, cfg, bench_cfg, bench_class, *, always_run=False):\n if not bench_cfg and not always_run:\n logger.info(\"[%s] skipping benchmark\", bench_class.name)\n return\n elif not bench_cfg:\n bench_cfg = config.BenchBuild()\n\n for i in range(getattr(bench_cfg, 'run_count', 1)):\n b = bench_class(cfg, bench_cfg, target, i)\n results.ALL_RUNS.append(b)\n b.wrapped_run(cfg, bench_cfg)\n\n\ndef _try_acquire_lockfile():\n if LOCKFILE_PATH.exists():\n return False\n\n with LOCKFILE_PATH.open('w') as f:\n f.write(\"%s,%s\" % (datetime.datetime.utcnow(), getpass.getuser()))\n G.lockfile_acquired = True\n return True\n\n\ndef _get_shutdown_handler(cfg: config.Config):\n def handler():\n for node in bitcoind.Node.all_instances:\n if node.ps and node.ps.returncode is None:\n node.terminate()\n node.join()\n\n # Release lockfile if we've got it\n if G.lockfile_acquired:\n LOCKFILE_PATH.unlink()\n logger.debug(\"shutdown: removed lockfile at %s\", LOCKFILE_PATH)\n\n # Clean up to avoid filling disk\n # TODO add more granular cleanup options\n if cfg.teardown and cfg.workdir.is_dir():\n os.chdir(cfg.workdir)\n _stash_debug_file(cfg)\n\n # For now only remove the bitcoin subdir, since that'll be far and\n # away the biggest subdir.\n run(\"rm -rf %s\" % (cfg.workdir / 'bitcoin'))\n logger.debug(\"shutdown: removed bitcoin dir at %s\", cfg.workdir)\n elif not cfg.teardown:\n logger.debug(\"shutdown: leaving bitcoin dir at %s\", cfg.workdir)\n\n return handler\n\n\ndef _stash_debug_file(cfg: config.Config):\n \"\"\"\n Throw the last debug file so that we avoid removing it with the\n rest of the bitcoin stuff.\n \"\"\"\n # Move the debug.log file out into /tmp for diagnostics.\n debug_file = cfg.workdir / 'bitcoin' / 'data' / 'debug.log'\n if debug_file.is_file():\n # Overwrite the file so as not to fill up disk.\n debug_file.rename(cfg.workdir / 'stashed-debug.log')\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'yaml_or_pickle_path',\n help=(\n 'path to configuration file (YAML) or a results file (pickle). 
'\n 'See examples/.'))\n\n args = parser.parse_args()\n arg = args.yaml_or_pickle_path\n cfg = None\n\n if arg.endswith('yml') or arg.endswith('yaml'):\n config_file = Path(sys.argv[1])\n if not config_file.exists():\n print(\".yaml config file required as only argument\",\n file=sys.stderr)\n sys.exit(1)\n\n cfg = config.load(config_file)\n logging.configure_logger(cfg)\n\n if cfg.codespeed:\n results.Reporters.codespeed = results.CodespeedReporter(\n cfg.codespeed)\n\n G.slack = slack.Client(cfg.slack.webhook_url if cfg.slack else '')\n slack.attach_slack_handler_to_logger(cfg, G.slack, logger)\n\n atexit.register(_get_shutdown_handler(cfg))\n\n logger.info(\"Started on host %s (codespeed env %s)\",\n config.HOSTNAME,\n cfg.codespeed.envname if cfg.codespeed else '[none]')\n logger.info(cfg.to_string(pretty=True))\n\n try:\n run_benches(cfg)\n except Exception:\n G.slack.send_to_slack_attachment(\n G.gitco, \"Error\", {},\n text=traceback.format_exc(), success=False)\n raise\n\n logger.info(\"Getting hardware information\")\n hw = hwinfo.get_hwinfo(cfg.workdir, None)\n\n res_dict = {\n 'runs': results.ALL_RUNS,\n 'hwinfo': hw,\n }\n\n try:\n results_path = cfg.results_dir / 'results.pickle'\n results_path.write_bytes(pickle.dumps(res_dict))\n logger.info(\n \"Wrote serialized benchmark results to %s\", results_path)\n except Exception:\n logger.exception(\"failed to pickle results\")\n\n elif arg.endswith('pickle'):\n unpickled = pickle.loads(Path(arg).read_bytes())\n results.ALL_RUNS = unpickled['runs']\n results.HWINFO = unpickled['hwinfo']\n\n grouped = output.GroupedRuns.from_list(results.ALL_RUNS)\n\n if not cfg:\n cfg = list(list(grouped.values())[0].values())[0][0].cfg\n\n if len(cfg.to_bench) <= 1:\n timestr = output.get_times_table(grouped)\n print(timestr)\n else:\n output.print_comparative_times_table(cfg, grouped)\n output.make_plots(cfg, grouped)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"runner/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59654665","text":"red_meat_file = open('red_meat.txt', 'r')\nred_meat_list = red_meat_file.readlines()\nred_meat_file.close()\ngreen_vegtables_file = open('green_vegtables.txt', 'r')\ngreen_vegtables_list = green_vegtables_file.readlines()\ngreen_vegtables_file.close()\nroots_vegtables_file = open('roots_vegtables.txt', 'r')\nroots_vegtables_list = roots_vegtables_file.readlines()\nroots_vegtables_file.close()\ngrains_file = open('grains.txt', 'r')\ngrains_list = grains_file.readlines()\ngrains_file.close()\nfruits_file = open('fruits.txt', 'r')\nfruits_list = fruits_file.readlines()\nfruits_file.close()\nfish_file = open('fish.txt', 'r')\nfish_list = fish_file.readlines()\nfish_file.close()\nshelfish_file = open('shelfish.txt', 'r')\nshelfish_list = shelfish_file.readlines()\nshelfish_file.close()\ndairy_file = open('dairy.txt', 'r')\ndairy_list = dairy_file.readlines()\ndairy_file.close()\ndrinks_file = open('drinks.txt', 'r')\ndrinks_list = drinks_file.readlines()\ndrinks_file.close()\n\nfood_list = list(my_dict.keys())\npercentage_list = []\nfor each_element in food_list:\n percentage_list.append(my_dict[each_element])\n\nnew_food_list = food_list[0:4]\nnew_percentage_list = percentage_list[0:4]\n\ntimes = []\n\nfor next_food in new_food_list:\n if next_food in red_meat_list:\n times.append(60)\n elif next_food in green_vegtables_list:\n times.append(30)\n elif next_food in grains_list:\n 
times.append(40)\n elif next_food in fruits_list:\n times.append(20)\n elif next_food in drinks_list:\n times.append(20)\n elif next_food in roots_vegtables_list:\n times.append(20)\n elif next_food in fish_list:\n times.append(20)\n elif next_food in shelfish_list:\n times.append(20)\n elif next_food in dairy_list:\n times.append(20)\n else:\n times.append(30)\n\nmicrowave_times = []\nindex = 0\nfor next_percentage in new_percentage_list:\n microwave_times.append(next_percentage * times[index])\n index += 1\n\npercentage_sum = sum(new_percentage_list)\nmicrowave_time_sum = sum(microwave_times)\nideal_time = microwave_time_sum/percentage_sum\nprint('You should microwave the food ' + ideal_time)\n","sub_path":"src/calc/Calculations.py","file_name":"Calculations.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"268966162","text":"from django.urls import path\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\nfrom userApp.views import Register, UserViewSet\n\nurlpatterns = [\n path('login', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n # 로그인 > access/refresh token 발급\n path('register', Register.as_view(), name='register'),\n # 회원가입\n path('refresh', TokenRefreshView.as_view(), name='token_refresh'),\n path('user', UserViewSet.as_view(), name='get_user_info'),\n # 토큰 갱신\n]\n","sub_path":"fireban/userApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465283731","text":"from django.contrib import messages\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import CreateView, UpdateView\nfrom rest_framework import viewsets\nfrom .forms import PostForm\nfrom .models import Post\nfrom .serializers import PostSerializer\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n\n\ndef index(request):\n qs = Post.objects.all()\n return render(request, 'blog/index.html', {\n 'post_list': qs,\n })\n\n\nclass PostCreateView(CreateView):\n model = Post\n form_class = PostForm\n success_url = reverse_lazy('blog:index')\n\n def form_valid(self, form):\n messages.success(self.request, 'Saved successfully.')\n return super().form_valid(form)\n\n\npost_new = PostCreateView.as_view()\n\n\nclass PostUpdateView(UpdateView):\n model = Post\n form_class = PostForm\n success_url = reverse_lazy('blog:index')\n\n def form_valid(self, form):\n messages.success(self.request, 'Saved successfully.')\n return super().form_valid(form)\n\npost_edit = PostUpdateView.as_view()\n\n","sub_path":"sample02-django/djproj/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47574750","text":"from flask import Flask, render_template, request, abort, jsonify\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom sys import argv\nimport time\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///test.db\"\n\ndb = SQLAlchemy(app)\n\nprint ('WEBPAGE COMPILED!')\n\n# @app.route('/')\ndef home():\n\treturn render_template ('home.html')\n\nclass User(db.Model):\n\n\t__tablename__ = 'users'\n\tidentity = db.Column(db.String, primary_key = True)\n\tlongitude = db.Column(db.Float, primary_key=True)\n\tlatitude 
= db.Column(db.Float, primary_key=True)\n\ttimestamp = db.Column(db.Float, primary_key=True)\n\n\tdef __init__(self, identity, longitude, latitude, timestamp):\n\t\tself.identity = identity\n\t\tself.longitude = longitude\n\t\tself.latitude = latitude\n\t\tself.timestamp = timestamp\n\n\n\tdef __repr__(self):\n\t\treturn '<User %r>' % (self.identity)\n\n\t#def delete(self, identity, timestamp):\n\n# class Party(db.Model):\n\n# \t__tablename__ = 'Parties'\n# \tidentity = db.Column(db.String, primary_key = True)\n# \tname = db.Column(db.String, primary_key = True)\n# \tlongitude = db.Column(db.Float, primary_key = True)\n# \tlatitude = db.Column(db.Float, primary_key = True)\n# \t#timestamp = db.Column(db.Float, primary_key = True)\n# \tclosing_time = db.Column(db.Float,primary_key = True)\n\n# \tdef __init__(self, identity, name, longitude, latitude, closing_time):\n# \t\tself.identity = identity\n# \t\tself.name = name\n# \t\tself.longitude = longitude\n# \t\tself.latitude = latitude\n# \t\tself.closing_time = closing_time\n\n\n@app.route('/')\ndef themap():\n\treturn render_template('geolocation1.html')\n\n@app.route('/start_party')\ndef start_party():\n\treturn render_template('start_party.html')\n\n@app.route('/send_location', methods=['POST'])\ndef handle_send_location():\n\tdata = {\n\t'identity' : request.form['identity'],\n\t'longitude' : request.form['longitude'],\n\t'latitude' : request.form['latitude'],\n\t'timestamp' : time.time()\n\t}\n\n\t# write the data to the database using sqlite\n\tuser = User(identity = data['identity'], longitude = data['longitude'], latitude = data['latitude'], timestamp = data['timestamp'])\n\t#replace????????????????\n\t#user_old = db.session.query(User).filter_by(identity = data['identity']).first()\n\t# user_old = db.session.query(User).filter_by(True).first()\n\t# print ('##############################################', user_old)\n\t# db.session.delete(user_old)\n\tdb.session.query(User).filter(User.identity == data['identity']).delete(synchronize_session = False)\n\tdb.session.add(user)\n\tdb.session.commit()\n\t# a Flask view must return a response, otherwise the request errors out\n\treturn 'OK'\n\n@app.route('/get_locations')\ndef handle_get_locations():\n\t# read the data from the database using sqlite\n\n\t# format the records you received into list of dictionaries\n\tdata = []\n\n\tusers = db.session.query(User).all()\n\t# Not Filter ????????????????????????\n\t#users = db.session.query(User).filter_by((timestamp - time.time()) < 30)\n\n\tfor user in users:\n\t\t# print time.time()\n\n\t\tdata.append({\n\t\t'longitude': user.longitude,\n\t\t'latitude': user.latitude,\n\t\t'ID': user.identity\n\t\t})\n\n\n\t# return the list of dictionaries as json\n\treturn jsonify(locations=data)\n\ndef handle_create_db():\n\tdb.create_all()\n\tdb.session.commit()\n\nif __name__ == '__main__':\n\thandle_create_db()\n\t#app.run(debug = True)\n\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"448197305","text":"#!/usr/bin/python\r\n#-*-coding:utf-8-*-\r\n# Author: hongguiting\r\n# Date: 2016-03-08\r\n\r\nimport time\r\nfrom libcom.lib_pub.logging_drv import log_info\r\nfrom libcom.console_drv.console_drv import *\r\nfrom libcom.lib_cmd.basic_cmd import BasicCmd\r\nfrom libcom.config_topo.topo_controller import TopoController\r\nfrom libcom.device_adapt.device_adapt import SwitchAdapter\r\n\r\n__all__ = [\"cintf_pkt_fwd_bas_001\"]\r\n\r\nSUCCESS = 0\r\nFAIL = -1\r\n\r\ndef get_sn(str):\r\n    start = 
str.find('sn')\r\n if (start == -1):\r\n return -1\r\n end = str[start:].find('>')\r\n if (end == -1):\r\n return -1\r\n \r\n sn = str[start + 3 : (start + end)]\r\n return int(sn)\r\n \r\n \r\ndef _cintf_pkt_fwd_update_table_001(cb_arg):\r\n dev_key = cb_arg.dev_names[1] \r\n basic_cmd = BasicCmd(dev_key)\r\n \r\n #手动构造dp_complete通告,刷新广播表、单播表\r\n con = Console(dev_key)\r\n con.run_cmd('show version slots')\r\n basic_cmd.shell()\r\n run_at_mode(dev_key)\r\n run_at_cmd(dev_key, 'load pkt_fwd')\r\n con.write_cmd('ut dp_complete')\r\n time.sleep(1)\r\n con.write_cmd('ut dump_uc')\r\n time.sleep(2)\r\n info = con.read_all()\r\n sn_uc = get_sn(info)\r\n log_info('sn_uc=%d' % sn_uc)\r\n \r\n con.write_cmd('ut dump_bc 0')\r\n time.sleep(2)\r\n info = con.read_all()\r\n sn_bc = get_sn(info)\r\n log_info('sn_bc=%d' % sn_bc)\r\n if (sn_uc != sn_bc):\r\n log_info('pkt fwd table is error')\r\n quit_at_mode(dev_key)\r\n con.write_cmd('exit')\r\n return FAIL\r\n \r\n quit_at_mode(dev_key)\r\n con.write_cmd('exit')\r\n return SUCCESS\r\n\r\ndef cintf_pkt_fwd_bas_001(cb_arg):\r\n if len(cb_arg.dev_names) == 0:\r\n log_info(\"Failed: Need one switch to be test.\")\r\n return FAIL\r\n \r\n dev_name = cb_arg.dev_names[1]\r\n con = Console(dev_name)\r\n con.wake_up()\r\n \r\n result = FAIL\r\n try:\r\n result = _cintf_pkt_fwd_update_table_001(cb_arg)\r\n finally:\r\n con.exit()\r\n return result","sub_path":"cases_set/cintf/pkt_fwd/cintf_pkt_fwd_bas_001.py","file_name":"cintf_pkt_fwd_bas_001.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"629873106","text":"\"\"\"\n @reroes\n Ejemplo de manejo de paquetes\n\"\"\"\n# Salida:\n# Valor 2 elevado a la potencia 2 es igual a 4\nfrom paquete1.informacion import valores\nfrom paquete1.informacion2 import hacer_potencia\n\"\"\" \n\tpara crear paquete, se debe crear siempre\n\tla carpeta \"_init_.py\"\n\"\"\" \n\nfor l in valores:\n r = hacer_potencia(l, 2)\n print(\"%s %d %s %d %s %.0f\" % (\"Valor\", l, \\\n \t\"elevado a la potencia\", 2, \\\n \t\"es igual a\", r))\n","sub_path":"ejercicios-python1/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"540747016","text":"import pygame\nclass Ship():\n\n def __init__(self,al_settings,screen):\n self.screen = screen\n self.al_settings = al_settings\n self.image = pygame.image.load('images/ship.bmp')\n self.image = pygame.transform.rotate(self.image,270)\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n self.rect.left = self.screen_rect.left\n self.rect.centery = self.screen_rect.centery\n self.center_y = float(self.rect.centery)\n\n self.moving_up = False\n self.moving_down = False\n\n def blitme(self):\n self.screen.blit(self.image,self.rect)\n\n def update(self):\n if self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n self.center_y += self.al_settings.ship_speed_factor\n if self.moving_up and self.rect.top > 0:\n self.center_y -= self.al_settings.ship_speed_factor\n\n self.rect.centery = self.center_y\n\n","sub_path":"12/12_5/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90385749","text":"''' Compute the intersection of two sorted arrays \n\n Instead of going through every element of listB. 
We can perform Binary\n search in listB. That will improve the time complexity.\n\n check which list is smaller and do binary search on the other.\n \n Time Complexity: O(m log(n)) , here m < n\n m : lenght of listA\n n : length of listB\n'''\n\ndef BinarySearch(item, sorted_list):\n start = 0\n end = len(sorted_list) - 1\n \n while(start <= end):\n \n middle = (start + (end - start) // 2)\n \n if (item < sorted_list[middle]):\n end = middle - 1\n elif (item == sorted_list[middle]):\n return True\n else:\n start = middle + 1\n\n return False\n\ndef compute_intersection_2(a_list, b_list):\n \n result = []\n \n for a in a_list:\n if (BinarySearch(a, b_list) and (len(result) == 0 or a != result[len(result)-1])):\n result.append(a)\n \n return result\n \nlist_a = [2,3,3,5,5,6,7,7,8,12]\nlist_b = [5,5,6,8,8,9,10,10]\n\nintersect_list = compute_intersection_2(list_a,list_b)\nprint(intersect_list)","sub_path":"Sorting/prob_1_2.py","file_name":"prob_1_2.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80419080","text":"# -*- coding: utf-8 -*-\n\n\n__all__ = ('DEBUG', 'VERSION')\n\n\nDEBUG = False\ndef switch_to_local(endpoint='http://api.local.greendizer.com/'):\n from greendizer.clients import http\n global DEBUG\n http.DEBUG = DEBUG = True\n http.API_ROOT = endpoint\n \ndef switch_to_prod(debug=False, endpoint='https://api.greendizer.com/'):\n from greendizer.clients import http\n global DEBUG\n http.DEBUG = DEBUG = debug\n http.API_ROOT = endpoint\n\nfrom greendizer import version\nVERSION = version.VERSION","sub_path":"greendizer/clients/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598520117","text":"import LBAMMaterials.LBAMMaterials as mat\nimport LBAMModel.LBAMModel as model\nimport numpy as np\nimport os\nfrom skimage.transform import hough_ellipse\nfrom skimage.draw import ellipse_perimeter\nimport cv2\n\ndef powerEstHoughCircle(circle,I,PP):\n ''' Estimating power using circle found in power density matrix by Hough Circle algorithm\n\n circle : Portion of the Hough Circle results matrix representing one circle. 
It's an array\n    of a list of three values [x,y,radius]\n    I : Power density matrix, W/m2\n    PP : Pixel Pitch, m\n\n    This is designed to be used with Numpy's apply along matrix command as applied to results\n\n    Return sums the values within circle and multiplies by the area\n    '''\n    # create empty mask to draw results on\n    mask = np.zeros(I.shape[:2],dtype='uint8')\n    # draw filled circle using given parameters\n    cv2.circle(mask,(*circle[:2],),circle[2],(255),cv2.FILLED)\n    # find where it was drawn\n    i,j = np.where(mask==255)\n    # sum the power density values in that area and multiply by area\n    return np.sum(I[i,j])*(np.pi*(circle[2]*PP)**2.0)\n\n\n# used by powerEstBestCircle below but missing from the imports at the top of the file\nfrom skimage.transform import hough_circle, hough_circle_peaks\n\ndef powerEstBestCircle(I,radii_range,pixel_pitch):\n    # normalize image so it can be used by skimage\n    I_norm = I/I.max(axis=(0,1))\n    # search for circles\n    res = hough_circle(I_norm,radii_range)\n    # get the top circle (total_num_peaks=1)\n    accums,cx,cy,radii = hough_circle_peaks(res,radii_range,total_num_peaks=1)\n    # choose the highest rated circle to estimate power with\n    return powerEstHoughCircle([cx[accums.argmax()],cy[accums.argmax()],radii[accums.argmax()]],I,pixel_pitch)\n\n\nT0 = mat.CelciusToK(23.0)\ne,K,D = mat.buildMaterialData()\nprint(\"Reading in power\")\nQr = model.readH264(\"D:/BEAM/Scripts/LMAP Thermal Data/Example Data - _Tree_/video.h264\")\nT = model.predictTemperature(Qr,e,T0)\nnp.nan_to_num(T,copy=False)\nI = model.predictPowerDensity(T,K,D,T0)\nnp.nan_to_num(I,copy=False)\n\n# laser radius\nr0 = 0.00035 #m\n# laser radius in pixels\npixel_pitch = 20e-6\nr0_p = int(np.ceil(r0/pixel_pitch))\n# assuming gaussian behaviour, 99.7% of values are within 4 std devs of mean\n# used as an upper limit in hough circles\nrmax_gauss = 4*r0_p\n\nrmin_area = np.pi*r0_p**2.0\nradii_range = np.arange(r0_p,64,1)\nprint(\"Normalizing values\")\nI_norm = I/I.max(axis=(0,1))\nI_norm_cv = (I_norm*255).astype('uint8')\n\n# ellipse stats\nellipse_cx = np.zeros(I.shape[2])\nellipse_cy = np.zeros(I.shape[2])  # was a duplicated ellipse_cx assignment; cy pairs with cx\nellipse_a = np.zeros(I.shape[2])\nellipse_b = np.zeros(I.shape[2])\nellipse_theta = np.zeros(I.shape[2])\n\nos.makedirs(\"CannySobel\",exist_ok=True)\nos.makedirs(\"CannySobel/EdgeResults\",exist_ok=True)\nos.makedirs(\"CannySobel/Contours\",exist_ok=True)\nos.makedirs(\"CannySobel/Ellipse\",exist_ok=True)\nprint(\"Starting run\")\nfor ff in range(I.shape[2]):\n    sobel_res = np.abs(cv2.Sobel(I_norm_cv[:,:,ff],cv2.CV_8U,1,0,ksize=5))+np.abs(cv2.Sobel(I_norm_cv[:,:,ff],cv2.CV_8U,0,1,ksize=5))\n    canny = cv2.Canny(sobel_res,sobel_res.mean(),sobel_res.mean()*2)\n    cv2.imwrite(\"CannySobel/EdgeResults/canny-sobel-f{}.png\".format(ff),canny)\n    # find contours\n    ct,_ = cv2.findContours(canny,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n    # find ellipse\n    minEllipse = []\n    # if contours were found\n    if ct:\n        # if there is a contour\n        #print(\"Found {} cts\".format(len(ct)))\n        if len(ct) >0:\n            mask = np.zeros((*I.shape[:2],3),np.uint8)\n            for c in ct:\n                cv2.drawContours(mask,[c],0,(255,0,0),1)  # contours argument must be a list of arrays\n                cv2.imwrite(\"CannySobel/Contours/canny-sobel-contours-f{}.png\".format(ff),mask)\n                if c.shape[0]>5:\n                    minEllipse.append(cv2.fitEllipse(c))\n            #print(\"Found {} ellipses\".format(len(minEllipse)))\n            mask[...]=0\n            for e in minEllipse:\n                cv2.ellipse(mask,e,(0,255,0),2)\n            cv2.imwrite(\"CannySobel/Ellipse/canny-sobel-ellipses-f{}.png\".format(ff),mask)\n\n\n\n","sub_path":"03 Scripts/HoughEllipse/houghellipsepest.py","file_name":"houghellipsepest.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"187152914","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 2018年11月30日\r\n\r\n@author: Zhukun Luo\r\nJiangxi university of finance and economics\r\n'''\r\nfrom env import Env\r\nimport numpy as np\r\nimport time\r\nEPSILON = 0.1\r\nALPHA = 0.1\r\nGAMMA = 0.9\r\nMAX_STEP = 30\r\ne=Env()\r\nQ=np.zeros((e.state_num,4))\r\ndef epsilon_greedy(Q, state):\r\n if (np.random.uniform() > 1 - EPSILON) or ((Q[state, :] == 0).all()):\r\n action = np.random.randint(0, 4) # 0~3\r\n else:\r\n action = Q[state, :].argmax()\r\n return action\r\nfor i in range(200):\r\n e=Env()\r\n while(e.is_end is False) and (e.step= 0 and bindex >= 0:\n\t\tif A[aindex] > B[bindex]:\n\t\t\tA[aend] = A[aindex]\n\t\t\taindex -= 1\n\t\telse:\n\t\t\tA[aend] = B[bindex]\n\t\t\tbindex -= 1\n\t\taend -= 1\n\twhile aindex >= 0:\n\t\tA[aend] = A[aindex]\n\t\taindex -= 1\n\t\taend -= 1\n\twhile bindex >= 0:\n\t\tA[aend] = B[bindex]\n\t\tbindex -= 1\n\t\taend -= 1\n\treturn A\n\ndef less_than_character_count(x, y):\n\tcharacter_set = string.ascii_lowercase\n\tdict1 = {}\n\tdict2 = {}\n\tfor elem in x:\n\t\tif elem in dict1:\n\t\t\tdict1[elem] += 1\n\t\telse:\n\t\t\tdict1[elem] = 1\n\tfor elem in y:\n\t\tif y in dict2:\n\t\t\tdict2[elem] += 1\n\t\telse:\n\t\t\tdict2[elem] = 1\n\tfor char in character_set:\n\t\tif char in dict1 and char in dict2:\n\t\t\tif dict1[char] < dict2[char]:\n\t\t\t\treturn 1\n\t\t\telif dict1[char] > dict2[char]:\n\t\t\t\treturn -1\n\t\telif char in dict1:\n\t\t\treturn 1\n\t\telif char in dict2:\n\t\t\treturn -1\n\treturn 0\n\nclass CircusPerson:\n\tdef __init__(self, height, weight):\n\t\tself.height = height\n\t\tself.weight = weight\n\tdef __lt__(self, other):\n\t\treturn self.height < other.height and self.weight < other.weight\n\tdef __gt__(self, other):\n\t\treturn self.height > other.height and self.weight > other.weight\n\tdef __eq__(self, other):\n\t\treturn self.height == other.height and self.weight == other.weight\n\tdef __le__(self, other):\n\t\treturn self.height <= other.height and self.weight <= other.weight\n\tdef __ge__(self, other):\n\t\treturn self.height >= other.height and self.weight >= other.weight\n\tdef __ne__(self, other):\n\t\treturn self.height != other.height and self.weight == other.weight\n\tdef __str__(self):\n\t\treturn \"(\"+str(round(self.height,1))+\",\"+str(round(self.weight,1))+\")\"\n\nfrom random import shuffle\n\ndef shuffle_word(word):\n word = list(word)\n shuffle(word)\n return ''.join(word)\n\ndef binary_search(A, val, lower, upper): #Performs binary search on a sorted array A\n\t# print(\"lower: \", lower)\n\t# print(\"upper: \", upper)\n\tif lower == upper and A[lower] != val:\n\t\treturn -1\n\tmidpoint = (upper + lower) // 2\n\tprint(\"midpoint: \", midpoint)\n\tif A[midpoint] == val:\n\t\treturn midpoint\n\telif A[midpoint] > val:\n\t\tprint(\"going left\")\n\t\treturn binary_search(A, val, lower, midpoint)\n\telse:\n\t\tprint(\"going right\")\n\t\treturn binary_search(A, val, midpoint, upper)\n\ndef main():\n\tA = [1,3,5,7,9,0,0,0,0,0]\n\tB = [2,4,6,8,10]\n\tprint(merge(A,B,5))\n\n\tstr1 = \"abcdefg\"\n\tstr2 = \"gfedcba\"\n\tprint(less_than_character_count(str1,str2))\n\tprint(less_than_character_count(\"a\",\"aa\"))\n\tprint(less_than_character_count(\"ab\",\"aa\"))\n\tprint(less_than_character_count(\"aab\",\"bba\"))\n\n\tdef cmp_to_key(mycmp):\n\t\t# 'Convert a cmp= function into a key= function'\n\t\tclass K:\n\t\t\tdef __init__(self, obj, *args):\n\t\t\t\tself.obj = obj\n\t\t\tdef __lt__(self, other):\n\t\t\t\treturn mycmp(self.obj, 
other.obj) < 0\n\t\t\tdef __gt__(self, other):\n\t\t\t\treturn mycmp(self.obj, other.obj) > 0\n\t\t\tdef __eq__(self, other):\n\t\t\t\treturn mycmp(self.obj, other.obj) == 0\n\t\t\tdef __le__(self, other):\n\t\t\t\treturn mycmp(self.obj, other.obj) <= 0\n\t\t\tdef __ge__(self, other):\n\t\t\t\treturn mycmp(self.obj, other.obj) >= 0\n\t\t\tdef __ne__(self, other):\n\t\t\t\treturn mycmp(self.obj, other.obj) != 0\n\t\treturn K\n\n\tL = ['foo', 'biology', 'sequence']\n\tL = [shuffle_word(word) for word in L]\n\tL += [shuffle_word(word) for word in L]\n\tprint(L)\n\tL = sorted(L, key=cmp_to_key(less_than_character_count))\n\tprint(L)\n\n\toneToTwenty = [i for i in range(1,22)]\n\tfor elem in oneToTwenty:\n\t\tprint(elem)\n\t\tprint(binary_search(oneToTwenty, elem, 0, len(oneToTwenty)))\n\n\tprint(\"==================================================\")\n\n\timport numpy as np \n\tmuHeight, sigmaHeight = 170, 10\n\tmuWeight, sigmaWeight = 140, 30\n\theights = np.random.normal(muHeight, sigmaHeight, 3)\n\tweights = np.random.normal(muWeight, sigmaWeight, 3)\n\tpeople = []\n\tfor i in range(len(heights)):\n\t\tperson = CircusPerson(heights[i], weights[i])\n\t\tpeople += [person]\n\tprint([str(person) for person in people])\n\tpeople = sorted(people)\n\tprint([str(person) for person in people])\n\tpeopleGrouped = []\n\tcurrentGroup = []\n\tgroupCount = 1\n\tfor i in range(len(people)):\n\t\tif i == len(people) - 1:\n\t\t\tif len(currentGroup) > 0:\n\t\t\t\tpeopleGrouped += [currentGroup + [people[-1]]]\n\t\t\telse:\n\t\t\t\tpeopleGrouped += [[people[-1]]]\n\t\telse:\n\t\t\tif people[i] < people[i+1]:\n\t\t\t\tcurrentGroup += [people[i]]\n\t\t\t\tpeopleGrouped += [currentGroup]\n\t\t\t\tcurrentGroup = []\n\t\t\t\tgroupCount += 1\n\t\t\telse:\n\t\t\t\tcurrentGroup += [people[i]]\n\n\tprint(len(peopleGrouped))\n\tprint([[str(person) for person in group] for group in peopleGrouped])\n\t# for group in peopleGrouped:\n\t# \tprint([str(person) for person in group])\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"data_structures_and_algorithms/Sorting.py","file_name":"Sorting.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494436758","text":"# O(n) time | O(n) space\ndef twoNumberSum(array, targetSum):\n\tnums = set()\n\t\n\tfor curr in array:\n\t\tpotentialMatch = targetSum - curr\n\t\t\n\t\tif potentialMatch in nums:\n\t\t\treturn [potentialMatch, curr]\n\t\telse:\n\t\t\tnums.add(curr)\n\t\t\t\n\treturn []","sub_path":"May/twoNumberSum.py","file_name":"twoNumberSum.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184061947","text":"#\n# @lc app=leetcode.cn id=146 lang=python3\n#\n# [146] LRU缓存机制\n#\n# https://leetcode-cn.com/problems/lru-cache/description/\n#\n# algorithms\n# Medium (44.32%)\n# Likes: 299\n# Dislikes: 0\n# Total Accepted: 23.8K\n# Total Submissions: 53.5K\n# Testcase Example: '[\"LRUCache\",\"put\",\"put\",\"get\",\"put\",\"get\",\"put\",\"get\",\"get\",\"get\"]\\n' +\n# '[[2],[1,1],[2,2],[1],[3,3],[2],[4,4],[1],[3],[4]]'\n#\n# 运用你所掌握的数据结构,设计和实现一个  LRU (最近最少使用) 缓存机制。它应该支持以下操作: 获取数据 get 和 写入数据 put 。\n# \n# 获取数据 get(key) - 如果密钥 (key) 存在于缓存中,则获取密钥的值(总是正数),否则返回 -1。\n# 写入数据 put(key, value) -\n# 如果密钥不存在,则写入其数据值。当缓存容量达到上限时,它应该在写入新数据之前删除最近最少使用的数据值,从而为新的数据值留出空间。\n# \n# 进阶:\n# \n# 你是否可以在 O(1) 时间复杂度内完成这两种操作?\n# \n# 示例:\n# \n# LRUCache cache = new LRUCache( 2 /* 缓存容量 */ );\n# \n# cache.put(1, 
1);\n# cache.put(2, 2);\n# cache.get(1); // 返回 1\n# cache.put(3, 3); // 该操作会使得密钥 2 作废\n# cache.get(2); // 返回 -1 (未找到)\n# cache.put(4, 4); // 该操作会使得密钥 1 作废\n# cache.get(1); // 返回 -1 (未找到)\n# cache.get(3); // 返回 3\n# cache.get(4); // 返回 4\n# \n# \n# @lc code=start\nclass LRUCache(object):\n    def __init__(self, capacity: int):\n        self.capacity = capacity\n        self.node_hash = {}\n        self.head_node = ListNode(key=-1, value=-1, previous_node=None, next_node=None)\n        self.tail_node = ListNode(key=-1, value=-1, previous_node=None, next_node=None)\n        self.head_node.next_node = self.tail_node\n        self.tail_node.previous_node = self.head_node\n\n\n    def __len__(self):\n        return len(self.node_hash)\n\n\n    def remove_before_tail_node(self) -> \"ListNode\":  # quoted forward reference: ListNode is defined below\n        target_node = self.tail_node.previous_node\n        self.tail_node.previous_node = target_node.previous_node\n        target_node.previous_node.next_node = self.tail_node\n        target_node.previous_node = None\n        target_node.next_node = None\n        return target_node\n\n\n    def get(self, key: int) -> int:\n        if key in self.node_hash.keys():\n            target_node = self.node_hash[key]\n            target_node.previous_node.next_node = target_node.next_node\n            target_node.next_node.previous_node = target_node.previous_node\n            target_node.previous_node = self.head_node\n            target_node.next_node = self.head_node.next_node\n            self.head_node.next_node.previous_node = target_node\n            self.head_node.next_node = target_node\n            return target_node.value\n        else:\n            return -1\n\n\n    def put(self, key: int, value: int) -> None:\n        if key in self.node_hash.keys():\n            target_node = self.node_hash[key]\n            target_node.value = value\n            target_node.previous_node.next_node = target_node.next_node\n            target_node.next_node.previous_node = target_node.previous_node\n            target_node.previous_node = self.head_node\n            target_node.next_node = self.head_node.next_node\n            self.head_node.next_node.previous_node = target_node\n            self.head_node.next_node = target_node\n            return\n        else:\n            new_node = ListNode(key, value)\n            if len(self) < self.capacity:\n                self.node_hash[key] = new_node\n                new_node.previous_node = self.head_node\n                new_node.next_node = self.head_node.next_node\n                self.head_node.next_node.previous_node = new_node\n                self.head_node.next_node = new_node\n                return\n            else:\n                target_node = self.remove_before_tail_node()\n                del self.node_hash[target_node.key]\n                self.node_hash[key] = new_node\n                new_node.previous_node = self.head_node\n                new_node.next_node = self.head_node.next_node\n                self.head_node.next_node.previous_node = new_node\n                self.head_node.next_node = new_node\n                return\n\n\n\nclass ListNode(object):\n    def __init__(self, key: int, value: int, previous_node=None, next_node=None):\n        self.key = key\n        self.value = value\n        self.previous_node = previous_node\n        self.next_node = next_node\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)\n# @lc code=end\n\n# if __name__ == '__main__':\n#     cache = LRUCache(2)\n#     cache.put(1, 1)\n#     cache.put(2, 2)\n#     cache.get(1) \n#     cache.put(3, 3)\n    # cache.get(2)\n    # cache.put(4, 4) \n    # cache.get(1)\n    # cache.get(3) \n    # cache.get(4) ","sub_path":"python/146.lru缓存机制.py","file_name":"146.lru缓存机制.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"10976378","text":"# -*- coding:utf-8 -*-\n\n'''\nRating for post.\n'''\n\nimport peewee\nfrom torcms.core import tools\nfrom torcms.model.core_tab import TabLog\nfrom config import 
CMS_CFG, DB_CFG\nfrom torcms.model.abc_model import Mabc, MHelper\n\n\nclass MLog(Mabc):\n '''\n 用户日志\n '''\n\n @staticmethod\n def insert_data(userid, postid, kind):\n '''\n Inert new record.\n '''\n uid = tools.get_uuid()\n TabLog.create(\n uid=uid,\n post_id=postid,\n user_id=userid,\n kind=kind,\n time_create=tools.timestamp()\n )\n return uid\n\n @staticmethod\n def query_pager_by_user(userid, current_page_num=1):\n '''\n Query pager\n '''\n return TabLog.select().where(TabLog.user_id == userid).order_by(\n TabLog.time_create.desc()\n ).paginate(\n current_page_num, CMS_CFG['list_num']\n )\n\n @staticmethod\n def query_all_user(current_page_num=1):\n '''\n Query pager\n '''\n return TabLog.select().distinct(TabLog.user_id).order_by(\n TabLog.user_id\n ).paginate(\n current_page_num, CMS_CFG['list_num']\n )\n\n @staticmethod\n def total_number():\n '''\n Return the number of certian slug.\n '''\n return TabLog.select().count()\n\n @staticmethod\n def count_of_certain(user_id):\n recs = TabLog.select().where(TabLog.user_id == user_id)\n\n return recs.count()\n\n @staticmethod\n def get_by_uid(uid):\n '''\n return the record by uid\n '''\n return MHelper.get_by_uid(TabLog, uid)\n\n @staticmethod\n def get_retention_time_by_id(uid, user_id):\n current_rec = MLog.get_by_uid(uid)\n recs = TabLog.select().where(\n (TabLog.user_id == user_id) &\n (TabLog.time_create > current_rec.time_create)\n ).order_by(TabLog.time_create)\n if recs.count():\n return recs.get()\n return None\n","sub_path":"torcms/model/log_model.py","file_name":"log_model.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"313663107","text":"#python does allocate memory automatically\r\n\r\na = 1 # here a is variable and 1 is data or value\r\nb =44 \r\nc = a+b # expression / logic\r\n\r\nprint('sum of two numbers : ',c) # outuput \r\n\r\na ='1'\r\n\r\n","sub_path":"mylearning.py","file_name":"mylearning.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177971134","text":"# Copyright 2014 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Once nested repositories work, this file should cease to exist.\n\nload(\"@io_bazel_rules_go//go/private:common.bzl\", \"MINIMUM_BAZEL_VERSION\")\nload(\"@io_bazel_rules_go//go/private:skylib/lib/versions.bzl\", \"versions\")\nload(\"@io_bazel_rules_go//go/private:nogo.bzl\", \"DEFAULT_NOGO\", \"go_register_nogo\")\nload(\"@io_bazel_rules_go//go/platform:list.bzl\", \"GOOS_GOARCH\")\nload(\"@io_bazel_rules_go//proto:gogo.bzl\", \"gogo_special_proto\")\nload(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\nload(\"@bazel_tools//tools/build_defs/repo:git.bzl\", \"git_repository\")\n\ndef go_rules_dependencies():\n \"\"\"See /go/workspace.rst#go-rules-dependencies for full documentation.\"\"\"\n if getattr(native, \"bazel_version\", None):\n versions.check(MINIMUM_BAZEL_VERSION, bazel_version = native.bazel_version)\n\n # Was needed by Gazelle in the past. Will likely be needed for go/packages\n # and analysis in the future.\n _maybe(\n http_archive,\n name = \"org_golang_x_tools\",\n # master, as of 2019-01-15\n urls = [\"https://codeload.github.com/golang/tools/zip/bf090417da8b6150dcfe96795325f5aa78fff718\"],\n strip_prefix = \"tools-bf090417da8b6150dcfe96795325f5aa78fff718\",\n type = \"zip\",\n patches = [\n \"@io_bazel_rules_go//third_party:org_golang_x_tools-gazelle.patch\",\n \"@io_bazel_rules_go//third_party:org_golang_x_tools-extras.patch\",\n ],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix golang.org/x/tools\n )\n\n # Proto dependencies\n _maybe(\n git_repository,\n name = \"com_github_golang_protobuf\",\n remote = \"https://github.com/golang/protobuf\",\n commit = \"aa810b61a9c79d51363740d207bb46cf8e620ed5\", # v1.2.0, as of 2018-09-28\n patches = [\n \"@io_bazel_rules_go//third_party:com_github_golang_protobuf-gazelle.patch\",\n \"@io_bazel_rules_go//third_party:com_github_golang_protobuf-extras.patch\",\n ],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix github.com/golang/protobuf -proto disable_global\n )\n\n # bazel_skylib is a dependency of com_google_protobuf.\n # Nothing in rules_go may depend on bazel_skylib, since it won't be declared\n # when go/def.bzl is loaded. 
The vendored copy of skylib in go/private/skylib\n # may be used instead.\n _maybe(\n http_archive,\n name = \"bazel_skylib\",\n sha256 = \"54ee22e5b9f0dd2b42eb8a6c1878dee592cfe8eb33223a7dbbc583a383f6ee1a\",\n strip_prefix = \"bazel-skylib-0.6.0\",\n urls = [\"https://github.com/bazelbuild/bazel-skylib/archive/0.6.0.zip\"],\n type = \"zip\",\n )\n _maybe(\n http_archive,\n name = \"com_google_protobuf\",\n strip_prefix = \"protobuf-3.6.1.3\",\n sha256 = \"9510dd2afc29e7245e9e884336f848c8a6600a14ae726adb6befdb4f786f0be2\",\n # v3.6.1.3 as of 2019-01-15\n urls = [\"https://github.com/protocolbuffers/protobuf/archive/v3.6.1.3.zip\"],\n type = \"zip\",\n )\n _maybe(\n git_repository,\n name = \"com_github_mwitkow_go_proto_validators\",\n remote = \"https://github.com/mwitkow/go-proto-validators\",\n commit = \"0950a79900071e9f3f5979b78078c599376422fd\", # master, as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:com_github_mwitkow_go_proto_validators-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix github.com/mwitkow/go-proto-validators -proto disable\n )\n _maybe(\n git_repository,\n name = \"com_github_gogo_protobuf\",\n remote = \"https://github.com/gogo/protobuf\",\n commit = \"4cbf7e384e768b4e01799441fdf2a706a5635ae7\", # v1.2.0, as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:com_github_gogo_protobuf-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy\n )\n _maybe(\n gogo_special_proto,\n name = \"gogo_special_proto\",\n )\n\n # GRPC dependencies\n _maybe(\n git_repository,\n name = \"org_golang_x_net\",\n remote = \"https://github.com/golang/net\",\n commit = \"915654e7eabcea33ae277abbecf52f0d8b7a9fdc\", # master as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:org_golang_x_net-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix golang.org/x/net\n )\n _maybe(\n git_repository,\n name = \"org_golang_x_text\",\n remote = \"https://github.com/golang/text\",\n commit = \"f21a4dfb5e38f5895301dc265a8def02365cc3d0\", # v0.3.0, latest as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:org_golang_x_text-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix golang.org/x/text\n )\n _maybe(\n git_repository,\n name = \"org_golang_x_sys\",\n remote = \"https://github.com/golang/sys\",\n commit = \"2be51725563103c17124a318f1745b66f2347acb\", # master as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:org_golang_x_sys-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix golang.org/x/sys\n )\n _maybe(\n git_repository,\n name = \"org_golang_google_grpc\",\n remote = \"https://github.com/grpc/grpc-go\",\n commit = \"df014850f6dee74ba2fc94874043a9f3f75fbfd8\", # v1.17.0, latest as of 2019-01-15\n patches = [\n \"@io_bazel_rules_go//third_party:org_golang_google_grpc-gazelle.patch\",\n \"@io_bazel_rules_go//third_party:org_golang_google_grpc-crosscompile.patch\",\n ],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix google.golang.org/grpc -proto disable\n )\n _maybe(\n git_repository,\n name = \"org_golang_google_genproto\",\n remote = \"https://github.com/google/go-genproto\",\n commit = \"db91494dd46c1fdcbbde05e5ff5eb56df8f7d79a\", # master as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:org_golang_google_genproto-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix google.golang.org/genproto -proto disable_global\n )\n _maybe(\n http_archive,\n name 
= \"go_googleapis\",\n # master as of 2019-01-17\n urls = [\"https://codeload.github.com/googleapis/googleapis/zip/0ac60e21a1aa86c07c1836865b35308ba8178b05\"],\n strip_prefix = \"googleapis-0ac60e21a1aa86c07c1836865b35308ba8178b05\",\n type = \"zip\",\n patches = [\n \"@io_bazel_rules_go//third_party:go_googleapis-directives.patch\",\n \"@io_bazel_rules_go//third_party:go_googleapis-gazelle.patch\",\n \"@io_bazel_rules_go//third_party:go_googleapis-fix.patch\",\n ],\n patch_args = [\"-p1\"],\n )\n\n # Needed for examples\n _maybe(\n git_repository,\n name = \"com_github_golang_glog\",\n remote = \"https://github.com/golang/glog\",\n commit = \"23def4e6c14b4da8ac2ed8007337bc5eb5007998\", # master as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:com_github_golang_glog-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix github.com/golang/glog\n )\n _maybe(\n git_repository,\n name = \"com_github_kevinburke_go_bindata\",\n remote = \"https://github.com/kevinburke/go-bindata\",\n commit = \"06af60a4461b70d84a2b173d92f9f425d78baf55\", # v3.11.0, latest as of 2019-01-15\n patches = [\"@io_bazel_rules_go//third_party:com_github_kevinburke_go_bindata-gazelle.patch\"],\n patch_args = [\"-p1\"],\n # gazelle args: -go_prefix github.com/kevinburke/go-bindata\n )\n\n # This may be overridden by go_register_toolchains, but it's not mandatory\n # for users to call that function (they may declare their own @go_sdk and\n # register their own toolchains).\n _maybe(\n go_register_nogo,\n name = \"io_bazel_rules_nogo\",\n nogo = DEFAULT_NOGO,\n )\n\ndef _maybe(repo_rule, name, **kwargs):\n if name not in native.existing_rules():\n repo_rule(name = name, **kwargs)\n","sub_path":"go/private/repositories.bzl","file_name":"repositories.bzl","file_ext":"bzl","file_size_in_byte":8668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"442793563","text":"from __future__ import absolute_import, division\nfrom rich import print, pretty\npretty.install()\n\nimport cv2\nimport numpy as np\n\ndef compute_errors(gt, pred):\n NA_mask = gt <= 0\n thresh_map = np.maximum((gt / pred), (pred / gt))\n diff_map = (gt - pred)\n pix_test = diff_map > 3\n val_test = diff_map > gt * 0.05\n outliers_map = np.logical_and(pix_test, val_test)\n\n abs_rel_map = np.abs(gt - pred) / gt\n\n diff_map[NA_mask] = 0\n abs_rel_map[NA_mask] = 0\n outliers_map[NA_mask] = 0\n\n\n\n mask = gt > 0\n pred = pred[mask]\n gt = gt[mask]\n \"\"\"Computation of error metrics between predicted and ground truth depths\n \"\"\"\n thresh = np.maximum((gt / pred), (pred / gt))\n a1_map = thresh < 1.25\n a1 = a1_map.mean()\n\n a2_map = thresh < 1.25**2\n a2 = a2_map.mean()\n\n a3_map = thresh < 1.25**3\n a3 = a3_map.mean()\n\n diff = (gt - pred)\n rmse = np.sqrt((diff ** 2).mean())\n\n rmse_log = (np.log(gt) - np.log(pred)) ** 2\n rmse_log = np.sqrt(rmse_log.mean())\n\n abs_rel = np.abs(gt - pred) / gt\n abs_rel = np.mean(abs_rel)\n\n sq_rel = np.mean(((gt - pred) ** 2) / gt)\n\n diff = np.abs(gt - pred)\n pix_test = diff > 3\n val_test = diff > gt*0.05\n outliers = np.logical_and(pix_test, val_test)\n percent_outlier = np.sum(outliers) /outliers.size\n\n return ((percent_outlier, abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3),\n (outliers_map, abs_rel_map, diff_map, thresh_map))\n\n\ndef evaluate(gt_depths, pred_disps):\n \"\"\"Evaluates a pretrained model using a specified test set\n \"\"\"\n\n errors = []\n error_maps = []\n\n if type(gt_depths) != np.ndarray:\n gt_depths = 
gt_depths.detach().cpu().numpy()\n\n if not isinstance(pred_disps, np.ndarray):\n pred_disps = pred_disps.detach().cpu().numpy()\n\n if gt_depths.ndim == 2:\n gt_depths = gt_depths[np.newaxis, ...]\n\n if pred_disps.ndim == 2:\n pred_disps = pred_disps[np.newaxis, ...]\n\n for i in range(pred_disps.shape[0]):\n\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[-2], gt_depth.shape[-1] # NCHW\n\n pred_disp = pred_disps[i]\n # disparity values scale with image width, so rescale after resizing\n ratio = gt_width / pred_disp.shape[1]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height)) * ratio\n\n scores, maps = compute_errors(gt_depth, pred_disp)\n\n percent_outlier, abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = scores\n\n # resized_maps = []\n # for m in maps:\n # resized_maps.append(m.reshape(gt_height, gt_width))\n\n outliers_map, abs_rel_map, diff_map, thresh_map = maps\n\n out = {}\n out[\"d_all\"] = percent_outlier\n # out[\"abs_rel\"] = abs_rel\n # out[\"sq_rel\"] = sq_rel\n out[\"rmse\"] = rmse\n # out[\"rmse_log\"] = rmse_log\n # out[\"a1\"] = a1\n # out[\"a2\"] = a2\n # out[\"a3\"] = a3\n errors.append(out)\n\n out_maps = {}\n out_maps[\"outliers_map\"] = outliers_map\n out_maps[\"abs_rel_map\"] = abs_rel_map\n out_maps[\"diff_map\"] = diff_map\n # out_maps[\"thresh_map\"] = thresh_map\n\n error_maps.append(out_maps)\n\n return errors, error_maps\n\n\n","sub_path":"utils/kitti_eval.py","file_name":"kitti_eval.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"484726996","text":"'''\nBenjamin Horn\nHomework for Week 4\n- os walk\n- string work\n- command line parsing\n- file size\n- find largest and smallest files\n9/14/2015\n'''\n\nimport os, sys, getopt, operator\n\ntemp_dir = '.' # Fill so it will run if no dir given (-d)\ntemp_key = 'tkinter' # Fill so it will run if no key given (-k)\ntemp_num = '1' # give smallest and largest 1 as default\nopts, args = getopt.getopt(sys.argv[1:], \"hd:k:n:\")\nfor opt, arg in opts:\n if opt == '-h': # option help\n print('-d directory -k keyword -n number -h help')\n print('default: -d .')\n print('default: -k tkinter')\n print('default: -n 1')\n sys.exit() # do not run defaults if they call for help\n elif opt == \"-d\": # option give directory\n temp_dir = arg\n elif opt == \"-k\": # option give keyword\n temp_key = arg\n elif opt == \"-n\": # option smallest and largest files\n temp_num = arg\nprint('Directory is', temp_dir)\nprint('Keyword is', temp_key)\n\ncount = 0\nfile_size = [] # empty list to store dicts like {'file_name': 'file', 'file_size': 10}\n\nfor (dirs, subs, fils) in os.walk(temp_dir):\n for each in fils:\n temp_file = os.path.join(dirs, each)\n try:\n if str(temp_key) in (open(temp_file).read()):\n temp_dict = {'file_name': temp_file, 'file_size': os.path.getsize(temp_file)}\n file_size.append(temp_dict)\n count = count + 1\n except (OSError, UnicodeDecodeError): # skip unreadable or non-text files\n pass\nprint('In the directory', temp_dir, 'the keyword', temp_key, 'is found in', count, 'files')\n\n'''\nAfter this, show how to get the largest and smallest files\n'''\n\nnew_list = sorted(file_size, key=operator.itemgetter('file_size'))\nprint()\nprint('. . . . . . . . . . . 
.')\ntemp_num = int(temp_num)\ntemp_num = min(temp_num, len(new_list)) # avoid IndexError when fewer files matched\nprint(temp_num, 'Smallest Files')\nfor i in range(0, temp_num):\n print(new_list[i])\n\nprint()\nprint()\n\nprint(temp_num, 'Largest Files')\nfor i in range(len(new_list) - 1, len(new_list) - temp_num - 1, -1):\n print(new_list[i])\n","sub_path":"School/Week4HW.BDH.py","file_name":"Week4HW.BDH.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}