diff --git "a/2507.jsonl" "b/2507.jsonl" new file mode 100644--- /dev/null +++ "b/2507.jsonl" @@ -0,0 +1,750 @@ +{"seq_id":"105019412","text":"####################################################################################################\n#\n# Utility functions that might be useful across different files and scripts.\n#\n####################################################################################################\n\nfrom itertools import zip_longest\nfrom sklearn.utils import shuffle\n\n\ndef print_separator(text: str) -> None:\n \"\"\"Print a nice separator to the console with the given text.\n\n Arguments:\n text -- The texts that should be printed under the separator.\n \"\"\"\n print(\"\")\n print(f\"\\033[91m{'=' * 80}\\033[00m\")\n print(f\"\\033[91m= > {text}\\033[00m\")\n\n\ndef extract_unique_tokens(lexicon: dict) -> list:\n \"\"\"Extract all unique tokens from a lexicon and return it as list.\n\n The lexicon is expected to have the format returned by the `prepare_lexicons` function.\n\n Arguments:\n lexicon -- A dictionary containing all the tokens that should be extracted into a unique list of\n tokens.\n \"\"\"\n all_tokens = []\n\n # Collect all possible tokens from the provided lexicon\n for test_type, test_lexicons in lexicon.items():\n for words in test_lexicons.values():\n all_tokens.extend(words[0][-1])\n\n # Remove duplicates and return as list\n return list(set(all_tokens))\n\n\ndef build_word_embedding_cache(lexicon: dict, embedding_model) -> dict:\n \"\"\"Retrieve the word vectors for all tokens in the provided lexicons and cache them.\n\n Return the cache as a dictionary.\n This should decrease the access times in cases where the vectors are requested multiple hundred\n times. Since the runs should all have the same tokens, use the last index of the first run and\n extract all tokens.\n\n Arguments:\n lexicon -- A dictionary containing the tokens that should be cached. Expects the lexcion to be\n in a specific format, as returned by the lexicon preparation function\n `prepare_lexicons`.\n embedding_model -- The model that should be used to retrieve the word vectors.\n \"\"\"\n unique_tokens = extract_unique_tokens(lexicon)\n\n # Save embeddings for all tokens in the lexicon as a dict\n return {key: embedding_model[key] for key in unique_tokens}\n\n\ndef _determine_combined_lexicon_eval_lengths(\n lexicons: list,\n step_size: int,\n allow_different_lengths: bool) -> list:\n \"\"\"Determine the lengths at which the given lexicons should be evaluated at.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the combined length of all given\n lexicons.\n\n Arguments:\n lexicons -- The lexcions for which the evlauation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n \"\"\"\n lexicon_lengths = [len(lex) for lex in lexicons]\n\n # If different lengths are allowed, the maximum lexicon size will be the combination of all\n # words in all lexicons and thus the sum of all lexicons lengths. 
Otherwise, we need to use the\n # length of the smallest lexicon and cut all other lexicons to its size and thus have a maximum\n # length of min-length * #lexicons.\n if allow_different_lengths:\n max_lexicon_size = sum(lexicon_lengths)\n else:\n max_lexicon_size = min(lexicon_lengths) * len(lexicons)\n\n # Since we are combining all given lexicons later on, we need to increase the step size here to\n # be the product of the #lexicons with the provided step size.\n combined_step_size = step_size * len(lexicons)\n\n lexicon_eval_lengths = list(range(combined_step_size, max_lexicon_size + 1, combined_step_size))\n\n # In cases where the division of the maximum length by the step size leaves a remainder we need\n # to add the last lexicon length step manually; we would miss it otherwise\n if max_lexicon_size % combined_step_size > 0:\n lexicon_eval_lengths.append(max_lexicon_size)\n\n return lexicon_eval_lengths\n\n\ndef _determine_lexicon_eval_lengths(\n lexicons: list,\n step_size: int,\n allow_different_lengths: bool) -> list:\n \"\"\"Determine the lengths at which the given lexicons should be evaluated.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the length of the longest lexicon if\n different lengths are allowed and the length of the shortest one otherwise.\n\n Arguments:\n lexicons -- The lexicons for which the evaluation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n \"\"\"\n lexicon_lengths = [len(lex) for lex in lexicons]\n\n # If different lengths are allowed, the maximum lexicon size will be the size of the longest\n # lexicon. Otherwise, we need to use the length of the smallest lexicon and cut all other\n # lexicons to its size.\n if allow_different_lengths:\n max_lexicon_size = max(lexicon_lengths)\n else:\n max_lexicon_size = min(lexicon_lengths)\n\n lexicon_eval_lengths = list(range(step_size, max_lexicon_size + 1, step_size))\n\n # In cases where the division of the total length by the step size leaves a remainder we need to\n # add the last lexicon length step manually; we would miss it otherwise\n if max_lexicon_size % step_size > 0:\n lexicon_eval_lengths.append(max_lexicon_size)\n\n return lexicon_eval_lengths\n\n\ndef prepare_combined_lexicons(\n lexicon_1: list,\n lexicon_2: list,\n shuffled_runs: int,\n step_size: int,\n lowercase: bool,\n allow_different_lengths: bool = False) -> list:\n \"\"\"Combine and prepare two lists of tokens for the metric evaluation.\n\n Return a list of shuffled runs for a lexicon that is created by combining all given ones. This\n method ensures that each partial lexicon (parts of the full lexicons at different runs) will\n contain roughly the same number of tokens from each of the lexicons.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexicons to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. 
If `False`,\n all lexicons will be trimed to the size of the shortest one.\n \"\"\"\n # Lowercase the provided tokens, if required\n prepared_lexicons = [[\n t.lower() for t in inner]\n for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2]\n shortest_lexicon = min([len(lex) for lex in prepared_lexicons])\n\n lexicon_lengths = _determine_combined_lexicon_eval_lengths(\n prepared_lexicons, step_size, allow_different_lengths)\n\n # All runs of the final lexicon\n lexicon_runs = []\n\n for run in range(0, shuffled_runs):\n # All partial lexicons of the current run (including the full lexicon at the end)\n shuffled_partials = []\n\n if allow_different_lengths:\n # Zip all lexicons into tuples and flatten the list of tuples afterwards.\n # For shorter lists, the later tuples contain \"None\" as value, which will basically\n # lead to a final, flattened list where the later tokens are from the longer lexicon\n # exclusively (before shuffling that is).\n lexicons_zip = zip_longest(*prepared_lexicons, fillvalue=None)\n lexicons_unpkg = [\n t for t_tuple in lexicons_zip for t in t_tuple if t is not None]\n\n # Shuffle the combined and flattened lexicons\n shuffled_lexicons_unpkg = shuffle(lexicons_unpkg, random_state=run)\n else:\n # Trim each of the given lexicons to the size of the shortest lexicon and shuffle\n # them individually\n shuffled_lexicons = [\n shuffle(lexicon[:shortest_lexicon], random_state=run)\n for lexicon in prepared_lexicons]\n\n # Zip all shuffled lexicons into tuples and flatted the list of tuples afterwards\n shuffled_lexicons_zip = zip(*shuffled_lexicons)\n shuffled_lexicons_unpkg = [t for t_tuple in shuffled_lexicons_zip for t in t_tuple]\n\n # Split the final shuffled and combined lexicon into multiple partials\n for length in lexicon_lengths:\n shuffled_partials.append(shuffled_lexicons_unpkg[:length])\n\n lexicon_runs.append(shuffled_partials)\n\n # Return a the runs for a single lexicon\n return lexicon_runs\n\n\ndef prepare_lexicons(\n lexicon_1: list,\n lexicon_2: list,\n shuffled_runs: int,\n step_size: int,\n lowercase: bool,\n allow_different_lengths: bool = False) -> list:\n \"\"\"Prepare a two lists of tokens for the metric evaluation.\n\n Return a tuple of all given lexicons that were shuffled and split separately.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,\n all lexicons will be trimed to the size of the shortest one. If\n `True` the sizes of the lists will be increased proportional to their\n relative length to each other. 
The shortest list will use\n the defined step size, while the longer lists will use a step size\n that makes them grow proportionally, so that the relative length\n difference is the same for all test runs.\n \"\"\"\n # Lowercase the provided tokens, if required\n prepared_lexicons = [[\n t.lower() for t in inner]\n for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2]\n\n lexicon_lengths = _determine_lexicon_eval_lengths(\n prepared_lexicons, step_size, allow_different_lengths)\n\n # List of all lexicons, where each element holds the runs for one of them\n all_lexicon_runs = []\n\n for lexicon in prepared_lexicons:\n # All runs of the current lexicon\n lexicon_runs = []\n\n for run in range(0, shuffled_runs):\n # All partial lexicons of the current run (including the full lexicon at the end)\n shuffled_partials = []\n\n # Shuffle first, then split the shuffled lexicon into multiple partials\n # If different lengths are not allowed, we need to additionally trim the lexicons\n # to the length of the shortest before shuffling to make sure that all runs contain\n # the same vocabulary.\n if allow_different_lengths:\n shuffled_lexicon = shuffle(lexicon, random_state=run)\n else:\n shuffled_lexicon = shuffle(lexicon[:lexicon_lengths[-1]], random_state=run)\n # Due to the working of the list indexing `:length`, this will also ensure that each\n # run has always the same number of steps. In case of a shorter lexicon, the\n # `:length` will just \"overshoot\" it at add the full lexicon to the list again\n # (which is what we want). In cases where we want the lexicons to have the same size\n # we need to take care of trimming them to that size above and this part still works\n # the same.\n for length in lexicon_lengths:\n shuffled_partials.append(shuffled_lexicon[:length])\n\n lexicon_runs.append(shuffled_partials)\n\n all_lexicon_runs.append(lexicon_runs)\n\n # Return a tuple of all lexicons, each containing the runs for each lexicon\n return tuple(all_lexicon_runs)\n","sub_path":"webias/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"379647755","text":"from django.shortcuts import get_object_or_404\nfrom django.db import IntegrityError\nfrom rest_framework.response import Response\nfrom .models import Device, DeviceOwner\nfrom .serializers import CreateDeviceSerializer, DeviceSerializer, UpdateDeviceSerializer, DeviceRetrieveSerializer\nfrom rest_framework import permissions\nfrom rest_framework import viewsets, status, mixins\nfrom django.db import transaction\nimport hashlib\nfrom django.core.files.storage import default_storage\nfrom iedcs.settings.base import BASE_DIR\nfrom django.core.files.base import ContentFile\nfrom geoip import geolite2\nfrom security.primes import Primes\n\n\nclass DeviceViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin,\n mixins.CreateModelMixin, viewsets.GenericViewSet):\n queryset = Device.objects.filter()\n serializer_class = DeviceSerializer\n\n def get_permissions(self):\n return permissions.IsAuthenticated(),\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n B{List} user devices\n B{URL:} ../api/v1/player/devices/\n \"\"\"\n devices = DeviceOwner.objects.filter(owner=request.user)\n serializer = self.serializer_class([device.device for device in devices], many=True)\n return Response(serializer.data)\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n B{Create} a 
device\n B{URL:} ../api/v1/player/devices/\n\n :type unique_identifier: str\n :param unique_identifier: the device unique identifier\n :type host_name: str\n :param host_name: the host name\n :type cpu_model: str\n :param cpu_model: the device cpu model\n :type op_system: str\n :param op_system: the device op system\n :type ip: str\n :param ip: the device ip\n :type country: str\n :param country: the device country\n :type timezone: str\n :param timezone: the device timezone\n :type public_key: str\n :param public_key: the public_key\n \"\"\"\n serializer = CreateDeviceSerializer(data=request.data)\n\n if serializer.is_valid():\n public_key = serializer.data[\"public_key\"]\n\n m = hashlib.md5()\n m.update(serializer.data[\"unique_identifier\"])\n folder_name = m.hexdigest()\n\n path = default_storage.save(BASE_DIR+'/media/devices/' + folder_name + '/device_pub.key',\n ContentFile(public_key))\n\n c = geolite2.lookup(serializer.data[\"ip\"]).country\n try:\n with transaction.atomic():\n if Device.objects.filter(unique_identifier=serializer.data[\"unique_identifier\"]).count() is 1:\n device = Device.objects.get(unique_identifier=serializer.data[\"unique_identifier\"])\n\n if DeviceOwner.objects.filter(device=device, owner=request.user).count() is 1:\n return Response({'status': 'Already created',\n 'message': 'The device has been already registered'},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n DeviceOwner.objects.create(device=device, owner=request.user)\n\n return Response({'status': 'Associated',\n 'message': 'The device has been associated'},\n status=status.HTTP_201_CREATED)\n else:\n device = Device.objects.create(unique_identifier=serializer.data[\"unique_identifier\"],\n cpu_model=serializer.data[\"cpu_model\"],\n op_system=serializer.data[\"op_system\"],\n ip=serializer.data[\"ip\"],\n country=c,\n time=serializer.data[\"time\"],\n host_name=serializer.data[\"host_name\"],\n public_key=path)\n\n DeviceOwner.objects.create(device=device, owner=request.user)\n\n return Response({'status': 'Created',\n 'message': 'The device has been registered'},\n status=status.HTTP_201_CREATED)\n\n except IntegrityError:\n return Response({'status': 'Bad request',\n 'message': 'The device can\\'t be added!'},\n status=status.HTTP_400_BAD_REQUEST)\n\n return Response({'status': 'Bad Request',\n 'message': serializer.errors},\n status=status.HTTP_400_BAD_REQUEST)\n\n def update(self, request, *args, **kwargs):\n \"\"\"\n B{Update} the device\n B{URL:} ../api/v1/player/devices//\n\n :type unique_identifier: str\n :param unique_identifier: the unique identifier\n :type host_name: str\n :param host_name: the host name\n :type cpu_model: str\n :param cpu_model: the device cpu model\n :type op_system: str\n :param op_system: the device op system\n :type ip: str\n :param ip: the device ip\n :type country: str\n :param country: the device country\n :type timezone: str\n :param timezone: the device timezone\n \"\"\"\n serializer = UpdateDeviceSerializer(data=request.data)\n\n if serializer.is_valid():\n device = get_object_or_404(Device.objects.all(), unique_identifier=serializer.validated_data['unique_identifier'])\n get_object_or_404(DeviceOwner.objects.all(), owner=request.user, device=device)\n\n device.cpu_model = serializer.validated_data['cpu_model']\n device.op_system = serializer.validated_data['op_system']\n device.ip = serializer.validated_data['ip']\n device.time = serializer.validated_data['time']\n device.host_name = serializer.validated_data['host_name']\n device.save()\n 
Primes.changePrimes()\n\n n = Primes.generateN(Primes.g, Primes.p)\n\n return Response({'status': 'Updated',\n 'g': Primes.g,\n 'p': Primes.p,\n 'n': n,\n 'message': 'The device has been updated.'},\n status=status.HTTP_200_OK)\n\n else:\n return Response({'status': 'Bad request',\n 'message': serializer.errors},\n status=status.HTTP_400_BAD_REQUEST)\n\n\nclass DeviceRetrieveView(mixins.CreateModelMixin, viewsets.GenericViewSet):\n queryset = Device.objects.filter()\n serializer_class = DeviceRetrieveSerializer\n\n def get_permissions(self):\n return permissions.IsAuthenticated(),\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n B{Retrieve} a device\n B{URL:} ../api/v1/player/retrieveDevice/\n\n :type unique_identifier: str\n :param unique_identifier: the device unique identifier\n \"\"\"\n\n serializer = DeviceRetrieveSerializer(data=request.data)\n if serializer.is_valid():\n\n device = get_object_or_404(Device.objects.all(), unique_identifier=serializer.data[\"unique_identifier\"])\n get_object_or_404(DeviceOwner.objects.all(), owner=request.user, device=device)\n serializer = self.serializer_class(device)\n return Response(serializer.data)\n\n return Response({'status': 'Bad Request',\n 'message': serializer.errors},\n status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"src/iedcs-server/players/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"190855550","text":"# Runtime: 60 ms, faster than 80.10% of Python3 online submissions for Construct Binary Tree from Preorder and Inorder Traversal.\n# Memory Usage: 17.8 MB, less than 86.84% of Python3 online submissions for Construct Binary Tree from Preorder and Inorder Traversal.\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n i_dict = {str(inorder[i]): i for i in range(len(inorder))}\n \n def build_tree(in_left, in_right, pre_idx):\n if in_left > in_right:\n return None\n val = preorder[pre_idx]\n in_idx = i_dict[str(val)]\n root = TreeNode(val)\n left_mid = pre_idx + 1\n if 0 <= left_mid < len(preorder):\n root.left = build_tree(in_left, in_idx - 1, left_mid)\n right_mid = pre_idx + 1 + (in_idx - in_left)\n if 0 <= right_mid < len(preorder):\n root.right = build_tree(in_idx + 1, in_right, right_mid)\n return root\n \n return build_tree(0, len(inorder)-1, 0)","sub_path":"0105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_name":"0105_Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"41188475","text":"'''\nThis file defines 4 terms\nLYRICS_LOC - the location of where our music sheets are\nFILTER_WORDS - words that we don't want as part of our lyrics\n note: this set is very large\nCOR_WD_FILE - filename of corpus words\nWD_BY_FILE - filename of words by file\n'''\n\nCOR_WD_FILE = \"../Backend/corpus_words.csv\"\nWD_BY_FILE = \"../Backend/words_by_file.csv\"\nLYRICS_LOC = \"../Resources/\"\nFILTER_WORDS = {\n \"BRIDGE\",\n \"REPEAT\",\n \"PRE-CHORUS\",\n \"PRECHORUS\",\n \"CHORUS\",\n \"INTRO\",\n \"VERSE\",\n \"MELODY\",\n \"CODA\",\n \"[\",\n \"]\",\n \"|\",\n \"{\",\n \"}\",\n \"1123432\",\n \"HARMONY\",\n \"BROTHERS\",\n \"SISTERS\",\n 
\"B\",\n \"S\",\n \"HARMONIZE\",\n \"INSTRUMENTAL\",\n \"RIFF\",\n \"INST\",\n \"INSTR\",\n \"FULL\",\n \"CHORDS\",\n \"-\",\n}\n\nfor i in range(2, 7):\n for rep in (\"x\", \"X\"):\n FILTER_WORDS.add(str(i) + rep)\n FILTER_WORDS.add(rep + str(i))\n\nfor word in list(FILTER_WORDS):\n FILTER_WORDS.add(word + \":\")\n FILTER_WORDS.add(word + \"...\")\n FILTER_WORDS.add(\":\" + word)\n FILTER_WORDS.add(\"(\" + word + \")\")\n FILTER_WORDS.add(\"[\" + word + \"]\")\n\nbase_keys = (\"A\", \"B\", \"C\", \"D\", \"E\", \"F\",\"G\")\nmodifications = (\"\", \"#\", \"b\", \"s\", \"m\", \"M\", \"sus\", \"2\", \"7\")\nall_keys = []\n\nfor key in base_keys:\n for modification in modifications:\n all_keys.append(key + modification)\n for modification_2 in modifications:\n all_keys.append(key + modification + modification_2)\n for modification_3 in modifications:\n all_keys.append(key + modification + modification_2 + modification_3)\n\nfor key in all_keys:\n FILTER_WORDS.add(key)\n\nfor key in base_keys:\n for alt_key in all_keys:\n FILTER_WORDS.add(key + \"/\" + alt_key)\n FILTER_WORDS.add(alt_key + \"/\" + key)\n\nfor i in range(30):\n i_s = str(i)\n FILTER_WORDS.add(i_s)\n FILTER_WORDS.add(i_s + \"#\")\n for j in range(10):\n j_s = str(j)\n FILTER_WORDS.add(i_s + \"/\" + j_s)\n\n","sub_path":"Backend/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"460571414","text":"from datetime import datetime, timedelta, timezone\nfrom dateutil import parser, tz\n# workaround as feegen raise error: AttributeError: module 'lxml' has no attribute 'etree'\nfrom lxml import etree\nfrom feedgen import util\nfrom feedgen.feed import FeedGenerator\nfrom google.cloud import storage\nfrom gql import gql, Client\nfrom gql.transport.requests import RequestsHTTPTransport\nfrom json import JSONDecoder\nimport __main__\nimport argparse\nimport gzip\nimport hashlib\nimport yaml\n\nprint(f'[{__main__.__file__}] executing...')\n\nCONFIG_KEY = 'config'\nGRAPHQL_CMS_CONFIG_KEY = 'graphqlCMS'\nNUMBER_KEY = 'number'\n\nyaml_parser = argparse.ArgumentParser(\n description='Process configuration of generate_google_news_rss')\nyaml_parser.add_argument('-c', '--config', dest=CONFIG_KEY,\n help='config file for generate_google_news_rss', metavar='FILE', type=str)\nyaml_parser.add_argument('-g', '--config-graphql', dest=GRAPHQL_CMS_CONFIG_KEY,\n help='graphql config file for generate_google_news_rss', metavar='FILE', type=str, required=True)\nyaml_parser.add_argument('-m', '--max-number', dest=NUMBER_KEY,\n help='number of feed items', metavar='75', type=int, required=True)\nargs = yaml_parser.parse_args()\n\nwith open(getattr(args, CONFIG_KEY), 'r') as stream:\n config = yaml.safe_load(stream)\nwith open(getattr(args, GRAPHQL_CMS_CONFIG_KEY), 'r') as stream:\n config_graphql = yaml.safe_load(stream)\nnumber = getattr(args, NUMBER_KEY)\n\n__gql_transport__ = RequestsHTTPTransport(\n url=config_graphql['apiEndpoint'],\n use_json=True,\n headers={\n 'Content-type': 'application/json',\n },\n verify=True,\n retries=3,\n)\n\n__gql_client__ = Client(\n transport=__gql_transport__,\n fetch_schema_from_transport=True,\n)\n\n__seven_days_ago__ = datetime.now(timezone.utc) - timedelta(days=7)\n\n# To retrieve the latest post published after a specified day\n__qgl_post_template__ = '''\n{\n allPosts(where: %s, sortBy: publishTime_DESC, first: %d) {\n name\n slug\n briefHtml\n contentHtml\n heroImage {\n urlOriginal\n 
name\n }\n categories {\n name\n slug\n }\n relatedPosts {\n name\n slug\n }\n writers {\n name\n }\n publishTime\n updatedAt\n }\n}\n'''\n\n__gql_query__ = gql(__qgl_post_template__ %\n (config['postWhereFilter'], number))\n__result__ = __gql_client__.execute(__gql_query__)\n\n__config_feed__ = config['feed']\n# the timezone for rss\n__timezone__ = tz.gettz(__config_feed__['timezone'])\n\nfg = FeedGenerator()\nfg.load_extension('media', atom=False, rss=True)\nfg.load_extension('dc', atom=False, rss=True)\nfg.title(__config_feed__['title'])\nfg.description(__config_feed__['description'])\nfg.id(__config_feed__['id'])\nfg.pubDate(datetime.now(timezone.utc).astimezone(__timezone__))\nfg.updated(datetime.now(timezone.utc).astimezone(__timezone__))\nfg.image(url=__config_feed__['image']['url'],\n title=__config_feed__['image']['title'], link=__config_feed__['image']['link'])\nfg.rights(rights=__config_feed__['copyright'])\nfg.link(href=__config_feed__['link'], rel='alternate')\nfg.ttl(__config_feed__['ttl']) # 5 minutes\nfg.language('zh-TW')\n\n__base_url__ = config['baseURL']\n\n__json_decoder__ = JSONDecoder()\n\nfor item in __result__['allPosts']:\n guid = hashlib.sha224((__base_url__+item['slug']).encode()).hexdigest()\n fe = fg.add_entry(order='append')\n fe.id(guid)\n fe.title(item['name'])\n fe.link(href=__base_url__+item['slug'], rel='alternate')\n fe.guid(guid)\n fe.pubDate(util.formatRFC2822(\n parser.isoparse(item['publishTime']).astimezone(__timezone__)))\n fe.updated(util.formatRFC2822(\n parser.isoparse(item['updatedAt']).astimezone(__timezone__)))\n content = ''\n\n brief = item['briefHtml']\n if brief is not None:\n fe.description(description=brief, isSummary=True)\n content += brief\n if item['heroImage'] is not None:\n fe.media.content(\n content={'url': item['heroImage']['urlOriginal'], 'medium': 'image'}, group=None)\n content += '\"%s\"' % (\n item['heroImage']['urlOriginal'], item['heroImage']['name'])\n if item['contentHtml'] is not None:\n content += item['contentHtml']\n if len(item['relatedPosts']) > 0:\n content += __config_feed__['item']['relatedPostPrependHtml']\n for related_post in item['relatedPosts'][:3]:\n content += '
%s' % (\n __base_url__+related_post['slug'], related_post['name'])\n fe.content(content=content, type='CDATA')\n fe.category(\n list(map(lambda c: {'term': c['name'], 'label': c['name']}, item['categories'])))\n if item['writers'] is not None:\n fe.dc.dc_creator(creator=list(\n map(lambda w: w['name'], item['writers'])))\n\n\ndef upload_data(bucket_name: str, data: bytes, content_type: str, destination_blob_name: str):\n '''Uploads a file to the bucket.'''\n # bucket_name = 'your-bucket-name'\n # data = 'storage-object-content'\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n blob.content_encoding = 'gzip'\n print(f'[{__main__.__file__}] uploadling data to gs://{bucket_name}{destination_blob_name}')\n blob.upload_from_string(\n data=gzip.compress(data=data, compresslevel=9), content_type=content_type, client=storage_client)\n blob.content_language = 'zh'\n blob.cache_control = 'max-age=300,public'\n blob.patch()\n\n print(\n f'[{__main__.__file__}] finished uploading gs://{bucket_name}{destination_blob_name}')\n\n\n# Instantiates a client\n__storage_client__ = storage.Client()\n\n__file_config__ = config['file']\n# The name for the new bucket\n__bucket_name__ = __file_config__['gcsBucket']\n\n# rss folder path\n__rss_base__ = __file_config__['filePathBase']\n\nprint(f'[{__main__.__file__}] generated rss: {fg.rss_str(pretty=False, extensions=True,encoding=\"UTF-8\", xml_declaration=True).decode(\"UTF-8\")}')\n\nupload_data(\n bucket_name=__bucket_name__,\n data=fg.rss_str(pretty=False, extensions=True,\n encoding='UTF-8', xml_declaration=True),\n content_type='application/rss+xml; charset=utf-8',\n destination_blob_name=__rss_base__ +\n f'/{__file_config__[\"filenamePrefix\"]}.{__file_config__[\"extension\"]}'\n)\n\nprint(f'[{__main__.__file__}] exiting... 
goodbye...')\n","sub_path":"feed/yahoo_rss/generate_yahoo_rss.py","file_name":"generate_yahoo_rss.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"336039057","text":"from Vertex import Vertex\r\nfrom Edge import Edge\r\nimport json\r\n\r\n\r\nclass Graph(object):\r\n \r\n def __init__(self):\r\n self.vertices = []\r\n self.edges = []\r\n self.sources = []\r\n self.unvisited = []\r\n \r\n def getVertex(self, name):\r\n \"\"\" Go through the list of vertices in graph looking for name in info\r\n Return the vertex matching the name \"\"\"\r\n for x in self.vertices:\r\n if x.info['name'] == name:\r\n return x\r\n \r\n def addVertex(self, info):\r\n \"\"\" Creates a new vertex with the info given and adds it to the graph \"\"\"\r\n v = Vertex(info)\r\n self.vertices.append(v)\r\n \r\n def addEdge(self, info):\r\n \"\"\" Adds a new edge to the graph based off the info given \"\"\" \r\n v1 = self.getMetro(info['ports'][0])\r\n v2 = self.getMetro(info['ports'][1])\r\n\r\n e = Edge(v1, v2, info) \r\n self.edges.append(e)\r\n v1.edges.append(e)\r\n v2.edges.append(e)\r\n \r\n def removeVertex(self, v):\r\n # Delete all edges in neighbors that include v\r\n \"\"\"\r\n for e in v.edges:\r\n if e.start == v: # Out bound edge\r\n for edge in e.end.edges:\r\n if (edge.end == v or edge.start == v):\r\n e.end.edges.remove(edge)\r\n else: # In bound edge\r\n for edge in e.start.edges:\r\n if (edge.end == v or edge.start == v):\r\n e.start.edges.remove(edge)\r\n \"\"\"\r\n for u in self.vertices:\r\n for e in u.edges:\r\n if (e.start is v or e.end is v):\r\n u.edges.remove(e)\r\n \r\n # Remove v and its edges\r\n for e in v.edges:\r\n self.edges.remove(e)\r\n \r\n self.vertices.remove(v)\r\n \r\n def removeEdge(self, v1, v2):\r\n for e in v1.edges:\r\n if e.start is v2 or e.end is v2: # Found the edge to remove\r\n v1.edges.remove(e)\r\n self.edges.remove(e)\r\n \r\n for e in v2.edges:\r\n if e.start is v1 or e.end is v1: # Found the edge to remove\r\n v2.edges.remove(e)\r\n \r\n \r\n def add2WayEdge(self, e):\r\n \"\"\" Adds two edges to the edges list. 
The second added edge is the first edge reversed\"\"\"\r\n route2Data = {'ports': [e.info['ports'][1], e.info['ports'][0]], 'distance': e.info['distance']}\r\n e2 = Edge(e.end, e.start, route2Data)\r\n \r\n self.edges.append(e)\r\n self.edges.append(e2)\r\n \r\n # Add the edges to the edges list and to the vertices' edge list\r\n e.start.addEdge(e)\r\n e.start.addEdge(e2)\r\n e.end.addEdge(e)\r\n e.end.addEdge(e2)\r\n\r\n def getMetro(self, code):\r\n \"\"\" Takes in a metro code and returns the vertex matching it \r\n Input the city code to be searched for \"\"\"\r\n for x in self.vertices:\r\n if code == x.info['code']:\r\n return x\r\n\r\n def shortestPath(self, source, destination):\r\n for v in self.vertices:\r\n v.distance = 41000\r\n v.previous = None\r\n \r\n source.distance = 0\r\n unvisited = list(self.vertices)\r\n \r\n while (len(unvisited) != 0):\r\n unvisited = sorted(unvisited, key=lambda vertex: vertex.distance) \r\n \r\n # Get the vertex with the smallest distance from source\r\n v = unvisited.pop(0)\r\n \r\n # Look through all neighbors\r\n for e in v.edges:\r\n u = e.end\r\n if (u in unvisited and u != v): \r\n # Calculate potential new distance and compare\r\n alt = v.distance + e.getLength()\r\n if alt < u.distance: # New shortest path\r\n u.distance = alt\r\n u.previous = v\r\n \r\n # Get the shortest path from source to destination\r\n path = []\r\n path.append(destination)\r\n prev = destination.previous\r\n while (prev is not source):\r\n path.append(prev)\r\n prev = prev.previous\r\n path.append(source)\r\n return path\r\n \r\n def jsonSourceExists(self, data):\r\n for x in self.sources:\r\n if x == data:\r\n return True\r\n return False\r\n \r\n def jsonMetroExists(self, data):\r\n \r\n for x in self.vertices:\r\n if x.info == data:\r\n return True\r\n return False\r\n \r\n def jsonRouteExists(self, data): \r\n for x in self.edges:\r\n if x.info == data:\r\n return True\r\n return False\r\n \r\n def parseData(self, fileName):\r\n \"\"\" Goes through the JSON file and creates objects for the routes and metros \"\"\"\r\n json_data = open(fileName)\r\n self.data = json.load(json_data)\r\n json_data.close()\r\n \r\n for x in self.data['data sources']:\r\n if not self.jsonSourceExists(x):\r\n self.sources.append(x)\r\n \r\n for x in self.data['metros']:\r\n if not self.jsonMetroExists(x):\r\n self.vertices.append(Vertex(x))\r\n \r\n for x in self.data['routes']:\r\n if not self.jsonRouteExists(x):\r\n v1 = self.getMetro(x['ports'][0])\r\n v2 = self.getMetro(x['ports'][1])\r\n edge = Edge(v1, v2, x)\r\n self.add2WayEdge(edge)","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"449964105","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom datetime import datetime\n\n# Create your models here.\n# 필요한 모델: User, Profile(if needed), Genre, Calendar, Event, Schedule\n\nclass User(AbstractUser):\n pass\n\nclass Genre(models.Model):\n name = models.CharField(max_length=20)\n\ndef cal_batch():\n now = int(\"{:%Y}\".format(datetime.now()))\n return now - 1994\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n genre = models.ManyToManyField(Genre, related_name='genrers', related_query_name='genrer')\n batch = models.IntegerField(default = cal_batch())\n\nclass Calendar(models.Model):\n name = models.CharField(max_length=20)\n user = 
models.OneToOneField(User, on_delete=models.CASCADE, blank=True, null=True) # each user can own exactly one personal calendar!\n\n def __str__(self):\n return self.id + \": \" + self.name\n\nclass Event(models.Model):\n calendarId = models.ManyToManyField(Calendar, related_name=\"schedules\", related_query_name=\"schedule\") # an event can belong to multiple calendars\n owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='my_schedules', related_query_name='my_schedule')\n title = models.CharField(max_length=30)\n body = models.CharField(max_length=5000) # need to check later whether images can be stored here as well\n media = models.FileField(upload_to='schedules/')\n genre = models.ManyToManyField(Genre, related_name='genre_schedules', related_query_name='genre_schedule') # an event can have multiple genres\n \n def __str__(self):\n return str(self.calendarId) + \": \" + self.title\n\n# class RecurrenceRule(models.Model):\n# FREQUENCY_CHOICES = [('Daily', 'Daily'), ('Weekly', 'Weekly'), ('Monthly', 'Monthly'), ('Yearly', 'Yearly')]\n# BY_DAY_CHOICIES = [('SUN', 'SUN'), ('MON', 'MON'), ('TUE', 'TUE'), ('WED', 'WED'), ('THU', 'THU'), ('FRI', 'FRI'), ('SAT', 'SAT')]\n# start = models.DateTimeField()\n# end = models.DateTimeField(blank=True, null=True)\n# count = models.IntegerField(blank=True, null=True)\n# frequency = models.CharField(choices = FREQUENCY_CHOICES, default='Weekly', max_length=30)\n# interval = models.IntegerField(default = 1)\n# byweeknumber = models.BooleanField(default = False)\n# byday = models.CharField(choices = BY_DAY_CHOICIES, max_length=3)\n\nclass Schedule(models.Model):\n event = models.ForeignKey(Event, on_delete=models.CASCADE, related_name='events', related_query_name='event')\n start = models.DateTimeField()\n end = models.DateTimeField()\n location = models.CharField(max_length=30, blank=True, null=True)\n subtitle = models.CharField(max_length=1000, blank=True, null=True)\n # recurrenceRule = models.OneToOneField(RecurrenceRule, on_delete=models.SET_NULL, null=True, blank=True)\n isAllDay = models.BooleanField(default=False)\n isPrivate = models.BooleanField(default=False) # whether the schedule is visible only to me\n isOpen = models.BooleanField(default=True) # whether the event is public outside Condi or visible to Condi members only\n\n def __str__(self):\n return str(self.event) + \": \" + str(self.start)\n\n# FREQUENCY_CHOICES = [('Daily', 'Daily'), ('Monthly', 'Weekly'), ('Monthly', 'Monthly'), ('Yearly', 'Yearly')]\n# BY_DAY_CHOICIES = [('SUN', 'Sunday'), ('MON', 'Monday'), ('TUE', 'Tuesday'), ('WED', 'Wednesday'), ('THU', 'Thursday'), ('FRI', 'Friday'), ('SAT', 'Saturday')]\n# COUNT : total number of times to repeat\n# INTERVAL : repeat every N weeks\n# Note that! No infinite repetition!\n \n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"85356184","text":"#!/usr/bin/env python3\n\"\"\"\nCompute coloured image to visualize optical flow file `.flo`\nAccording to the matlab code of Deqing Sun and c++ source code of Daniel Scharstein\nContact: dqsun@cs.brown.edu\nContact: schar@middlebury.edu\nUpdated to python3.7 etc. 
by Celyn Walters\nContact: c.walters@surrey.ac.uk\n\nOriginal author: Johannes Oswald, Technical University Munich\nContact: johannes.oswald@tum.de\n\nFor more information, check http://vision.middlebury.edu/flow/\n\"\"\"\nimport argparse\nfrom pathlib import Path\nimport cv2\nimport sys\nimport numpy as np\n\n# ==================================================================================================\ndef makeColorwheel() -> np.array:\n\t\"\"\"\n\tColor encoding scheme adapted from the color circle idea described at http://members.shaw.ca/quadibloc/other/colint.htm\n\n\tReturns:\n\t\tnp.array: Colorwheel\n\t\"\"\"\n\tRY = 15\n\tYG = 6\n\tGC = 4\n\tCB = 11\n\tBM = 13\n\tMR = 6\n\n\tncols = RY + YG + GC + CB + BM + MR\n\tcolorwheel = np.zeros([ncols, 3]) # R, G, B\n\n\tcol = 0\n\t# RY\n\tcolorwheel[0:RY, 0] = 255\n\tcolorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY, 1) / RY)\n\tcol += RY\n\n\t# YG\n\tcolorwheel[col:YG + col, 0] = 255 - np.floor(255 * np.arange(0, YG, 1) / YG)\n\tcolorwheel[col:YG + col, 1] = 255\n\tcol += YG\n\n\t# GC\n\tcolorwheel[col:GC + col, 1] = 255\n\tcolorwheel[col:GC + col, 2] = np.floor(255 * np.arange(0, GC, 1) / GC)\n\tcol += GC\n\n\t# CB\n\tcolorwheel[col:CB + col, 1] = 255 - np.floor(255*np.arange(0, CB, 1) / CB)\n\tcolorwheel[col:CB + col, 2] = 255\n\tcol += CB\n\n\t# BM\n\tcolorwheel[col:BM + col, 2] = 255\n\tcolorwheel[col:BM + col, 0] = np.floor(255 * np.arange(0, BM, 1) / BM)\n\tcol += BM\n\n\t# MR\n\tcolorwheel[col:MR + col, 2] = 255 - np.floor(255 * np.arange(0, MR, 1) / MR)\n\tcolorwheel[col:MR + col, 0] = 255\n\n\treturn colorwheel\n\n\n# ==================================================================================================\ndef computeColor(u: float, v: float) -> np.array:\n\t\"\"\"\n\tGet the colour in the wheel at the specified coordinates.\n\n\tArgs:\n\t\tu (float): X coordinate\n\t\tv (float): Y coordinate\n\n\tReturns:\n\t\tnp.array:\n\t\"\"\"\n\tcolorwheel = makeColorwheel()\n\tnan_u = np.isnan(u)\n\tnan_v = np.isnan(v)\n\tnan_u = np.where(nan_u)\n\tnan_v = np.where(nan_v)\n\n\tu[nan_u] = 0\n\tu[nan_v] = 0\n\tv[nan_u] = 0\n\tv[nan_v] = 0\n\n\tncols = colorwheel.shape[0]\n\tradius = np.sqrt(u**2 + v**2)\n\ta = np.arctan2(-v, -u) / np.pi\n\tfk = (a + 1) / 2 * (ncols - 1) # -1~1 maped to 1~ncols\n\tk0 = fk.astype(np.uint8)\t # 1, 2, ..., ncols\n\tk1 = k0 + 1\n\tk1[k1 == ncols] = 0\n\tf = fk - k0\n\n\timg = np.empty([k1.shape[0], k1.shape[1], 3])\n\tncolors = colorwheel.shape[1]\n\tfor i in range(ncolors):\n\t\ttmp = colorwheel[:, i]\n\t\tcol0 = tmp[k0] / 255\n\t\tcol1 = tmp[k1] / 255\n\t\tcol = ((1 - f) * col0) + (f * col1)\n\t\tidx = radius <= 1\n\t\tcol[idx] = 1 - radius[idx] * (1 - col[idx]) # Increase saturation with radius\n\t\tcol[~idx] *= 0.75 # out of range\n\t\timg[:, :, 2 - i] = np.floor(255 * col).astype(np.uint8)\n\n\treturn img.astype(np.uint8)\n\n\n# ==================================================================================================\ndef computeImg(flow) -> np.array:\n\t\"\"\"\n\tCompute the colour-coded flow image.\n\n\tArgs:\n\t\tflow (np.array): Flow field\n\n\tReturns:\n\t\tnp.array: Colour-coded image\n\t\"\"\"\n\teps = sys.float_info.epsilon\n\tUNKNOWN_FLOW_THRESH = 1e9\n\tUNKNOWN_FLOW = 1e10\n\n\tu = flow[:, :, 0]\n\tv = flow[:, :, 1]\n\n\tmaxu = -999\n\tmaxv = -999\n\n\tminu = 999\n\tminv = 999\n\n\tmaxrad = -1\n\t# Fix unknown flow\n\tgreater_u = np.where(u > UNKNOWN_FLOW_THRESH)\n\tgreater_v = np.where(v > UNKNOWN_FLOW_THRESH)\n\tu[greater_u] = 0\n\tu[greater_v] = 0\n\tv[greater_u] = 
0\n\tv[greater_v] = 0\n\n\tmaxu = max([maxu, np.amax(u)])\n\tminu = min([minu, np.amin(u)])\n\n\tmaxv = max([maxv, np.amax(v)])\n\tminv = min([minv, np.amin(v)])\n\trad = np.sqrt(np.multiply(u, u) + np.multiply(v, v))\n\tmaxrad = max([maxrad, np.amax(rad)])\n\tprint(f\"max flow:\")\n\tprint(f\" {maxrad:.4f}\")\n\tprint(f\"flow range:\")\n\tprint(f\" u = {minu:.3f} .. {maxu:.3f}\")\n\tprint(f\" v = {minv:.3f} .. {maxv:.3f}\")\n\n\tu = u / (maxrad + eps)\n\tv = v / (maxrad + eps)\n\timg = computeColor(u, v)\n\n\treturn img\n\n\n# ==================================================================================================\nif (__name__ == \"__main__\"):\n\timport readFlowFile\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"flow_file\", type=str, default=\"colorTest.flo\", help=\"Flow file\")\n\tparser.add_argument(\"--write\", action=\"store_true\", help=\"Write flow as PNG\")\n\targs = parser.parse_args()\n\targs.flow_file = Path(args.flow_file)\n\tflow = readFlowFile.read(args.flow_file)\n\timg = computeImg(flow)\n\tcv2.imshow(str(args.flow_file), img)\n\tk = cv2.waitKey()\n\tif parser.parse_args().write:\n\t\tcv2.imwrite(str(args.flow_file.with_suffix(\".png\")), img)\n","sub_path":"computeColor.py","file_name":"computeColor.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"49963088","text":"# encoding=utf-8\nimport math\nimport web\nimport xutils\nimport xtemplate\nimport xtables\nimport xauth\nimport xconfig\nimport xmanager\n\nfrom . import dao\nfrom xutils import Storage\n\nPAGE_SIZE = xconfig.PAGE_SIZE\n\nclass PathNode:\n\n def __init__(self, name, url):\n self.name = name\n self.url = url\n\nclass handler:\n\n @xauth.login_required()\n def GET(self):\n return xtemplate.render(\"file/view.html\",\n file_type=\"group\",\n pseudo_groups=True,\n files=dao.get_category(xauth.get_current_name()))\n\n def POST(self):\n pass\n\nclass Ungrouped:\n\n @xauth.login_required()\n def GET(self):\n page = xutils.get_argument(\"page\", 1, type=int)\n db = xtables.get_file_table()\n\n sql = \"SELECT a.* FROM file a LEFT JOIN file b ON a.parent_id = b.id WHERE a.is_deleted = 0 AND a.type != 'group' AND (b.id is null OR b.type != 'group') ORDER BY smtime DESC LIMIT %s,%s\"\n files = db.query(sql % ((page-1)*10, 10))\n \n count_sql = \"SELECT COUNT(1) AS amount FROM file a LEFT JOIN file b ON a.parent_id = b.id WHERE a.is_deleted = 0 AND a.type != 'group' AND (b.id is null OR b.type != 'group')\"\n amount = db.count(sql = count_sql)\n\n return xtemplate.render(\"file/view.html\",\n pathlist=[PathNode(\"未分类\", \"/file/group/ungrouped\")],\n file_type=\"group\",\n files = files,\n page = page,\n page_max = math.ceil(amount / 10),\n page_url=\"/file/group/ungrouped?page=\")\n\nclass MoveHandler:\n \n @xauth.login_required()\n def GET(self):\n id = xutils.get_argument(\"id\", \"\", type=int)\n parent_id = xutils.get_argument(\"parent_id\", \"\", type=int)\n db = xtables.get_file_table()\n db.update(parent_id=parent_id, where=dict(id=id))\n return dict(code=\"success\")\n\n def POST(self):\n return self.GET()\n \nclass ListHandler:\n\n def GET(self):\n id = xutils.get_argument(\"id\", \"\", type=int)\n sql = \"SELECT id, name FROM file WHERE type = 'group' AND is_deleted = 0 ORDER BY name DESC LIMIT 200\"\n data = xtables.get_file_table().query(sql)\n web.header(\"Content-Type\", \"text/html; charset=utf-8\")\n return xtemplate.render(\"file/group_list.html\", id=id, 
filelist=data)\n\nclass RemovedHandler:\n\n @xauth.login_required()\n def GET(self):\n page = xutils.get_argument(\"page\", 1, type=int)\n db = xtables.get_file_table()\n files = db.select(where=\"is_deleted=1\", order=\"sctime DESC\", offset=(page-1)*10, limit=10)\n amount = db.count(where=\"is_deleted=1\")\n\n return xtemplate.render(\"file/view.html\",\n pathlist=[PathNode(\"回收站\", \"/file/group/removed\")],\n file_type=\"group\",\n files = files,\n page = page,\n page_max = math.ceil(amount / 10),\n page_url=\"/file/group/removed?page=\")\n\nclass RecentCreatedHandler:\n\n @xauth.login_required()\n def GET(self):\n page = xutils.get_argument(\"page\", 1, type=int)\n page = max(1, page)\n db = xtables.get_file_table()\n where = \"is_deleted=0 AND creator=%r\" % xauth.get_current_name()\n files = db.select(where=where, order=\"sctime DESC\", offset=page*PAGE_SIZE,limit=PAGE_SIZE)\n count = db.count(where=where)\n return xtemplate.render(\"file/view.html\", \n pathlist = [Storage(name=\"最近创建\", url=\"/file/group/recent_created\")],\n file_type = \"group\",\n files = files,\n page = page, \n page_max = math.ceil(count/PAGE_SIZE), \n page_url=\"/file/recent_edit?page=\")\n\nclass BookmarkHandler:\n \n @xauth.login_required()\n def GET(self):\n page = xutils.get_argument(\"page\", 1, type=int)\n page = max(1, page)\n db = xtables.get_file_table()\n where = \"is_deleted=0 AND is_marked=1 AND creator=%r\" % xauth.get_current_name()\n files = db.select(where=where, order=\"smtime DESC\", offset=(page-1)*PAGE_SIZE,limit=PAGE_SIZE)\n count = db.count(where=where)\n return xtemplate.render(\"file/view.html\", \n pathlist = [Storage(name=\"收藏\", url=\"/file/group/bookmark\")],\n file_type = \"group\",\n files = files,\n page = page, \n page_max = math.ceil(count/PAGE_SIZE), \n page_url=\"/file/group/bookmark?page=\")\n\n\nclass MemoHandler:\n\n @xauth.login_required(\"admin\")\n def GET(self):\n db = xtables.get_schedule_table()\n # files = db.select()\n files = xmanager.get_task_list()\n def set_display_name(file):\n file.display_name = file.name if file.name != \"\" else file.url\n return file\n files = list(map(set_display_name, files))\n return xtemplate.render(\"file/view.html\", \n pathlist = [PathNode(\"备忘录\", \"/file/group/memo\")],\n file_type = \"memo\",\n files = files)\n\nxurls = (\n r\"/file/group\", handler,\n r\"/file/group/ungrouped\", Ungrouped,\n r\"/file/group/removed\", RemovedHandler,\n r\"/file/group/list\", ListHandler,\n r\"/file/group/move\", MoveHandler,\n r\"/file/group/bookmark\", BookmarkHandler,\n r\"/file/group/recent_created\", RecentCreatedHandler,\n r\"/file/group/memo\", MemoHandler,\n)\n\n","sub_path":"handlers/file/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"190847431","text":"#!/usr/bin/env python3 \n#-*- coding: utf-8 -*-\n###\n# Name: Jack Savage \n# Student ID: 2295072 \n# Email: jsavage@chapman.edu \n# Course: PHYS220/MATH220/CPSC220 Fall 2018 Assignment: CW03\n###\ndef main(local_argv):\n from sequences import fibonacci as fib\n \n \n # storing first value in list of command line arguments for use in sequence\n # catching index error in case of no command line option\n try:\n n = int(local_argv[1])\n except IndexError:\n n = 1\n\n print(fib(n).pop())\n\n# Below is the python convention for defining an executable main section\nif __name__ == \"__main__\":\n from sequences import fibonacci as fib\n import sys\n 
main(sys.argv)\n","sub_path":"hw03-eneadodi/fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"47562526","text":"import binascii\nimport json\nimport re\n\nfrom settings import opcodes, opcodes_dict\n\ndef serialize(value: int) -> list:\n \"\"\"Reproduces `serialize` in Bitcoin Core src/script/script.h.\"\"\"\n if value == 0:\n return []\n\n result = []\n neg = value < 0\n absvalue = -value if neg else value\n while absvalue:\n result.append(absvalue & 0xff)\n absvalue >>= 8\n if (result[-1] & 0x80):\n result.append(0x80 if neg else 0)\n elif neg:\n result[-1] |= 0x80\n return result\n\ndef set_vch(vch: list) -> int:\n \"\"\"Reproduces `set_vch` in Bitcoin Core src/script/script.h.\"\"\"\n if len(vch) == 0:\n return 0\n\n result = 0\n for i in range(len(vch)):\n result |= vch[i] << 8 * i\n\n if vch[-1] & 0x80:\n return -(result & ~(0x80 << (8 * (len(vch) - 1))))\n\n return result\n\ndef opcode_to_word(opcode: int):\n assert(0 <= opcode <= 185)\n if 1 <= opcode <= 75: return str(opcode)\n\n word = ''\n for key in opcodes_dict.keys():\n if opcodes_dict[key] == opcode:\n word = key\n break\n assert(word != '')\n return word\n\ndef is_push(opcode: int):\n return 1 <= opcode <= 78\n\ndef decode_hexscript(hexscript: str) -> str:\n bytes = bytearray.fromhex(hexscript)\n script = ''\n\n i = 0\n while i < len(bytes):\n opcode = bytes[i]\n i += 1\n if 0 <= opcode <= 185:\n word = opcode_to_word(opcode)\n script += word\n script += ' '\n\n if is_push(opcode):\n if 1 <= opcode <= 75:\n j = 0\n num_bytes = opcode\n else:\n j = [1, 2, 4][opcode - 76]\n num_bytes_hexstring = ''\n for m in range(i, i + j):\n num_bytes_hexstring += '{:02x}'.format(bytes[m])\n num_bytes = int(num_bytes_hexstring, 16)\n assert(num_bytes >= 0)\n\n i += j\n data = binascii.hexlify(bytes[i:(i + num_bytes)])\n script += data.decode('utf-8')\n script += ' '\n i += num_bytes\n elif opcode == 192:\n word = 'OP_MARKET'\n script += word\n script += ' '\n else:\n raise SyntaxError('invalid opcode %d' % opcode)\n\n return script[:-1]\n\ndef is_valid_opcode(token: str):\n if token in opcodes: return True\n try:\n opcode = int(token)\n return 1 <= opcode <= 75\n except ValueError:\n return False\n\ndef is_valid_data(token: str):\n return re.match(\"^[a-f0-9]*$\", token)\n\ndef tokenize(script: str) -> list:\n tokens = []\n for token in script.split():\n if is_valid_opcode(token):\n tokens.append(token)\n elif is_valid_data(token):\n tokens.append(int(token, 16))\n else:\n raise SyntaxError\n return tokens\n\ndef parse_tokens(tokens: list) -> list:\n if len(tokens) == 0:\n return []\n\n token = tokens.pop(0)\n if token in map(str, range(1, 76)):\n exp = [token]\n token = tokens.pop(0)\n exp.append(token)\n L = []\n L.append(exp)\n rest = parse_tokens(tokens)\n L.extend(rest)\n return L\n elif token == 'OP_PUSHDATA1':\n exp = ['OP_PUSHDATA1']\n token = tokens.pop(0)\n exp.append(token)\n L = []\n L.append(exp)\n rest = parse_tokens(tokens)\n L.extend(rest)\n return L\n elif token == 'OP_PUSHDATA2':\n exp = ['OP_PUSHDATA2']\n token = tokens.pop(0)\n exp.append(token)\n L = []\n L.append(exp)\n rest = parse_tokens(tokens)\n L.extend(rest)\n return L\n elif token == 'OP_PUSHDATA4':\n exp = ['OP_PUSHDATA4']\n token = tokens.pop(0)\n exp.append(token)\n L = []\n L.append(exp)\n rest = parse_tokens(tokens)\n L.extend(rest)\n return L\n elif token == 'OP_IF':\n L =[]\n conseq_tokens = []\n while not tokens[0] in ['OP_ELSE', 
'OP_ENDIF']:\n token = tokens.pop(0)\n conseq_tokens.append(token)\n conseq = parse_tokens(conseq_tokens)\n exp = ['OP_IF']\n exp.append(conseq)\n token = tokens.pop(0) # pop off 'OP_ELSE' or 'OP_ENDIF'\n if token == 'OP_ELSE':\n alt_tokens = []\n while tokens[0] != 'OP_ENDIF':\n token = tokens.pop(0)\n alt_tokens.append(token)\n token = tokens.pop(0) # pop off 'OP_ENDIF'\n alt = parse_tokens(alt_tokens)\n exp.append(alt)\n L.append(exp)\n rest = parse_tokens(tokens)\n L.extend(rest)\n return L\n elif token == 'OP_NOTIF':\n L =[]\n conseq_tokens = []\n while not tokens[0] in ['OP_ELSE', 'OP_ENDIF']:\n token = tokens.pop(0)\n conseq_tokens.append(token)\n conseq = parse_tokens(conseq_tokens)\n exp = ['OP_NOTIF']\n exp.append(conseq)\n token = tokens.pop(0) # pop off 'OP_ELSE' or 'OP_ENDIF'\n if token == 'OP_ELSE':\n alt_tokens = []\n while tokens[0] != 'OP_ENDIF':\n token = tokens.pop(0)\n alt_tokens.append(token)\n token = tokens.pop(0) # pop off 'OP_ENDIF'\n alt = parse_tokens(alt_tokens)\n exp.append(alt)\n L.append(exp)\n rest = parse_tokens(tokens)\n L.extend(rest)\n return L\n elif token == 'OP_ENDIF':\n raise SyntaxError('unexpected OP_ENDIF')\n else:\n L = [token]\n rest = parse_tokens(tokens)\n L.extend(rest)\n return L\n\ndef parse(script: str) -> list:\n return parse_tokens(tokenize(script))","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"204947539","text":"#!/usr/bin/env python\nimport StringIO\nimport argparse\nimport ConfigParser as configparser\nimport base64\nimport re\nimport signal\nimport subprocess\nimport os\nimport sys\n\nimport errno\nimport threading\n\nimport atexit\nimport time\n\nimport thread\nimport traceback\n\nimport binascii\nfrom configuration import client_transformation\nfrom configuration import post_write_hook\n\nsys.path = sys.path[1:]\nimport consul as consul_client\n\n\nclass Resource():\n def __init__(self, key, content):\n self.key = key\n self.content = content\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\nclass Starter():\n def __init__(self):\n self.loop = True\n self.restart = False\n self.process = None\n\n def run_command(self, command):\n while self.loop:\n args = []\n if len(command) > 1:\n args.extend(command[1:])\n self.process = subprocess.Popen(\" \".join(command), shell=True, env=os.environ, stdout=sys.stdout)\n self.restart = False\n self.process.wait()\n if self.process.returncode:\n if self.restart:\n print(\"Restarting application\")\n time.sleep(2)\n else:\n print(\"Error code \" + str(self.process.returncode) + \" restarting after 10 s\")\n time.sleep(10)\n else:\n self.loop = False\n print(\"Interrupt main thread\")\n os.kill(os.getpid(), signal.SIGINT)\n\n\n\n def stop_process(self):\n if self.process and self.process.returncode == None:\n print(\"Killing process\")\n self.process.kill()\n\n def sigterm(self):\n self.loop = False\n self.stop_process()\n\n def main(self, args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"command\", help=\"Command to start\", nargs=\"*\")\n parser.add_argument(\"--prefix\", help=\"Consul tree prefix\", required=True)\n parser.add_argument(\"--path\", help=\"Consul tree path\", required=True)\n parser.add_argument(\"--destination\", help=\"Destination directory\", required=True)\n args = 
parser.parse_args(args=args)\n\n if args.command:\n vt = threading.Thread(target=self.run_command, args=(args.command,))\n vt.daemon = True\n vt.start()\n\n signal.signal(signal.SIGTERM, self.sigterm)\n atexit.register(self.stop_process)\n\n self.poll_consul(args.prefix, args.path, args.destination, args.command)\n\n def poll_consul(self, prefix, path, destination, loop=True):\n\n self.consul = consul_client.Consul()\n index = None\n resources = {}\n first = True\n\n try:\n configuration = None\n configuration_entry = self.consul.kv.get(prefix + \"/config.ini\")\n if configuration_entry:\n configuration = configparser.ConfigParser()\n configuration.readfp(StringIO.StringIO(binascii.a2b_base64(configuration_entry[1]['Value'])), \"config.ini\")\n while first or loop:\n consul_subtree_path = (prefix + \"/\" + path).strip(\"/\")\n index, data = self.consul.kv.get(consul_subtree_path, recurse=True, index=index)\n changed = []\n for d in data:\n key = d['Key']\n value = bytes(base64.b64decode(bytes(d['Value'])))\n if key not in resources.keys():\n resources[key] = Resource(key, \"\")\n resource = resources[key]\n if resource.content != value:\n if value:\n changed.append(key)\n resource.content = value\n for key in changed:\n value = resources[key].content\n transformed_value = resources[key].content\n try:\n if configuration:\n transformed_value = self.transform_value(configuration, self.consul, key, value)\n relative_key = key.replace(consul_subtree_path, \"\").strip('/')\n self.write_to_file(destination, relative_key, transformed_value)\n except:\n print(\"Unexpected error during a write to {}: {}\".format(key, sys.exc_info()[0]))\n traceback.print_exc()\n\n if \"post_write_hook\" in configuration.sections():\n for hook in configuration.options('post_write_hook'):\n pattern = configuration.get('post_write_hook', hook)\n for key in changed:\n if re.match(pattern, key):\n relative_key = key.replace(consul_subtree_path, \"\").strip('/')\n value = getattr(post_write_hook, hook)(key, resources[key].content, self.dest_file_path(destination, relative_key))\n\n\n\n\n if not first and self.process and self.process.returncode is None and changed:\n self.restart = True\n os.kill(self.process.pid, signal.SIGTERM)\n\n first = False\n except KeyboardInterrupt:\n pass\n\n def dest_file_path(self, destination, key):\n return os.path.join(destination, key)\n\n def write_to_file(self, destination, key, value):\n dest_file = self.dest_file_path(destination, key)\n parent_dir = os.path.dirname(dest_file)\n mkdir_p(parent_dir)\n print(\"Saving file from consul to {}\".format(dest_file))\n with open(dest_file, \"wb\") as file:\n file.write(value)\n\n def transform_value(self, config, consul, key, value):\n if not config:\n return value\n if \"transformation\" in config.sections():\n for transformation in config.options('transformation'):\n pattern = config.get('transformation', transformation)\n if re.match(pattern, key):\n value = getattr(client_transformation, transformation)(key, value, consul)\n return value\n\n\ndef main(args=sys.argv[1:]):\n Starter().main(args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"configuration/consul_starter.py","file_name":"consul_starter.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"500208606","text":"from Kfold import k_fold\nimport numpy as np\ndef cv_estimate(fit_fn, pred_fn, loss_fn, X, y, n_folds, **varargin):\n \"\"\"\n Input:\n model = fit_fn(X_train, 
y_train)\n y_hat = pred_fn(X_test)\n L = loss_fn(y_hat, y_test)\n X, design matrix, shape=(n_samples,dim)\n y, class labels, shape=(n_samples,)\n n_folds, number of folds used for cross-validation\n\n Optional parameters:\n varargin= {'randomizeorder':randomize_order,'testfolds':test_folds}\n which mean, respectively: whether to shuffle the original data: {0,1}\n and explicitly specified test_folds: [np.array_1,np.array_2,...,np.array_folds]\n \"\"\"\n randomize_order = varargin.get('randomizeorder', False) # 0 means the data is not shuffled\n test_folds = varargin.get('testfolds', []) # fetch the explicitly specified test folds \n n_samples = X.shape[0]\n if not test_folds: # if no test folds were specified\n train_folds, test_folds = k_fold(n_samples, n_folds, randomize_order)\n else:\n all_index = set(range(n_samples))\n train_folds = [np.array(list(all_index-set(single_array))) for single_array in test_folds]\n loss = np.zeros((n_samples, ))\n for f in range(len(train_folds)):\n X_train = X[train_folds[f]]; X_test = X[test_folds[f]]\n y_train = y[train_folds[f]]; y_test = y[test_folds[f]]\n model = fit_fn(X_train, y_train)\n y_hat_index, _ = pred_fn(model, X_test)\n y_hat = model.support[y_hat_index]\n loss[test_folds[f]] = loss_fn(y_hat, y_test)\n\n mu = np.mean(loss)\n se = np.std(loss)/np.power(n_samples,0.5)\n\n return mu, se\n","sub_path":"MLAPP_CODE/MLAPP-C7-Code/CvEstimate.py","file_name":"CvEstimate.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"250556905","text":"# -*- coding: UTF-8 -*-\nimport mcpi.minecraft as minecraft\nimport mcpi.block as block\n\nmc = minecraft.Minecraft.create()\n\nsize = 20\n'''\npos = mc.player.getTilePos()\nx = pos.x+2\ny = pos.y\nz = pos.z\n'''\nx = 0\ny = 0\nz = 0\n\nmidx = x+size/2\nmidy = y+size/2\n\n# Build the main body of the house\nmc.setBlocks(x, y, z, x+size, y+size, z+size, block.STONE.id)\nmc.setBlocks(x+1, y, z+1, x+size-1, y+size-1, z+size-1, block.AIR.id)\n\n# Build the door\nmc.setBlocks(midx-1, y, z, midx+1, y+3, z, block.AIR.id)\n\n# Build the windows\nmc.setBlocks(x+3, midy+3, z, midx-3, y+size-3, z, block.GLASS.id)\nmc.setBlocks(midx+3, midy+3, z, x+size-3, y+size-3, z, block.GLASS.id)\n\n# Build the roof\nmc.setBlocks(x, y+size+1, z, x+size, y+size+1, z+size, block.WOOD.id)\n\n# Lay the wool carpet\nmc.setBlocks(x, y-1, z, x+size, y-1, z+size, block.WOOL.id, 11)\n","sub_path":"Unit 04 创意建造-房子/01-House.py","file_name":"01-House.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"394993891","text":"#!/usr/bin/env python3\n\n# from optparse import OptionParser\nimport sys\nfrom game import *\n\nplayers = PlayerList( \"../json/players.json\")\nquestion_list = QuestionList(\"../json/game1.json\")\ngame = Game(question_list, players, \"score\", \"status\", \"answered_id\")\n\noption = sys.argv[1]\nargs = sys.argv[2:]\nprint(option, args, len(args))\n\nif option == 'a':\n # the answer argument\n if len(args) < 3:\n raise ValueError(\"Usage: ./Jeopardy.py a Player_id Question_id Choice\")\n question_id = (ord(args[1][0]) - ord('a')) + int(args[1][1]) * 5\n player_id, choice_id = int(args[0]), ord(args[2]) - ord('a')\n print(player_id, question_id, choice_id)\n game.answer(player_id, question_id, choice_id)\nelif option == 'r':\n game.reset()\n print(\"Game reset done.\")\nelif option == 's':\n # question_id = int(args[0])\n question_id = (ord(args[0][0]) - ord('a')) + int(args[0][1]) * 5\n print(question_id)\n game.select(question_id)\nelif option == 'h':\n game.home()\nelse:\n print(\"Usage: ./Jeopardy.py a Player_id Question_id 
Choice\")\n\n","sub_path":"src/jeopardy.py","file_name":"jeopardy.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"87420145","text":"#!/usr/bin/env python3\n\nimport multiprocessing as mp\nimport time\nimport sys\n\nMAX_REC = 2000000\n\nclass jobexecclass:\n\n def __init__(self, id, startpointlist, movelist):\n self.id = id\n self.startpointlist = startpointlist\n self.movelist = movelist\n self.rcounter = 0\n pass\n\n def makearr(self,x, y):\n arr = []\n for i in range(y):\n currlist = []\n for j in range(x):\n currlist.append(0)\n arr.append(currlist)\n return arr\n\n def executejob(self, q):\n visited = self.makearr(8,8)\n starttime = time.time()\n try:\n self.ktour(self.startpointlist[1], self.startpointlist[0], 0, visited)\n except KeyboardInterrupt:\n print(\"---KeyboardInterrupt---\")\n pass\n totaltime = time.time() - starttime\n q.put([self.rcounter,totaltime])\n sys.exit()\n\n\n def ktour(self,startx, starty, iteration, visited):\n self.rcounter += 1\n\n visited[starty][startx] = iteration\n\n if iteration == 64:\n return True\n\n if self.rcounter > MAX_REC:\n return False\n\n for move in self.movelist:\n testx = startx + move[1]\n testy = starty + move[0]\n\n if testx >= 0 and testx <= 7 and testy >= 0 and testy <= 7 and visited[testy][testx] == 0:\n\n if (self.ktour(testx,testy,iteration+1,visited)):\n return True\n\n visited[starty][startx] = 0\n return False\n\n\n\n\n\ndef shuffle():\n for i in shufflelist:\n inputlist.append(originallist[i])\n\nif __name__ == \"__main__\":\n\n # P Vars\n originallist = [[-2,1],[-1,2],[1,2],[2,1],[1,-2],[2,-1],[-1,-2],[-2,-1]]\n inputlist = []\n\n ## VARS\n startpoint = [0,1]\n shufflelist = [7,0,1,2,3,4,5,6]\n\n shuffle()\n #executejob(startpoint,inputlist)\n\n proclist = []\n returnlist = []\n\n processcount = mp.cpu_count()-1 if mp.cpu_count() != 1 else 1\n q = mp.Queue()\n\n if mp.get_start_method() != \"spawn\":\n mp.set_start_method('spawn')\n print(mp.get_start_method())\n print(processcount)\n for i in range(processcount):\n k = jobexecclass(i+1, startpoint,inputlist)\n kprocess = mp.Process(target=k.executejob, args=(q,))\n proclist.append(kprocess)\n print(\"Spawning process: \" + str(i+1))\n\n for p in proclist:\n p.start()\n print(\"Starting process: \" + str(proclist.index(p)+1))\n\n getcounter = 0\n for p in proclist:\n ret = q.get() # will block\n getcounter += 1\n print(f\"Getcounter {getcounter}\")\n returnlist.append(ret)\n\n for p in proclist:\n p.join()\n\n\n\n while True:\n alldone = True\n for p in proclist:\n alldone = False if p.is_alive() else True\n if alldone:\n q.close()\n q.join_thread()\n avgrperslist = []\n maxs = -1\n totalr = 0\n for i in range(len(returnlist)):\n maxs = returnlist[i][1] if returnlist[i][1] > maxs else maxs\n totalr += returnlist[i][0]\n avgrperslist.append(returnlist[i][0]/returnlist[i][1])\n \n totrpers = totalr / maxs\n\n avgrpers = totalr/len(returnlist)\n avgrpers = avgrpers/maxs\n\n printformatting = \"formatted\" # formatted or raw\n if printformatting == \"formatted\":\n print(\"_____________________________\")\n print(f\"Rcounter: {totalr:,}\")\n print(f\"Total Time: {maxs:.2f}\")\n print(f\"Total R Per S Per Proc: {avgrpers:,.2f}\")\n print(f\"Total R Per S: {totrpers:,.2f}\")\n elif printformatting == \"raw\":\n print(\"_____________________________\")\n print(f\"{totalr}\")\n print(f\"{maxs}\")\n print(f\"{avgrpers}\")\n print(f\"{totrpers}\")\n 
break","sub_path":"scriptsandstuff/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"498740431","text":"from sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.ext.declarative import declarative_base\nimport logging\nfrom pathlib import Path\n\n\nclass SqlConnector:\n\n def __init__(self, db_name):\n self._logger = logging.getLogger(__name__)\n self._db_name = db_name\n\n self._engine = self._connect_engine()\n\n @property\n def engine(self):\n return self._engine\n\n def _connect_engine(self):\n # get flask root level folder\n engine = None\n root_path = Path(__file__).parent.parent\n db_path = root_path / f'data/{self._db_name}'\n\n self._logger.info('Connecting to database')\n try:\n engine = create_engine(f'sqlite:///{db_path}')\n except Exception as ex:\n self._logger.error(ex)\n\n return engine\n\n\nclass Table:\n\n def __init__(self, table_name, engine, table_schema = None):\n self._logger = logging.getLogger(__name__)\n self._engine = engine\n\n self._metadata = MetaData(bind=self._engine)\n self._Base = declarative_base()\n\n self._table_name = table_name\n self._table_schema = table_schema\n\n self._table = self._reflect_table_metadata()\n\n @property\n def table(self):\n return self._table\n\n def create_table(self):\n \"\"\" Creates table if table does not exist in database \"\"\"\n\n self._logger.debug('Create State %s', self._table)\n\n if self._table is None:\n table = type('Table', (self._Base, ), self._table_schema)\n table.__table__.create(bind=self._engine)\n self._logger.info('%s table created', self._table_name)\n\n # update table object after creation\n self._table = self._reflect_table_metadata()\n\n def drop_table(self):\n \"\"\" drops table if exists in database \"\"\"\n\n self._logger.debug('Drop State %s', self._table)\n\n if self._table is not None:\n self._table.drop(self._engine)\n self._logger.info('%s table dropped', self._table_name)\n self._table = self._reflect_table_metadata()\n\n def insert_data(self, data_dict: list):\n \"\"\" Inserts data into table. Data is passed as a list of dictionaries \"\"\"\n if self._table is None:\n self._logger.error('%s table does not exist. Create table first')\n else:\n conn = self._engine.connect()\n conn.execute(self._table.insert(), data_dict)\n\n def _reflect_table_metadata(self):\n \"\"\" Reflects database object if exists. 
Returns None if object does not exist \"\"\"\n self._metadata.clear()\n self._metadata.reflect()\n try:\n table = self._metadata.tables[self._table_name]\n except KeyError:\n table = None\n except Exception as ex:\n self._logger.error(ex)\n raise(ex)\n\n self._logger.debug(table)\n return table\n","sub_path":"util/sqlconnector.py","file_name":"sqlconnector.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"311870546","text":"import requests\nimport urllib.request\nimport pandas as pd\nimport subprocess\nfrom subprocess import call\nfrom bs4 import BeautifulSoup, SoupStrainer\nfrom boilerpipe.extract import Extractor\nfrom collections import Counter\n## Headers for accessing the urls.\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:63.0) Gecko/20100101 Firefox/63.0'}\n\n## Taking input from the console.\nurl = input(\"Please enter the url (e.g: https://www.beko.com.tr): \")\npage = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}) \ndata = page.text\nsoup = BeautifulSoup(data, \"html.parser\")\n\n## Creating an empty list.\nrecords = []\nfor link in soup.find_all('a'):\n ## Following the urls.\n links = link.get('href')\n if links is not None:\n ## Checking the links.\n all_links = str(url)+links\n records.append(all_links)\n \"\"\"f = open(\"content.txt\", \"w+\")\n f.write(\"%s\\n\" % records)\"\"\"\n \n## Converting to a dataframe.\ndf = pd.DataFrame(records)\ndf.to_csv('contents.txt', index=False, encoding='utf-8', columns=None, header=False)\n## Running a shell script from our directory.\nsubprocess.call(['bash', './cleaner.sh'])\n\n## Creating an empty list.\nall_records = []\n## Defining the filepath.\nfilepath = 'contents.txt' \nwith open(filepath) as fp: \n lines = fp.readlines()\n # Ask boilerpipe to fetch the data\n extractors = [Extractor(extractor='ArticleExtractor', url=url) for url in lines]\n # Ask boilerpipe to extract the text\n raw_texts = [extractor.getText() for extractor in extractors]\n # count the occurrences of words in each text\n # word_counts = [Counter(text.split(\" \")) for text in raw_texts]\n #print(raw_texts)\n all_records.append(raw_texts)\n #print(all_records)\n ## Converting to a dataframe.\n df = pd.DataFrame(all_records).drop_duplicates()\n df.to_csv('output_contents.txt', sep=\"\\n\", index=False, columns=None, header=False, encoding='utf-8')","sub_path":"get_data_boiler.py","file_name":"get_data_boiler.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"256350401","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''py -3 main.py C:\\\\Users\\\\Admin\\\\Downloads\\\\intro-to-python3-analysis-master'''\n\nimport os\nimport sys\n\ndef main():\n userpath = ''\n if len(sys.argv)==2:\n userpath = sys.argv[1]\n else:\n print('input a command parameter, e.g. py -3 main.py C:\\\\Users\\\\Admin\\\\Downloads')\n\n if os.path.isdir(userpath): \n for root, dirs, files in os.walk(userpath):\n path = root.split(os.sep)\n\n count_dirs = count_files = 0\n for f in dirs:\n count_dirs += 1\n\n for file in files:\n count_files += 1\n print((len(path) - 2) * '--'+'>', os.path.basename(root), ':\\t', count_dirs, 'folders, ', count_files, 'files')\n else:\n print('{} is not a folder or it does not exist. 
input correct folder!'.format(userpath))\n\nmain()\n","sub_path":"CountFilesAndFolders/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"654289269","text":"#include=utf-8\nimport unittest\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver import ActionChains\nimport WebAppTest\n\n\nclass WebappTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n localtime = time.ctime()\n print('forgetpwd_all用例开始,当前时间:%s'%localtime)\n\n cls.webtest = WebAppTest.WebAppTest()\n webtest = cls.webtest\n cls.driver = webtest.chrome()\n driver = cls.driver\n driver.get(\"http://test.robotscloud.com:81/robots/aiadmin_login\")\n driver.maximize_window()\n driver.implicitly_wait(10)\n driver.find_element_by_css_selector('#loginForm > div.password-group > a').click()\n time.sleep(2)\n\n def test_forgetpwd1(self):\n #测试内容:账号输入66个字符,只能输入65个字符\n '''WebappTest forgetpwd1'''\n driver = self.driver\n webtest = self.webtest\n webtest.forgetpwd_account_send('123456789022345678903234567890423456789052345678906234567890723456')\n #输入66个字符\n lenth = len(driver.find_element_by_class_name('text-input').get_attribute('value'))\n self.assertEqual(lenth,65)\n #判断输入了66个字符之后是否只显示了65个\n\n def test_forgetpwd2(self):\n #测试内容:输入未注册的账号,点击框外区域,提示:\"该账号未注册\"\n '''WebappTest forgetpwd2'''\n webtest = self.webtest\n driver = self.driver\n webtest.forgetpwd_account_send('17671243338')\n #输入未注册的手机号码\n webtest.forgetpwd_blankclick()\n time.sleep(1)\n text = driver.find_element_by_class_name('error-info').text\n self.assertEqual(text,'该账号未注册')\n #判断页面是否弹出错误提示“该账号未注册”\n\n def test_forgetpwd3(self):\n #测试内容:输入正确的已注册手机号,点击下一步,跳转到手机号页面成功\n '''WebappTest forgetpwd3'''\n driver = self.driver\n webtest = self.webtest\n webtest.forgetpwd_account_send('17671243339')\n webtest.forgetpwd_next_click()\n #输入正确的已注册的手机号,点击下一步\n time.sleep(1)\n text = driver.find_element_by_css_selector('body > div.container > div > h4').text\n self.assertEqual(text,'忘记密码')\n #判断跳转的页面是否能获取到文本“忘记密码”\n\n def test_forgetpwd4(self):\n #测试内容:输入正确的已注册邮箱,点击下一步,页面跳转后显示已发送邮件到对应的账号成功,点击“确定”按钮返回登录页面\n '''WebappTest forgetpwd4'''\n driver = self.driver\n webtest = self.webtest\n webtest.forgetpwd_account_send('627508698@qq.com')\n webtest.forgetpwd_next_click()\n #输入正确的已注册的邮箱账号627508698@qq.com\n time.sleep(1)\n text1 = driver.find_element_by_css_selector('body > div.container > div > div').text\n self.assertIn('已发送',text1)\n #判断跳转的页面是否能获取到文本“已发送……”\n driver.find_element_by_css_selector('body > div.container > div > button').click()\n time.sleep(1)\n text2 = driver.find_element_by_css_selector('#loginForm > div.register > a').text\n self.assertEqual(text2,'注册账号')\n #判断跳转的页面是否能获取到按钮“注册账号”\n\n def test_forgetpwd5(self):\n #测试内容:手机号设置密码页面下,输入空的或小于6位的密码和6位验证码,点击确定,提示:\"密码为6~12位的数字或字母\"\n '''WebappTest forgetpwd5'''\n driver = self.driver\n webtest = self.webtest\n webtest.forgetpwd_account_send('17671243339')\n webtest.forgetpwd_next_click()\n #输入正确的已注册的手机号\n time.sleep(1)\n driver.find_element_by_css_selector('body > div.container > div > div:nth-child(4) > div > input').send_keys('12345')\n driver.find_element_by_css_selector('body > div.container > div > div:nth-child(5) > div > input').send_keys('123456')\n # 输入小于6位的密码和6位验证码\n driver.find_element_by_css_selector('body > div.container > div > button').click()\n time.sleep(0.5)\n text1 = driver.find_element_by_class_name('error-info').text\n self.assertEqual(text1,'密码为6~12位的数字或字母')\n 
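The fixed time.sleep() calls in the tests above make the suite timing-sensitive. A minimal sketch of an explicit-wait helper that could replace them; the selector is taken from the tests themselves, but the helper name and timeout are assumptions, not part of the original suite:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_error_text(driver, timeout=5):
    # Block until the 'error-info' banner is present, then return its text.
    element = WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'error-info'))
    )
    return element.text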
driver.refresh()\n time.sleep(1)\n driver.find_element_by_class_name('text-input').send_keys('17671243339')\n driver.find_element_by_css_selector('body > div.container > div > button').click()\n time.sleep(1)\n driver.find_element_by_css_selector('body > div.container > div > div:nth-child(5) > div > input').send_keys('123456')\n driver.find_element_by_css_selector('body > div.container > div > button').click()\n #输入空的密码和6位验证码点击确定\n time.sleep(0.5)\n text2 = driver.find_element_by_class_name('error-info').text\n self.assertEqual(text2,'密码为6~12位的数字或字母')\n\n def tearDown(self):\n self.driver.get(\"http://test.robotscloud.com:81/robots/aiadmin_login\")\n time.sleep(2)\n self.driver.find_element_by_css_selector('#loginForm > div.password-group > a').click()\n time.sleep(2)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\n localtime = time.ctime()\n print('forgetpwd_all用例结束,当前时间:%s'%localtime)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"unittest_forgetpwd_all.py","file_name":"unittest_forgetpwd_all.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"316062645","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 2 16:09:58 2019\n\n@author: lxu\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom PerformanceFactorAnalysis import PFA\nfrom copy import deepcopy\nimport matplotlib.pyplot as plt\n\nfrom PreprocessAssistment import PreprocessAssistmentSkillBuilder, PreprocessAssistmentProblemSkill\n\n# not complete yet\n\nif __name__ == \"__main__\":\n # file_path = \"data/ml-100k/u.data\"\n file_path = \"/home/lxu/Documents/StudentLearningProcess/skill_builder_data_corrected_withskills.csv\"\n item = 'builder'\n model = PFA()\n model.set_params({\"num_feat\": 6, \"epsilon\": 5, \"_lambda\": 0, \"alpha\": 0, \"momentum\": 0.8, \"maxepoch\": 20, \"num_batches\": 300,\n \"batch_size\": 1000})\n data = PreprocessAssistmentSkillBuilder(file_path) \n \n print(data.shape[0], len(np.unique(data['user_id'])), len(np.unique(data['problem_id'])), len(np.unique(data['skill_id'])))\n \n trainnp, testnp = train_test_split(data, test_size=0.2)\n \n model.fit(train, test, order_train, order_test, len(np.unique(ratings[:, 0])), len(np.unique(ratings[:, 1])))\n\n # Check performance by plotting train and test errors\n plt.plot(range(model.maxepoch), model.logloss_train, marker='o', label='Training Data')\n plt.plot(range(model.maxepoch), model.logloss_test, marker='v', label='Test Data')\n plt.plot(range(model.maxepoch), model.auc_test, marker='*', label='AUC: Test Data')\n plt.title('The Truncated Assitment Dataset Learning Curve')\n plt.xlabel('Number of Epochs')\n plt.ylabel('RMSE')\n plt.legend()\n plt.grid()\n plt.show()\n print(\"precision_acc,recall_acc:\" + str(model.topK(test)))","sub_path":"RunPFA.py","file_name":"RunPFA.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"84569848","text":"import numpy as np\n\nimport matplotlib.pyplot as pl\n\nimport tacoma as tc\nfrom tacoma.model_conversions import estimate_flockwork_P_args\n\nsample_aggregates = False\nN_time_steps = 2\n\nprint(\"===== edge_lists => edge_lists =====\")\n\nL = tc.edge_lists()\n\nL.N = 3\nL.t = [0.0,1.0,2.0]\nL.tmax = 3.0\nL.edges = [ \n [\n (0,1)\n ],\n [\n (1,2), (0,2)\n ],\n [\n (1,2)\n ],\n ]\n\nparams = 
estimate_flockwork_P_args(L,dt=1.1,adjust_last_bin_if_dt_does_not_fit=True)\n\ng = np.array(params['rewiring_rate'])\nt, g = g[:,0], g[:,1]\n\nt = np.append(t, params['tmax'])\ng = np.append(g, g[-1])\nprint(t, g)\n\nP = np.array(params['P'])\nP = np.append(P, P[-1])\nprint(t, P)\n\nfig, ax = pl.subplots(1,2)\n\nax[0].step(t,g,where='post')\nax[1].step(t,P,where='post')\n\n# second estimation\n\nparams = estimate_flockwork_P_args(L, dt = 1.0)\n\ng = np.array(params['rewiring_rate'])\nt, g = g[:,0], g[:,1]\n\nt = np.append(t, params['tmax'])\ng = np.append(g, g[-1])\nprint(t, g)\n\nP = np.array(params['P'])\nP = np.append(P, P[-1])\nprint(t, P)\nax[0].step(t,g,where='post')\nax[1].step(t,P,where='post')\n\npl.show()\n\n","sub_path":"sandbox/test_fwP_params.py","file_name":"test_fwP_params.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"279446859","text":"\nimport numpy\nimport subprocess\nimport os\nimport pandas as pd \n\nopt2_flag = [\"-fthread-jumps\", \n \"-fcrossjumping\", \n \"-foptimize-sibling-calls\", \n \"-fcse-follow-jumps\", \"-fcse-skip-blocks\", \n \"-fgcse\", \"-fgcse-lm\", \n \"-fexpensive-optimizations\", \n \"-fstrength-reduce\", \n \"-frerun-cse-after-loop\", \"-frerun-loop-opt\", \n \"-fcaller-saves\", \n \"-fpeephole2\", \n \"-fschedule-insns\", \"-fschedule-insns2\", \n \"-fsched-interblock\", \"-fsched-spec\", \n \"-fregmove\", \n \"-fstrict-aliasing\", \n \"-fdelete-null-pointer-checks\", \n \"-freorder-blocks\", \"-freorder-functions\", \n \"-funit-at-a-time\", \n \"-falign-functions\", \"-falign-jumps\", \n \"-falign-loops\", \"-falign-labels\", \n \"-ftree-vrp\", \n \"-ftree-pre\"]\n \nopt3_flag = [\"-fgcse-after-reload\", \n \"-fipa-cp-clone\",\n \"-floop-interchange\",\n \"-floop-unroll-and-jam\", \n \"-fpeel-loops\", \n \"-fpredictive-commoning\", \n \"-fsplit-loops\", \n \"-fsplit-paths\", \n \"-ftree-loop-distribution\", \n \"-ftree-loop-vectorize\", \n \"-ftree-partial-pre\", \n \"-ftree-slp-vectorize\", \n \"-funswitch-loops\", \n \"-fvect-cost-model\"]\nopt2_len = len(opt2_flag)\nopt3_len = len(opt3_flag)\nspace_num = 20\nexe_file_name = \"test_ex\"\nbenchmark_file_name = \"simple.c\"\nexecute_times = 5\ndict = {'generation': [], 'space': [],'flags':[], 'time': []} \ndf = pd.DataFrame(dict) \n\ndef gen_spaces(num):\n spaces = []\n for j in range(0,space_num):\n space = numpy.random.randint(2, size=(num+1))\n spaces.append(space.tolist())\n return spaces\n\ndef get_time(str):\n str=str.strip()\n len_str = len(str)\n str = str[0:len_str-1]\n start = str.find(\":\")\n time = float(str[start+1:])\n return time\n\ndef gen_opt(spaces,opt_flags):\n time_result = 10000\n opt_space_id = -1\n time_repo = []\n flags_repo = []\n flags = [\"gcc\"]\n for space_id in range(0,len(spaces)):\n if spaces[space_id][0] == 1 :\n flags = [\"gcc\", \"-O1\"]\n else :\n flags = [\"gcc\"]\n for i in range(1, len(spaces[space_id])):\n if spaces[space_id][i] == 1:\n flags.append(opt_flags[i-1])\n flags.append(\"-o\")\n flags.append(exe_file_name)\n flags.append(benchmark_file_name)\n subprocess.run(flags)\n total_time = 0\n for j in range(0, execute_times):\n os.system(\"echo>output\")\n outfile = open(\"out\", \"w\")\n subprocess.call([\"/usr/bin/time\", \"-f'%E'\",\"./test_ex\"],stderr=outfile)\n f = open(\"out\", \"r\")\n f.readline()\n t_str = f.readline()\n total_time = total_time + get_time(t_str)\n time = total_time/execute_times\n time_repo.append(time)\n 
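For comparison, one compile-and-measure cycle of the flag-search loop above can be timed in-process instead of parsing /usr/bin/time output. A minimal sketch under that assumption; the helper and its defaults mirror the script's globals but are illustrative, not part of the original code:

import subprocess
import time

def time_candidate(flags, src='simple.c', exe='./test_ex', runs=5):
    # Compile once with the candidate flag set ...
    subprocess.run(['gcc', *flags, '-o', exe, src], check=True)
    # ... then average wall-clock runtime over several executions.
    start = time.perf_counter()
    for _ in range(runs):
        subprocess.run([exe], check=True, stdout=subprocess.DEVNULL)
    return (time.perf_counter() - start) / runs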
flags_repo.append(flags)\n if time.*?): (?P.*?)\\n\", text))\n\n k = v[\"Plugin\"]\n if \"PluginInstance\" in list(v.keys()):\n k += \"-\" + v[\"PluginInstance\"]\n k += \"/\" + v[\"Type\"]\n if \"TypeInstance\" in list(v.keys()):\n k += \"-\" + v[\"TypeInstance\"]\n\n if v[\"Severity\"] == \"OKAY\":\n data.pop(k, None)\n else:\n data[k] = v;\n\n with open(COLLECTD_FILE, 'wb') as f:\n f.write(pickle.dumps(data))\n\n lock.release()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/freenas/usr/local/libexec/collectd_alert.py","file_name":"collectd_alert.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"475251244","text":"import healpy as hp\nimport tables\n\n# read ebhis pytable\nebhis_store = tables.open_file('../survey2pytable/data/ebhis.h5')\nebhis = ebhis_store.root.survey\n\n# query hvc125 position and save data\nhi = ebhis.read_where(\"(GLON > 124.) & (GLON < 127.) & (GLAT > 40.5) & (GLAT < 42.5)\")\nnp.save('data/hi', hi)\n\n# read full meisner & finkbeiner map\nmf_full = hp.read_map('../mf_maps/opacity_10.83.hpx.fits')\nmf_full = hp.ud_grade(mf_full, 1024)\n\n# extract only valid hpxindices and save to disk\nmf_opacity = mf_full[hi['HPXINDEX']]\nnp.save('data/mf_opacity', mf_opacity)\n","sub_path":"generate_hpx_spectra.py","file_name":"generate_hpx_spectra.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"161463555","text":"import time\nimport math\n\ndef solve( target ):\n \n best = target+1\n area = 0\n\n a = 1\n while True:\n\n #b = a\n b = int( math.sqrt( 4*target//(a*a+a) ) )\n while True:\n cnt = a*b*(a+1)*(b+1)//4\n diff = abs( cnt - target )\n if diff < best:\n best = diff\n area = a*b\n #print(\"a = %d , b = %d , area = %d , bestdiff = %d\" % ( a , b , area , best ) )\n elif diff == best:\n area = max( area , a*b )\n\n if cnt > target + best:\n break\n\n b += 1\n \n a += 1\n if a*a*(a+1)*(a+1)//4 > target + best:\n break\n\n #print( \"best :\" , best ) \n #print( \"area :\" , area\n return area\n\n\ndef test():\n\n t1 = time.time()\n for t in range(10000):\n solve(2*10**6)\n t2 = time.time()\n print( \"time:\" , t2-t1 , \"s\" )\n\n\nif __name__ == \"__main__\":\n\n #test()\n T = int(input())\n for t in range(T):\n print( solve(int(input())) )\n \n\n \n","sub_path":"ProjectEulerPlus/085-Counting-Rectangles_20150824.py","file_name":"085-Counting-Rectangles_20150824.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"411232944","text":"\"\"\"\n8.\tПосчитать, сколько раз встречается определенная цифра в введенной\n последовательности чисел. Количество вводимых чисел и цифра,\n которую необходимо посчитать, задаются вводом с клавиатуры.\n\nПример:\nСколько будет чисел? - 2\nКакую цифру считать? - 3\nЧисло 1: 223\nЧисло 2: 21\nБыло введено 1 цифр '3'\n\nЗДЕСЬ ДОЛЖНА БЫТЬ РЕАЛИЗАЦИЯ ЧЕРЕЗ РЕКУРСИЮ\n\"\"\"\n\n\ndef num_enter(message):\n while True:\n num = input(message)\n if num.isdigit():\n return int(num)\n else:\n print(\"Invalid number. 
Repeat entry.\")\n\n\ndef check_num(num, digit, count=-1, digit_count=0):\n if count == 0:\n return digit_count\n if count == -1:\n count = len(str(num))\n num_dig = (num // pow(10, count-1)) % 10\n if num_dig == digit:\n digit_count += 1\n return check_num(num, digit, count - 1, digit_count)\n\n\ndef dig_count(num_count, digit, count=1, digit_count=0):\n if num_count + 1 == count:\n return digit_count\n num = num_enter(f\"Number {count}: \")\n digit_count += check_num(num, digit)\n return dig_count(num_count, digit, count + 1, digit_count)\n\n\nNUM_COUNT = num_enter(\"How many numbers will there be? - \")\nDIGIT = num_enter(\"Which digit to count? - \")\nDIGIT_COUNT = dig_count(NUM_COUNT, DIGIT)\n\nprint(f\"Number {DIGIT} occurs {DIGIT_COUNT} times.\")\n","sub_path":"Урок 2. Практическое задание/task_8/task_8_2.py","file_name":"task_8_2.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"558935954","text":"#!usr/bin/env python\n#-*- coding:utf-8 _*-\n\"\"\"\n@author:alvin\n@file: http_seesion.py\n@time: 2019/01/14\n\"\"\"\n'''会话对象:Session'''\nimport requests\ndef getHeader():\n\t\theaders={\n\t\t\t'UserAgent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',\n\t\t\t'Content-Type':'application/x-www-form-urlencoded'}\n\t\treturn headers\n\ndata={\n\t 'email':'13221454225',\n\t 'icode':'',\n\t 'origURL':'http://www.renren.com/home',\n\t 'domain':'renren.com',\n\t 'key_id':1,\n\t 'captcha_type':'web_login',\n\t 'password':'31a7f0da50cdb967b72610ca996e30fb2f64d2c93dc40283688e9491077e6ae6',\n\t 'rkey':'5aa2d6b85ce9f85402f2fa3af466d326',\n\t 'f':'https%3A%2F%2Fwww.sogou.com%2Flink%3Furl%3DDSOYnZeCC_r-h4h4RsVJtWYvcrgiTSe0'}\n\ndef login():\n\t#对Session类进行实例化\n\ts=requests.Session()\n\tr=s.post(\n\t\turl='http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2019011947419',\n\t\tdata=data,\n\t\theaders=getHeader())\n\treturn s\n\ndef getProfile():\n\tr=login().get('http://www.renren.com/969444156/profile')\n\tprint(r.text)\n\ngetProfile()\n\n","sub_path":"wuyaAPI/day11/http_seesion.py","file_name":"http_seesion.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"126409308","text":"\"\"\"1323. 
Maximum 69 Number Description\nGiven a positive integer num consisting only of digits 6 and 9.\n\nReturn the maximum number you can get by changing at most one digit (6 becomes 9, and 9 becomes 6).\n\nExample 1:\nInput: num = 9669\nOutput: 9969\nExplanation:\nChanging the first digit results in 6669.\nChanging the second digit results in 9969.\nChanging the third digit results in 9699.\nChanging the fourth digit results in 9666.\nThe maximum number is 9969.\n\nExample 2:\nInput: num = 9996\nOutput: 9999\nExplanation: Changing the last digit 6 to 9 results in the maximum number.\n\nExample 3:\nInput: num = 9999\nOutput: 9999\nExplanation: It is better not to apply any change.\n\nConstraints:\n1 <= num <= 10^4\nnum's digits are 6 or 9.\"\"\"\n\n\nclass Solution:\n def maximum69Number(self, num: int) -> int:\n num_list = [x for x in str(num)]\n if \"6\" not in num_list:\n return num\n for i in range(len(num_list)):\n if num_list[i] == \"6\":\n num_list[i] = \"9\"\n return int(\"\".join(num_list))\n\n\n\"\"\"Submission\nRuntime: 56 ms, faster than 5.93% of Python3 online submissions for Maximum 69 Number.\nMemory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for Maximum 69 Number.\"\"\"\n","sub_path":"leetcode/1323_maximum_69_number.py","file_name":"1323_maximum_69_number.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"398683017","text":"import sqlite3\nimport json\nfrom datetime import datetime\nimport pytz\n\n\ndef sql2json():\n tz = pytz.timezone(\"UTC\")\n offset = 978393600 # they use 02.01.2015 as starting point :)\n out = []\n conn = sqlite3.connect('SpendingTracker.sqlite')\n c = conn.cursor()\n for row in c.execute(\"\"\"\n select t.ZAMOUNT as amount,\n t.ZDATE as date,\n t.ZNOTE as note,\n t.ZTYPE as is_expense,\n c.ZNAME as category\n from ZTRANSACTION t join ZCATEGORY c on t.ZCATEGORY == c.Z_PK;\n \"\"\"):\n out.append({\n \"amount\": row[0],\n \"sub_date\": tz.localize(datetime.fromtimestamp(int(row[1]) + offset)).strftime('%Y-%m-%d %H:%M:%SZ'),\n \"note\": row[2],\n \"is_expense\": row[3],\n \"category\": row[4],\n })\n out.sort(key=lambda item:item['sub_date'], reverse=False)\n conn.close()\n return json.dumps(out)\n\n\ndef json2file(json_obj, json_file):\n with open(json_file, 'w+') as f:\n f.write(json_obj)\n print(\"Written to: %s\" % json_file)\n\n\nif __name__ == \"__main__\":\n json2file(sql2json(), \"spending.json\")\n","sub_path":"spending/sqlite2json.py","file_name":"sqlite2json.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"16376418","text":"# -*- coding: utf-8 -*-\n\"\"\"\nClass Tree for Prefix Trees homework\n\"\"\"\n\n# General Tree class\n# ------------------------------------------------------------------------------\n\nclass Tree:\n \"\"\"\n Simple class for General Tree\n \"\"\"\n\n def __init__(self, key=None, children=None):\n \"\"\"\n Init General Tree, children is [] if not given\n \"\"\"\n self.key = key\n if children is not None:\n self.children = children\n else:\n self.children = []\n\n @property\n def nbchildren(self):\n return len(self.children)\n\n \n","sub_path":"algo_python/prefix_trees/algopy/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"84524139","text":"#winner = \"Tie\"\r\n\r\ndef main ():\r\n 
#global winner\r\n play_game()\r\n #print (\"Game winner is: \", winner)\r\n\r\n \r\n\r\ndef play_game():\r\n import random\r\n computer_number = random.randint(1,3)\r\n #\r\n print(computer_number)\r\n #\r\n computer_word = turn_to_word(computer_number)\r\n player_word = get_player_word()\r\n print('Player picked: ', player_word)\r\n print(\"Computer picked: \", computer_word)\r\n game_winner = compare_words(computer_word, player_word)\r\n #return game_winner\r\n print (\"Game winner is: \", game_winner)\r\n \r\n\r\n\r\ndef turn_to_word(c_pick):\r\n if c_pick == 1:\r\n return 'rock'\r\n elif c_pick == 2:\r\n return 'paper'\r\n else:\r\n return 'scissors'\r\n\r\n\r\ndef get_player_word():\r\n word = input('Enter rock, paper, or scissors: ')\r\n return word\r\n\r\n\r\ndef compare_words(c_word, p_word):\r\n if c_word == p_word:\r\n #winner = \"Tie\"\r\n print('Game is a tie. Play again. ')\r\n play_game()\r\n elif c_word == 'rock':\r\n if p_word == 'paper':\r\n winner = \"Player\"\r\n else:\r\n winner = \"Computer\"\r\n elif c_word == 'paper':\r\n if p_word == 'rock':\r\n winner = 'Computer'\r\n else:\r\n winner = 'Player'\r\n elif c_word == 'scissors':\r\n if p_word == 'rock':\r\n winner = 'Player'\r\n else:\r\n winner = 'Computer'\r\n return winner\r\n \r\n# 1 is rock\r\n# 2 is paper\r\n# 3 is scissors\r\n\r\nmain()\r\n","sub_path":"RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"99869042","text":"import asyncio\nimport logging\nimport asyncpg\nimport traceback\nimport decimal\n\n_pool = None\n\n\nasync def connect():\n # Create a connection pool for each database defined in the configuration\n global _pool\n _pool = await asyncpg.create_pool(\n host=\"localhost\",\n user=\"postgres\",\n password=\"postgres\",\n database=\"postgres\",\n port=5432,\n min_size=0,\n max_size=3,\n max_queries=30,\n timeout=5,\n command_timeout=10,\n max_inactive_connection_lifetime=180\n )\n\n\nasync def close_connections():\n await _pool.close()\n\n\nasync def _fetch_data( query, *args):\n # Acquire a connection\n async with _pool.acquire(timeout=30) as connection:\n try:\n result = await connection.fetch(query, *args, timeout=3)\n print(\".\")\n return [dict(row) for row in result]\n except asyncio.TimeoutError:\n print(f\"Query timed out\\n{query}\")\n\n\nasync def fetch(query, *args):\n try:\n return await _fetch_data(query, *args)\n except:\n print(f\"{traceback.format_exc()}\\n{query} {args}\")\n return None\n\n\ndef print_counts():\n print(\"Queue size =\", _pool._queue.qsize())","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"175706065","text":"import os\nimport csv\nimport pandas as pd\nimport numpy as np\n\n# Read in test data\nsubsample_test = pd.read_csv('new_subsample_test.csv')\nsubsample_test['ground_list'] = subsample_test['ground_list'].apply(eval)\nsubsample_test['ground_list'] = subsample_test['ground_list'].apply(set)\n\n# Set parameters for run\ni_start = 0\ni_end_exclusive = 10000\nfpath = 'testdata_cosim.csv'\n\n# Get and save cosine similarity between playlist pairs\nif os.path.exists(fpath):\n write_mode = 'at' # Append if already exists\nelse:\n write_mode = 'wt' # Make a new file if not\nwith open(fpath, write_mode) as fp:\n writer = csv.writer(fp)\n if write_mode == 'wt':\n 
writer.writerow(['pid1', 'pid2', 'cosim']) # Write a header when making a new file\n for i in range(i_start, i_end_exclusive):\n pid_i = subsample_test['playlist_id'][i]\n set_i = subsample_test['ground_list'][i]\n print(f'Working on i={i}')\n for j in range(i+1, subsample_test.shape[0]):\n pid_j = subsample_test['playlist_id'][j]\n set_j = subsample_test['ground_list'][j]\n # Calculate cosine similarity\n cosim = len(set_i.intersection(set_j)) / (len(set_i)*len(set_j))**0.5 # This works because both vectors only contain ones and zeros\n if cosim != 0:\n writer.writerow([pid_i, pid_j, cosim])\n","sub_path":"Data/testdata_step1_get_and_save_cosim.py","file_name":"testdata_step1_get_and_save_cosim.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"611634345","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for\n)\nfrom portal.auth import (login_required, teacher_required)\nfrom . import db\n\nbp = Blueprint('sessions', __name__, url_prefix='/portal/sessions')\n\n@bp.route('//view-session/', methods=('GET', 'POST'))\n@login_required\ndef view_session(course_id, session_id):\n cur = db.get_db().cursor()\n cur.execute(\"\"\"SELECT * FROM courses\n WHERE id = %s;\"\"\",\n (course_id,))\n courses = cur.fetchall()\n cur.execute(\"\"\"SELECT * FROM session\n WHERE id = %s;\"\"\",\n (session_id,))\n sessions = cur.fetchall()\n cur.execute(\"\"\"SELECT * FROM assignments\n WHERE session_id = %s;\"\"\",\n (session_id,))\n assignments = cur.fetchall()\n\n #new code to make names appear rather than id\n #need to make a join table that points the roster ids to the names to display the names\n\n cur.execute(\"\"\"SELECT users.id, users.email, users.name, roster.users_id FROM roster\n JOIN users ON users.id= roster.users_id\n WHERE roster.session_id = %s;\"\"\",\n (session_id,))\n students = cur.fetchall()\n cur.close()\n if courses == [] or sessions == []:\n error = \"404 Not found\"\n return render_template('error.html', error=error)\n return render_template('portal/courses/sessions/view-session.html', courses=courses, sessions=sessions, assignments=assignments, students=students)\n\n@bp.route('//create-session', methods=('GET', 'POST'))\n@login_required\n@teacher_required\ndef create_session(course_id):\n\n if request.method == 'POST':\n name = request.form['name']\n times = request.form['times']\n error = None\n cur = db.get_db().cursor()\n cur.execute(\"\"\"\n SELECT * FROM session\n WHERE name = %s and courses_id = %s;\n \"\"\",\n (name, course_id))\n session = cur.fetchone()\n\n if session != None:\n error = \"That session already exists\"\n flash(error)\n\n if error is None:\n try:\n cur.execute(\"\"\"INSERT INTO session (courses_id, times, name)\n VALUES (%s, %s, %s);\n \"\"\",\n (course_id, times, name))\n db.get_db().commit()\n except:\n error = \"There was a problem creating that session\"\n flash(error)\n else:\n cur.execute(\"\"\"SELECT id FROM session\n WHERE name = %s and courses_id = %s;\n \"\"\",\n (name, course_id))\n sessions = cur.fetchone()\n session_id = sessions[0]\n\n return redirect(url_for('sessions.view_session', session_id=session_id, course_id=course_id))\n else:\n return redirect(url_for('sessions.create_session', course_id=course_id))\n return render_template('portal/courses/sessions/create-session.html')\n\n@bp.route('///add-student', methods=('GET', 'POST'))\n@login_required\n@teacher_required\ndef add_student(course_id, 
session_id):\n cur = db.get_db().cursor()\n cur.execute(\"\"\"SELECT users.*, roster.* FROM roster\n JOIN users ON users.id = roster.users_id\n WHERE session_id = %s\"\"\",\n (session_id,))\n added_students = cur.fetchall()\n cur.execute(\"\"\"SELECT * FROM users\n WHERE role = 'student'\"\"\")\n all_students = cur.fetchall()\n\n cur.execute(\"\"\"SELECT * FROM courses\n WHERE id = %s;\"\"\",\n (course_id,))\n courses = cur.fetchall()\n cur = db.get_db().cursor()\n cur.execute(\"\"\"SELECT * FROM session\n WHERE id = %s;\"\"\",\n (session_id,))\n sessions = cur.fetchall()\n\n if courses == [] or sessions == []:\n error = \"404 Not found\"\n return render_template('error.html', error=error)\n\n if request.method == 'POST':\n student = request.form['student']\n error = None\n for added_student in added_students:\n print(added_student['users_id'])\n print(student)\n if added_student['users_id'] == int(student):\n error = \"That student is already in the session\"\n flash(error)\n if error == None:\n\n try:\n cur.execute(\"\"\"INSERT INTO roster (users_id, session_id)\n VALUES (%s, %s);\n \"\"\",\n (student, session_id))\n db.get_db().commit()\n cur.close()\n except:\n error = \"There was a problem adding that student\"\n flash(error)\n else:\n return redirect(url_for('sessions.view_session', session_id=session_id, course_id=course_id))\n\n return render_template('portal/courses/sessions/add-students.html', added_students=added_students, all_students=all_students)\n","sub_path":"portal/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243457110","text":"\n# Python 2.7 Standard Library\nfrom __future__ import absolute_import, print_function\nimport argparse\nimport collections\nimport inspect\nimport json\nimport os.path\nimport shutil\nimport sys\nimport tempfile\n\n# Third-Party Libraries\nimport plumbum\n\n# Pandoc\nfrom .about import *\nfrom . import utils\n\n\n# TODO / Roadmap\n# ------------------------------------------------------------------------------\n#\n# TODO: target 2.0 milestone, that supports up to pandoc 2.0\n#\n# - rethink the UX when configure is NOT called explictly, \n# I may import types, but there is nothing in it.\n# It's only when I do some read/write that the default configure\n# is called ... Shall I plug a configure() hook into the types\n# modules, to be called at import time?\n# (so that you can still import pandoc, configure if needed,\n# then import types). But then read and write would have to\n# import types lazily (OK, why not?).\n#\n# - study encoding issues and bytes vs unicode representation.\n#\n#\n# - switch readers/writers (lazily) depending of pandoc_api_version >= 1.17\n# or not\n#\n# - pandoc executable API (connect with version API)\n#\n# - reader and writer for more than JSON (Markdown, HTML, etc.)\n#\n# - test new JSON scheme completely (need a harness with arbitrary \n# pandoc executable version)\n#\n# - error management/messages in type checking. MAYBE ROLLBACK THIS\n# ATM (needs a great effort) and make a branch that will land in\n# 3.0 ? Or 2.1 whatever ...\n#\n# - documentation (mkdocs): START ! 
Will make the public API design\n# issues easier (maybe)\n#\n# - reconsider \"main\"?\n#\n\n\n# Configuration\n# ------------------------------------------------------------------------------\n_configuration = None # TODO: public API for this?\n\ndef configure(auto=None, path=None, version=None, pandoc_types_version=None):\n global _configuration\n\n # In the default configuration, we set auto to `True`.\n if auto is None and \\\n path is None and \\\n version is None and \\\n pandoc_types_version is None:\n auto = True\n\n if auto is True: \n try:\n pandoc = plumbum.local['pandoc'] # Encoding issue? pandoc works\n # with utf-8 in and out by construction, but maybe plumbum infers\n # something different with the locale?\n found_path = str(pandoc.executable)\n except plumbum.CommandNotFound as error:\n message = 'cannot find the pandoc program.\\n'\n paths = [str(p) for p in error.path]\n message += 'paths:' + str(paths)\n raise RuntimeError(message)\n if path is None:\n path = found_path\n elif path != found_path:\n error = 'found path {0!r} with auto=True '\n error += 'but it doesn\\'t match path={1!r}.'\n raise ValueError(error.format(found_path, path))\n\n if path is not None:\n # TODO: manage invalid path\n pandoc = plumbum.machines.LocalCommand(path, \"utf-8\")\n found_version = pandoc('--version').splitlines()[0].split(' ')[1]\n if version is None:\n version = found_version\n elif version != found_version:\n error = 'the version of the pandoc program is {0!r}'\n error += 'but it doesn\\'t match version={1!r}.'\n raise ValueError(error.format(found_version, version))\n\n if version is not None:\n found_pandoc_types_versions = utils.resolve(version)\n if pandoc_types_version is None:\n if len(found_pandoc_types_versions) >= 1:\n # pick latest (ignore the real one that may be unknown)\n pandoc_types_version = found_pandoc_types_versions[-1]\n else:\n error = 'cannot find a version of pandoc-types '\n error += 'matching pandoc {0}' \n raise ValueError(error.format(version))\n elif pandoc_types_version not in found_pandoc_types_versions:\n error = 'the version of pandoc is {0!r}'\n error += 'but it doesn\\'t match pandoc_types_version={1!r}.'\n raise ValueError(error.format(version, pandoc_types_version))\n\n _configuration = {\n 'auto': auto, \n 'path': path, \n 'version': version, \n 'pandoc_types_version': pandoc_types_version\n }\n\n if \"pandoc.types\" not in sys.modules:\n from .types import make_types # enough to trigger make_types()\n else:\n from .types import make_types; make_types()\n\n return _configuration\n\n\n# JSON Reader / Writer\n# ------------------------------------------------------------------------------\n\n# Break compat? Read consumes markdown only? Bump major version number then.\n# Yay, worth it.\n# \n# TODO: optional input or output FILES or FILENAMES in read/write? Dunno.\n# Think about it. The NAMES read and write seem to imply it ...\n# But the filesystem stuff is orthogonal really ...\n# However, for a \".doc\" output for example, writing it as a string\n# is *probably* useless. \n#\n# TODO: Study also the str vs bytes stuff: we don't want encoding stuff \n# mixed in when we produce a word document, just BYTES. \n# For Markdown OTOH, unicode repr is the right abstraction.\n# What to do for latex, html docs? Bytes or Unicode?\n# FYI, Pandoc is using DECLARING utf-8 encoding for both in standalone\n# mode, so if you write these standalone outputs, you SHALL use utf-8 ...\n# Otherwise, well, I don't know ... 
but it's pretty much the same for\n# markdown: to get it properly processed, pandoc REQUIRES utf-8.\n# So, distinguish, markdown, latex and html as \"source formats\" and\n# use unicode for them? And bytes for the others?\n# What is the list? There is also ReST? How to get it automatically?\n# Try to trap the error? (Assuming the error message are stable?)\n# Nota: from the pandoc source code, ATM, \"non-text formats\" are\n# [\"odt\",\"docx\",\"epub2\",\"epub3\",\"epub\",\"pptx\"]\n# But the non-text format categorization is used for OUTPUTS only,\n# what about inputs? OK, there is a classification into StringReader\n# (text sources) and ByteStringReader. For the latter, piping is not\n# accepted. So where is the list of the types of readers?\n# Grepping the sources leads to \"docx\", \"odt\" and \"epub\". OK then.\n# Am I really willing to hardcode all this stuff, or shall I return\n# bytes and let the user decide what to do with it? For INPUTS,\n# I can still accept unicode and convert to utf-8 seemlessly, the\n# question is: what to do for outputs? Only return unicode for markdown?\n# (that has no encoding metadata)? Dunno ...\n# UPDATE: OK, I have configured plumbum to always use utf-8 when\n# there is some conversion to be made between unicode and bytes.\n# BUT how can I deal with stuff (in or out) that are BYTES that\n# may not be utf-8 stuff?\n# Also, I forgot to configure cat for utf-8 ... and is cat available\n# on windows? Use temp files instead, that will solve two issues at\n# the same time (bytes as input and car availability).\n# Arf for the output, this is funny: for docx for example,\n# pandoc (haskell) WONT LET ME USE STDOUT! Which is nice :)\n# Nota: it won't read it either; so basically it manages differently\n# the binary formats. 
Same thing for epub for examples.\n# The messages are typically \"pandoc: Cannot read archive from stdin\"\n# and \"Specify an output file using the -o option\".\n# So I have to find a list in pandoc of binary vs text/utf-8 formats.\n# OR detect the appropriate error at runtime?\n# And then the bytes vs unicode policy is clear.\n# And I don't need to tweak encoding settings in plumbum since I\n# will use files for input and output anyway.\n#\n# OK, so it's probaly safe to consider a shortlist of \"binary\" formats\n# that are \"doc*\", \"epub*\", \"ppt*\", \"odt\" and to return bytes only\n# for these formats.\n#\n# And yes, working directly with filenames/files should work out\n# of the box, and yes, NOT using files should be ok too (and is\n# still my preferences: it should be simpler; if you need files,\n# use a proper keyword argument).\n\n# TODO: add \".py\" / Python support\n\n_readers = {\n \".xhtml\" : \"html\",\n \".html\" : \"html\",\n \".htm\" : \"html\",\n \".md\" : \"markdown\",\n \".markdown\" : \"markdown\",\n \".muse\" : \"muse\",\n \".tex\" : \"latex\",\n \".latex\" : \"latex\",\n \".ltx\" : \"latex\",\n \".rst\" : \"rst\",\n \".org\" : \"org\",\n \".lhs\" : \"markdown+lhs\",\n \".db\" : \"docbook\",\n \".opml\" : \"opml\",\n \".wiki\" : \"mediawiki\",\n \".dokuwiki\" : \"dokuwiki\",\n \".textile\" : \"textile\",\n \".native\" : \"native\",\n \".json\" : \"json\",\n \".docx\" : \"docx\",\n \".t2t\" : \"t2t\",\n \".epub\" : \"epub\",\n \".odt\" : \"odt\",\n \".pdf\" : \"pdf\",\n \".doc\" : \"doc\",\n}\n\ndef default_reader_name(filename):\n _, ext = os.path.splitext(filename)\n return _readers.get(ext)\n\ndef read(source=None, file=None, format=None, options=None):\n if options is None:\n options = []\n\n filename = None\n if source is None:\n if file is None:\n raise ValueError(\"source or file should be defined.\")\n if not hasattr(file, 'read'):\n filename = file\n file = open(filename, 'rb')\n source = file.read()\n else:\n if file is not None:\n raise ValueError(\"source or file should be defined, not both.\")\n\n tmp_dir = tempfile.mkdtemp()\n if not isinstance(source, bytes):\n source = source.encode('utf-8')\n input_path = os.path.join(tmp_dir, 'input')\n input = open(input_path, 'wb')\n input.write(source)\n input.close()\n\n if format is None and filename is not None:\n format = default_reader_name(filename)\n if format is None:\n format = 'markdown'\n if format != 'json' and _configuration['path'] is None:\n error = \"reading the {0!r} format requires the pandoc program\"\n raise RuntimeError(error.format(format))\n\n if format == 'json':\n json_file = open(input_path, \"r\")\n else:\n if _configuration['path'] is None:\n error = \"reading the {0!r} format requires the pandoc program\"\n raise RuntimeError(error.format(format))\n pandoc = plumbum.machines.LocalCommand(_configuration['path'])\n output_path = os.path.join(tmp_dir, 'output.js') \n options = ['-t', 'json', '-o', output_path] + \\\n list(options) + ['-f', format, input_path]\n pandoc(options)\n json_file = open(output_path, \"r\")\n json_ = json.load(json_file)\n shutil.rmtree(tmp_dir)\n if utils.version_key(_configuration[\"pandoc_types_version\"]) < [1, 17]:\n return read_json_v1(json_)\n else:\n return read_json_v2(json_)\n\n# TODO: add \".py\" / Python support\n\n_writers = { \n \"\" : \"markdown\",\n \".tex\" : \"latex\",\n \".latex\" : \"latex\",\n \".ltx\" : \"latex\",\n \".context\" : \"context\",\n \".ctx\" : \"context\",\n \".rtf\" : \"rtf\",\n \".rst\" : \"rst\",\n \".s5\" : \"s5\",\n 
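 # NOTE: the keys in this table are file extensions mapped to pandoc writer names;
 # ".tei.xml" and single-digit man-section extensions (e.g. ".3") are special-cased
 # in default_writer_name() below.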
\".native\" : \"native\",\n \".json\" : \"json\",\n \".txt\" : \"markdown\",\n \".text\" : \"markdown\",\n \".md\" : \"markdown\",\n \".muse\" : \"muse\",\n \".markdown\" : \"markdown\",\n \".textile\" : \"textile\",\n \".lhs\" : \"markdown+lhs\",\n \".texi\" : \"texinfo\",\n \".texinfo\" : \"texinfo\",\n \".db\" : \"docbook\",\n \".odt\" : \"odt\",\n \".docx\" : \"docx\",\n \".epub\" : \"epub\",\n \".org\" : \"org\",\n \".asciidoc\" : \"asciidoc\",\n \".adoc\" : \"asciidoc\",\n \".fb2\" : \"fb2\",\n \".opml\" : \"opml\",\n \".icml\" : \"icml\",\n \".tei.xml\" : \"tei\",\n \".tei\" : \"tei\",\n \".ms\" : \"ms\",\n \".roff\" : \"ms\",\n \".pptx\" : \"pptx\",\n \".xhtml\" : \"html\",\n \".html\" : \"html\",\n \".htm\" : \"html\",\n}\n\ndef default_writer_name(filename):\n if filename.endswith(\".tei.xml\"):\n filename = filename[:-4]\n _, ext = os.path.splitext(filename)\n if len(ext) == 2 and ext[1] in \"0123456789\":\n return \"man\"\n else:\n return _writers.get(ext)\n\ndef write(doc, file=None, format=None, options=None):\n if _configuration is None:\n configure()\n if options is None:\n options = []\n\n tmp_dir = tempfile.mkdtemp()\n filename = None\n if file is not None and not hasattr(file, 'write'):\n filename = file\n file = open(filename, 'wb')\n\n if format is None and filename is not None:\n format = default_writer_name(filename)\n if format is None:\n format = 'markdown' # instead of html, yep.\n if format != 'json' and _configuration['path'] is None:\n error = \"writing the {0!r} format requires the pandoc program\"\n\n if utils.version_key(_configuration[\"pandoc_types_version\"]) < [1, 17]:\n json_ = write_json_v1(doc)\n else:\n json_ = write_json_v2(doc)\n json_str = json.dumps(json_)\n input_path = os.path.join(tmp_dir, 'input.js') \n input = open(input_path, 'wb')\n input.write(json_str.encode('utf-8'))\n input.close()\n\n if format == 'json':\n output_path = input_path\n else:\n pandoc = plumbum.machines.LocalCommand(_configuration['path'])\n output_path = os.path.join(tmp_dir, 'output')\n options = ['-t', format, '-o', output_path] + \\\n list(options) + ['-f', 'json', input_path]\n pandoc(options)\n\n output_bytes = open(output_path, 'rb').read()\n binary_formats = [\"doc\", \"epub\", \"ppt\", \"odt\"]\n if any(tag in format for tag in binary_formats):\n output = output_bytes\n else: # text format\n output = output_bytes.decode('utf-8')\n shutil.rmtree(tmp_dir)\n\n if file is not None:\n file.write(output_bytes)\n return output\n\n\n# JSON Reader v1\n# ------------------------------------------------------------------------------\ndef read_json_v1(json_, type_=None):\n import pandoc.types as types\n if type_ is None:\n type_ = types.Pandoc\n if isinstance(type_, str):\n type_ = getattr(types, type_)\n if not isinstance(type_, list): # not a type def (yet).\n if issubclass(type_, types.Type):\n type_ = type_._def\n else: # primitive type\n return type_(json_)\n\n if type_[0] == \"type\": # type alias\n type_ = type_[1][1]\n return read_json_v1(json_, type_)\n if type_[0] == \"list\":\n item_type = type_[1][0]\n return [read_json_v1(item, item_type) for item in json_]\n if type_[0] == \"tuple\":\n tuple_types = type_[1]\n return tuple(read_json_v1(item, item_type) for (item, item_type) in zip(json_, tuple_types))\n if type_[0] == \"map\":\n key_type, value_type = type_[1]\n return types.map([(read_json_v1(k, key_type), read_json_v1(v, value_type)) for (k, v) in json_.items()])\n\n data_type = None\n constructor = None\n if type_[0] in (\"data\", \"newtype\"):\n data_type 
= type_\n constructors = data_type[1][1]\n if len(constructors) == 1:\n constructor = constructors[0]\n else:\n constructor = getattr(types, json_[\"t\"])._def\n elif type_[0][0] == type_[0][0].upper():\n constructor = type_\n constructor_type = getattr(types, constructor[0])\n data_type = constructor_type.__mro__[2]._def\n\n single_type_constructor = (len(data_type[1][1]) == 1)\n single_constructor_argument = (len(constructor[1][1]) == 1)\n is_record = (constructor[1][0] == \"map\")\n\n json_args = None\n args = None\n if not is_record:\n if single_type_constructor:\n json_args = json_\n else:\n json_args = json_[\"c\"]\n if single_constructor_argument:\n json_args = [json_args]\n args = [read_json_v1(jarg, t) for jarg, t in zip(json_args, constructor[1][1])]\n else:\n keys = [k for k,t in constructor[1][1]]\n types_= [t for k, t in constructor[1][1]]\n json_args = [json_[k] for k in keys]\n args = [read_json_v1(jarg, t) for jarg, t in zip(json_args, types_)]\n C = getattr(types, constructor[0])\n return C(*args)\n\n\n# JSON Writer v1\n# ------------------------------------------------------------------------------\ndef write_json_v1(object_):\n import pandoc.types as types\n odict = collections.OrderedDict\n type_ = type(object_)\n if not isinstance(object_, types.Type):\n if isinstance(object_, (list, tuple)):\n json_ = [write_json_v1(item) for item in object_]\n elif isinstance(object_, dict):\n json_ = odict((k, write_json_v1(v)) for k, v in object_.items())\n else: # primitive type\n json_ = object_\n else:\n constructor = type(object_)._def\n data_type = type(object_).__mro__[2]._def\n single_type_constructor = (len(data_type[1][1]) == 1)\n single_constructor_argument = (len(constructor[1][1]) == 1)\n is_record = (constructor[1][0] == \"map\")\n\n json_ = odict()\n if not single_type_constructor:\n json_[\"t\"] = type(object_).__name__\n\n if not is_record:\n c = [write_json_v1(arg) for arg in object_]\n if single_constructor_argument:\n c = c[0]\n if single_type_constructor:\n json_ = c\n else:\n json_[\"c\"] = c\n else:\n keys = [kt[0] for kt in constructor[1][1]]\n for key, arg in zip(keys, object_):\n json_[key] = write_json_v1(arg)\n return json_\n\n\n# JSON Reader v2\n# ------------------------------------------------------------------------------\ndef read_json_v2(json_, type_=None):\n import pandoc.types as types\n if type_ is None:\n type_ = types.Pandoc\n if isinstance(type_, str):\n type_ = getattr(types, type_)\n if not isinstance(type_, list): # not a type def (yet).\n if issubclass(type_, types.Type):\n type_ = type_._def\n else: # primitive type\n return type_(json_)\n\n if type_[0] == \"type\": # type alias\n type_ = type_[1][1]\n return read_json_v2(json_, type_)\n if type_[0] == \"list\":\n item_type = type_[1][0]\n return [read_json_v2(item, item_type) for item in json_]\n if type_[0] == \"tuple\":\n tuple_types = type_[1]\n return tuple(read_json_v2(item, item_type) for (item, item_type) in zip(json_, tuple_types))\n if type_[0] == \"map\":\n key_type, value_type = type_[1]\n return types.map([(read_json_v2(k, key_type), read_json_v2(v, value_type)) for (k, v) in json_.items()])\n\n data_type = None\n constructor = None\n if type_[0] in (\"data\", \"newtype\"):\n data_type = type_\n constructors = data_type[1][1]\n if len(constructors) == 1:\n constructor = constructors[0]\n else:\n constructor = getattr(types, json_[\"t\"])._def\n elif type_[0][0] == type_[0][0].upper():\n constructor = type_\n constructor_type = getattr(types, constructor[0])\n data_type = 
constructor_type.__mro__[2]._def\n\n single_type_constructor = (len(data_type[1][1]) == 1)\n single_constructor_argument = (len(constructor[1][1]) == 1)\n is_record = (constructor[1][0] == \"map\")\n\n json_args = None\n args = None\n if constructor[0] == \"Pandoc\":\n # TODO; check API version compat\n meta = read_json_v2(json_[\"meta\"], types.Meta)\n blocks = read_json_v2(json_[\"blocks\"], [\"list\", [\"Block\"]])\n return types.Pandoc(meta, blocks)\n elif constructor[0] == \"Meta\":\n type_ = ['map', ['String', 'MetaValue']]\n return types.Meta(read_json_v2(json_, type_)) \n elif not is_record:\n if single_type_constructor:\n json_args = json_\n else:\n json_args = json_.get(\"c\", [])\n if single_constructor_argument:\n json_args = [json_args]\n args = [read_json_v2(jarg, t) for jarg, t in zip(json_args, constructor[1][1])]\n else:\n keys = [k for k,t in constructor[1][1]]\n types_= [t for k, t in constructor[1][1]]\n json_args = [json_[k] for k in keys]\n args = [read_json_v2(jarg, t) for jarg, t in zip(json_args, types_)]\n C = getattr(types, constructor[0])\n return C(*args)\n\n\n# JSON Writer v2\n# ------------------------------------------------------------------------------\ndef write_json_v2(object_):\n import pandoc.types as types\n odict = collections.OrderedDict\n type_ = type(object_)\n if not isinstance(object_, types.Type):\n if isinstance(object_, (list, tuple)):\n json_ = [write_json_v2(item) for item in object_]\n elif isinstance(object_, dict):\n json_ = odict((k, write_json_v2(v)) for k, v in object_.items())\n else: # primitive type\n json_ = object_\n elif isinstance(object_, types.Pandoc):\n version = _configuration[\"pandoc_types_version\"]\n metadata = object_[0]\n blocks = object_[1]\n json_ = odict()\n json_[\"pandoc-api-version\"] = version\n json_[\"meta\"] = write_json_v2(object_[0][0])\n json_[\"blocks\"] = write_json_v2(object_[1])\n else:\n constructor = type(object_)._def\n data_type = type(object_).__mro__[2]._def\n single_type_constructor = (len(data_type[1][1]) == 1)\n single_constructor_argument = (len(constructor[1][1]) == 1)\n is_record = (constructor[1][0] == \"map\")\n\n json_ = odict()\n if not single_type_constructor:\n json_[\"t\"] = type(object_).__name__\n\n if not is_record:\n c = [write_json_v2(arg) for arg in object_]\n if single_constructor_argument:\n c = c[0]\n if single_type_constructor:\n json_ = c\n else:\n if len(c) != []:\n json_[\"c\"] = c\n else:\n keys = [kt[0] for kt in constructor[1][1]]\n for key, arg in zip(keys, object_):\n json_[key] = write_json_v2(arg)\n return json_\n \n\n# Iteration\n# ------------------------------------------------------------------------------\ndef iter(elt, enter=None, exit=None):\n if enter is not None:\n enter(elt)\n yield elt\n if isinstance(elt, dict):\n elt = elt.items()\n if hasattr(elt, \"__iter__\") and not isinstance(elt, types.String):\n for child in elt:\n for subelt in iter(child, enter, exit):\n yield subelt\n if exit is not None:\n exit(elt)\n\ndef iter_path(elt):\n path = []\n def enter(elt_):\n path.append(elt_)\n def exit(elt_):\n path.pop()\n for elt_ in iter(elt, enter, exit):\n yield path\n\ndef get_parent(doc, elt):\n for path in iter_path(doc):\n elt_ = path[-1]\n if elt is elt_:\n parent = path[-2] if len(path) >= 2 else None\n return parent\n\n\n# Main Entry Point\n# ------------------------------------------------------------------------------\n\n# TODO: adapt to the new API or remove (?)\ndef main():\n prog = \"python -m pandoc\"\n description = \"Read/write pandoc JSON 
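# ---------------------------------------------------------------------------
# A self-contained sketch (not part of the original module) of the traversal
# pattern that iter/iter_path/get_parent above rely on: a recursive generator
# fires `enter` before descending into an element and `exit` afterwards, so a
# shared list always mirrors the ancestor chain of the element being yielded.
# All names below are illustrative only.
def walk(elt, enter=None, exit=None):
    if enter is not None:
        enter(elt)
    yield elt
    children = elt.items() if isinstance(elt, dict) else elt
    if hasattr(children, "__iter__") and not isinstance(children, str):
        for child in children:
            yield from walk(child, enter, exit)
    if exit is not None:
        exit(elt)

def walk_paths(elt):
    path = []
    for _ in walk(elt, path.append, lambda _elt: path.pop()):
        yield list(path)

# Example: the parent of 3 inside [1, [2, 3]] is the inner list [2, 3].
for p in walk_paths([1, [2, 3]]):
    if p[-1] == 3:
        assert p[-2] == [2, 3]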
documents with Python\"\n parser = argparse.ArgumentParser(prog=prog, description=description)\n\n try:\n stdin = sys.stdin.buffer\n except:\n stdin = sys.stdin\n parser.add_argument(\"input\", \n nargs=\"?\", metavar=\"INPUT\",\n type=argparse.FileType(\"rb\"), default=stdin,\n help=\"input file\")\n try:\n stdout = sys.stdout.buffer\n except:\n stdout = sys.stdout\n parser.add_argument(\"-o\", \"--output\", \n nargs=\"?\", \n type=argparse.FileType(\"wb\"), default=sys.stdout,\n help=\"output file\")\n args = parser.parse_args()\n\n input_text = args.input.read()\n if \"b\" in args.input.mode:\n # given the choice, we interpret the input as utf-8\n input_text = input_text.decode(\"utf-8\")\n\n try: # try JSON content first\n json_ = json.loads(input_text, object_pairs_hook=collections.OrderedDict)\n doc = read(json_)\n except:\n pass # maybe it's a Python document?\n else:\n doc_repr = (repr(doc) + \"\\n\") # this repr is 7-bit safe.\n if \"b\" in args.output.mode:\n # given the choice, we use utf-8.\n doc_repr = doc_repr.encode(\"utf-8\")\n args.output.write(doc_repr)\n return\n \n globs = types.__dict__.copy()\n try:\n doc = eval(input_text, globs)\n json_ = write(doc)\n except:\n pass # not a Python document either ...\n else:\n json_repr = (json.dumps(json_) + \"\\n\") # also 7-bit safe\n if \"b\" in args.output.mode:\n # given the choice, we use utf-8.\n json_repr = json_repr.encode(\"utf-8\")\n args.output.write(json_repr)\n return\n\n sys.exit(\"pandoc (python): invalid input document\")\n\n\n","sub_path":"venv/lib/python3.8/site-packages/pandoc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":24937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"133107042","text":"\n# server.py\n#\n# Copyright (c) 2016, mar77i \n#\n# This software may be modified and distributed under the terms\n# of the ISC license. 
See the LICENSE file for details.\n\nfrom datetime import datetime\nfrom json import loads as json_loads\nfrom module import Module\nfrom os import linesep\nfrom subprocess import Popen, PIPE\n\n\nclass Server(Module):\n def __init__(self, main, parent=None):\n super().__init__(main, parent=parent)\n\n def do_action(self):\n if \"reload\" in self.main.post:\n self.parent.ajax()\n self.main.reload()\n elif \"watch\" in self.main.post:\n self.parent.ajax()\n\n def __iter__(self):\n return iter({\n \"label\": self.label,\n }.items())\n\n def logwatch(self, last_ts):\n args = [\"journalctl\", \"-b\", \"-ojson\"]\n skip = False\n if last_ts is not None and len(last_ts) > 0:\n args.append(\"--since={}\".format(last_ts))\n skip = True\n else:\n args.extend((\"-n\", \"10\"))\n sp = Popen(args, stdout=PIPE, universal_newlines=True)\n stdout = sp.communicate()[0]\n if sp.returncode == 0:\n for x in stdout[:-1].split(linesep):\n x = json_loads(x)\n ts = str(datetime.fromtimestamp(\n int(x[\"__REALTIME_TIMESTAMP\"]) / 1000000))\n if skip and ts == last_ts:\n skip = False\n continue\n yield \"{} {} {}: {}\".format(\n ts, x[\"_HOSTNAME\"], x[\"SYSLOG_IDENTIFIER\"], x[\"MESSAGE\"])\n\n def __str__(self):\n if \"reload\" in self.main.post:\n return \"restarting server\"\n elif \"watch\" in self.main.post:\n return linesep.join(\n self.logwatch(self.main.post[\"last_ts\"][1].decode()))\n return self.t.format(**dict(self))\n\n\n__call__ = Server\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"632937720","text":"import web\r\nimport pdb\r\n\r\nurls = (\r\n '/', 'index',\r\n '/(.*)', 'index2' # (.*) is a regex match. See index2 below\r\n)\r\n\r\nclass index:\r\n def GET(self):\r\n #pdb.set_trace()\r\n render = web.template.render('templates/')\r\n #name = 'Bob'\r\n # name will match name GET paremeter i.e. 
http://127.0.0.1?name=Bob\r\n i = web.input(name=None)\r\n return render.index(i.name)\r\n\r\nclass index2:\r\n def GET(self,name):\r\n #pdb.set_trace()\r\n render = web.template.render('templates/')\r\n return render.index2(name)\r\n\r\nif __name__ == "__main__":\r\n app = web.application(urls, globals())\r\n app.run()","sub_path":"experiment/web/web.py/hello2.py","file_name":"hello2.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"416499052","text":"from django.test import SimpleTestCase\nfrom django.utils.safestring import SafeText\nfrom unittest import skip\n\nfrom ..models import TempCommCareUser\n\n\n# NOTE: I do not think this class is used, as the constructor throws an exception\n@skip("constructor doesnt work")\nclass TestTempUser_UsernameInReport(SimpleTestCase):\n def test_unknown_user_generates_correct_template(self):\n user = TempCommCareUser('test_domain', 'unknown_user', 'id')\n html = user.username_in_report\n self.assertEqual(html, 'unknown_user [unregistered]')\n self.assertIsInstance(html, SafeText)\n\n def test_escapes_unknown_username(self):\n user = TempCommCareUser('test_domain', 'unknown&user', 'id')\n html = user.username_in_report\n self.assertEqual(html, 'unknown&amp;user [unregistered]')\n\n def test_demo_user_generates_correct_template(self):\n user = TempCommCareUser('test_domain', 'demo_user', 'id')\n html = user.username_in_report\n self.assertEqual(html, 'demo_user')\n self.assertIsInstance(html, SafeText)\n\n def test_admin_user_generates_correct_template(self):\n user = TempCommCareUser('test_domain', 'admin', 'id')\n html = user.username_in_report\n self.assertEqual(html, 'admin (id)')\n self.assertIsInstance(html, SafeText)\n","sub_path":"corehq/apps/reports/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"286566445","text":""""\r\n\tDetect a file's encoding, and count its total number of lines and blank lines.\r\n"""\r\nfrom chardet import detect\r\ncount,blanks = 0,0\r\nwith open("a.txt", 'rb') as fp:\r\n\t# Detect the file encoding; the encoding name is saved in code\r\n\tcode = detect(fp.read())['encoding']\r\n\tprint(code)\r\n\r\nwith open("a.txt", 'r',encoding=code) as fp:\r\n\twhile True:\r\n\t\tline = fp.readline()\r\n\t\tif not line:\r\n\t\t\tbreak\r\n\t\tif len(line.strip())==0:# gives the same result as: if (len(line)-1)==0: blanks+=1\r\n\t\t\tblanks+=1\r\n\t\t# if (len(line)-1)==0:\r\n\t\t# \tblanks+=1\r\n\t\t#print(len(line.strip()))  # print each line's length after stripping\r\n\t\tcount+=1\r\nprint(count,blanks)","sub_path":"编码形式、行数、空格、.py","file_name":"编码形式、行数、空格、.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"508311979","text":"#coding=utf-8\nimport cv2\nimport numpy as np\nimport copy\nimport imglab # a thinning algorithm from the internet\n\nele1 = [[255,255,255],[-1,0,-1],[0,0,0]]\nele2 = [[-1,255,255],[0,0,255],[0,0,-1]]\nele3 = [[0,-1,255],[0,0,255],[0,-1,255]]\nele4 = [[0,0,-1],[0,0,255],[-1,255,255]]\nele5 = [[0,0,0],[-1,0,-1],[255,255,255]]\nele6 = [[-1,0,0],[255,0,0],[255,255,-1]]\nele7 = [[255,-1,0],[255,0,0],[255,-1,0]]\nele8 = [[255,255,-1],[255,0,0],[-1,0,0]]\n\n# return height,length of img\ndef getImgSize(img):\n return len(img),len(img[0])\n\ndef IsA(array,i,j):\n tmp = [array[i][j],array[i-1][j],array[i-1][j+1],array[i][j+1],array[i+1][j+1],array[i+1][j],array[i+1][j-1],array[i][j-1],array[i-1][j-1]]\n n=0\n for i in 
range(1,8):\n if tmp[i]==0 and tmp[i+1]==255:\n n = n+1\n return n\n\n# Zhang-thinning algorithmn\ndef Zhangthin(img):\n mark=1\n height = len(img)\n length = len(img[0])\n while mark==1:\n mark=0\n for i in range(1,height-1):\n for j in range(1,length-1):\n cond = 0\n if img[i][j]==255:\n n = 0\n for ii in range(-1,2):\n for jj in range(-1,2):\n n = n+(img[i+ii][j+jj])/255\n if n >=3 and n <= 7:\n cond += 1\n if IsA(img,i,j)==1:\n cond += 1\n if (int(img[i-1][j])*img[i][j+1]*img[i+1][j])==0:\n cond += 1\n if (int(img[i][j+1])*img[i+1][j]*img[i][j-1])==0:\n cond += 1\n if cond == 4:\n mark = 1\n img[i][j] = 0\n\n for i in range(1,height-1):\n for j in range(1,length-1):\n cond = 0\n if img[i][j]==255:\n n = 0\n for ii in range(-1,2):\n for jj in range(-1,2):\n n = n + img[i+ii][j+jj]\n if n>=3 and n<=7:\n cond += 1\n if IsA(img,i,j)==1:\n cond += 1\n if (int(img[i-1][j])*img[i][j+1]*img[i][j-1])==0:\n cond += 1\n if (int(img[i][j-1])*img[i+1][j]*img[i][j-1])==0:\n cond += 1\n if cond == 4:\n mark = 1\n img[i][j] = 0\n\n return img\n\n# fill the img's edge with white color\ndef edgeFilling(img):\n height = len(img)\n length = len(img[0])\n for i in range(0,length):\n img[0][i] = 255\n img[height-1][i] = 255\n for j in range(0,height):\n img[j][0] = 255\n img[j][length-1] = 255\n return img\n\n# hit-or-miss operation\ndef homTransform(img,ele):\n height = len(img)\n length = len(img[0])\n h_off = len(ele)/2\n l_off = len(ele[0])/2\n source = copy.deepcopy(img)\n for i in range(h_off,height-h_off):\n for j in range(l_off,length-l_off):\n flag = 0\n for ii in range(-h_off,h_off+1):\n if flag == 1: break\n for jj in range(-l_off,l_off+1):\n imgbit = source[i+ii][j+jj]\n img[i][j] = 0\n eletmp = ele[h_off+ii][l_off+jj]\n if eletmp == -1: continue\n elif eletmp != imgbit:\n flag = 1\n img[i][j] = 255\n break\n return img\n\n#src and dest must be the same size\ndef diffImg(src,tmp):\n height,length = getImgSize(src)\n for i in range(0,height):\n for j in range(0,length):\n if src[i][j] == 0:\n if tmp[i][j] == 0:\n src[i][j] = 255\n return src\n\n# thinning by a single structuring element\ndef ThinningByEle(img,ele):\n source = copy.deepcopy(img)\n img = homTransform(img,ele)\n return diffImg(source,img)\n\n# thinning by a list of structuring elements\ndef Thinning(img,eleList):\n t = len(eleList)\n for i in range(0,t):\n img = ThinningByEle(img,eleList[i])\n img = edgeFilling(img)\n return img\n\nfileimg = cv2.imread('potota.jpg')\nimg = cv2.cvtColor(fileimg,cv2.COLOR_BGR2GRAY)\nretval, img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)\nimg = cv2.bitwise_not(img)\neleList = [ele1,ele2,ele3,ele4,ele5,ele6,ele7,ele8]\nres = Thinning(img,eleList)\nfor i in range(2):\n res = Thinning(res,eleList)\ncv2.imshow(\"result\",res)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"高级形态学运算(额外)/thinning.py","file_name":"thinning.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"201687728","text":"import pygame, enum, os, button, spritesheet, random\nfrom pygame import draw, encode_file_path\nfrom pygame import sprite\n\n\n# INITIALIZATION\n#-----------------------------------------------------------------------------------------\npygame.init()\n\nclock = pygame.time.Clock()\nFPS = 60\ngame_over = 0\nseconds_left = 40\ntimer = pygame.time.get_ticks()\n\n# load music and sounds\npygame.mixer.music.load('audio/Hidden_Highland_Remix.wav')\npygame.mixer.music.set_volume(0.3)\npygame.mixer.music.play(-1, 0.0, 
5000)\n\n# game settings\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 640\nFLOOR_POSITION = 500\nGRAVITY = 0.75\nSCROLL_THRESH = 200\n\n# scroll settings\nbg_scroll = 0\nscreen_scroll = 0\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption('Forest')\n\nfont_L = pygame.font.SysFont('arialblack', 45)\nfont_M = pygame.font.SysFont('arialblack', 20)\npokeFont = pygame.font.Font(\"Pokemon GB.ttf\", 16)\narial = pygame.font.Font(None, 45)\n# Colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\n\n# IMAGES\n#-----------------------------------------------------------------------------------------\n# menu\nmenu_bg = pygame.image.load('img/menu/bg.jpg')\nmenu_bg = pygame.transform.scale(menu_bg, (SCREEN_WIDTH, SCREEN_HEIGHT))\nstart_img = pygame.image.load('img/menu/start_btn.png').convert_alpha()\nexit_img = pygame.image.load('img/menu/exit_btn.png').convert_alpha()\nvictory_img = pygame.image.load('img/icons/victory.png').convert_alpha()\ndefeat_img = pygame.image.load('img/icons/defeat.png').convert_alpha()\nrestart_img = pygame.image.load('img/icons/restart.png').convert_alpha()\n\n# background\nsky = pygame.image.load('img/Background/Sky.png').convert_alpha()\nbg_decor = pygame.image.load('img/Background/BG_Decor.png').convert_alpha()\nmiddle_decor = pygame.image.load('img/Background/Middle_Decor.png').convert_alpha()\nforeground = pygame.image.load('img/Background/Foreground.png').convert_alpha()\nground = pygame.image.load('img/Background/Ground.png').convert_alpha()\nfloor = pygame.image.load('img/Background/floor.png').convert_alpha()\nfloor = pygame.transform.scale(floor, (sky.get_width() + 5, floor.get_height()))\n\n# FUNCTIONS\n#-----------------------------------------------------------------------------------------\ndef draw_text(text, font, color, x, y):\n img = font.render(text, True, color)\n screen.blit(img, (x, y))\n\ndef draw_centered_text(text, font, color, x, y, largeFont):\n if largeFont:\n img = font_L.render(text, True, color)\n else:\n img = font_M.render(text, True, color)\n img_rect = img.get_rect(center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2))\n img_rect.y = y\n screen.blit(img, img_rect)\n\ndef draw_score(text, font, color, x, y):\n img = font.render(text, True, color)\n screen.blit(img, (x, y))\n\ndef draw_bg():\n width = sky.get_width()\n for i in range(1000):\n screen.blit(sky, ((i * width) - bg_scroll * 0.5, 0))\n screen.blit(bg_decor, ((i * width) - bg_scroll * 0.6, SCREEN_HEIGHT - bg_decor.get_height() - 300))\n screen.blit(middle_decor, ((i * width) - bg_scroll * 0.7, SCREEN_HEIGHT - middle_decor.get_height() - 150))\n screen.blit(foreground, ((i * width) - bg_scroll * 0.8, SCREEN_HEIGHT - foreground.get_height()))\n screen.blit(ground, ((i * width) - bg_scroll * 0.8, SCREEN_HEIGHT - ground.get_height()))\n screen.blit(floor, ((i * width) - bg_scroll * 0.8, SCREEN_HEIGHT - (SCREEN_HEIGHT - FLOOR_POSITION) - 30))\n\ndef draw_menu_bg():\n screen.blit(menu_bg, (0, 0))\n\n\n# CLASSES\n#-----------------------------------------------------------------------------------------\nclass Velocity():\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\nclass Direction(enum.Enum):\n Right = 1\n Left = -1\n Null = 0\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, name, x, y, width, height, health, flipped):\n super().__init__()\n\n # variables\n self.moving_right = False\n self.moving_left = False\n self.alive = True\n self.name = name\n self.x = x\n self.y = y\n self.width = width\n 
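# ---------------------------------------------------------------------------
# Aside (illustrative, not part of the original game): draw_bg() above gets
# its depth effect by scrolling each layer at a different fraction of
# bg_scroll -- 0.5 for the distant sky up to 0.8 for the foreground. That is
# the whole parallax trick, reduced here to a single helper:
def parallax_offset(bg_scroll, depth_factor):
    # depth_factor in (0, 1]: smaller means farther away, so slower scroll
    return -bg_scroll * depth_factor

assert parallax_offset(100, 0.5) == -50.0   # distant sky crawls
assert parallax_offset(100, 0.8) == -80.0   # near foreground keeps pace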
self.height = height\n self.flipped = flipped\n self.direction = 0\n self.acceleration = 1\n self.velocity = Velocity(0, 0)\n self.in_air = False\n self.jump = False\n self.attacking = False\n self.sliding = False\n self.health = health\n self.max_health = health\n self.accel_timer = pygame.time.get_ticks()\n self.speed = 10\n\n\n # ai specific variables\n self.move_counter = 0\n self.vision = pygame.Rect(0, 0, 150, 20)\n self.idling = False\n self.idling_counter = 0\n self.hurt = False\n self.recoil = False\n self.recoil_counter = 0\n self.bounce = False\n self.hit_count = 0\n\n\n # animation variables\n self.frame_index = 0\n self.animation_list = []\n self.action = 0\n self.update_time = pygame.time.get_ticks() # date.now()\n \n if self.name == 'mummy':\n self.generate_mummy_animation_list()\n elif self.name == 'adventurer' or self.name == 'fem_warrior':\n self.generate_player_animation_list()\n self.direction = 1\n elif self.name == 'snake':\n self.generate_snake_animation_list()\n self.direction = -1\n\n\n self.img = self.animation_list[self.action][self.frame_index]\n self.rect = self.img.get_rect()\n self.rect.center = (x, y)\n\n\n def generate_player_animation_list(self):\n # animation images\n animations = ['Idle', 'Attack', 'Run', 'Crouch', 'Jump', 'Fall', 'Slide', 'Hurt', 'Death']\n for animation in animations:\n # reset temporary list of images\n temp_list = []\n # get number of files in the folder\n num_of_frames = len(os.listdir(f'img/{self.name}/{animation}'))\n # loop through each image/frame for the animations\n for i in range(num_of_frames):\n image = pygame.image.load(f'img/{self.name}/{animation}/{i}.png')\n image = pygame.transform.scale(image, (self.width, self.height))\n temp_list.append(image)\n self.animation_list.append(temp_list)\n\n # frame list = [\n # Idle: [image1, image2, image3, image4]\n # Attack: [image1, image2, image3]\n # Run: [image1, image2, image3, image4, image5, image6]\n # Etc...\n # ]\n\n def generate_mummy_animation_list(self):\n # animation images\n # 0: Idle\n # 1: Attack\n # 2: Walk\n # 3: Hurt\n # 4: Death\n\n\n frame_steps = [4, 6, 6, 6]\n\n # Idle\n idle_images = pygame.image.load('img/mummy/Mummy_Idle.png').convert_alpha()\n idle_spritesheet = spritesheet.SpriteSheet(idle_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[0]):\n img = idle_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # Attack\n attack_images = pygame.image.load('img/mummy/Mummy_attack.png').convert_alpha()\n attack_spritesheet = spritesheet.SpriteSheet(attack_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[1]):\n img = attack_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # Walk\n walk_images = pygame.image.load('img/mummy/Mummy_walk.png').convert_alpha()\n walk_spritesheet = spritesheet.SpriteSheet(walk_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[2]):\n img = walk_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # Hurt\n hurt_images = pygame.image.load('img/mummy/Mummy_hurt.png').convert_alpha()\n hurt_spritesheet = spritesheet.SpriteSheet(hurt_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[2]):\n img = hurt_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n 
step_counter += 1\n self.animation_list.append(temp_list)\n\n # Death\n death_images = pygame.image.load('img/mummy/Mummy_death.png').convert_alpha()\n death_spritesheet = spritesheet.SpriteSheet(death_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[3]):\n img = death_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # frame list = [\n # Idle: [image1, image2, image3, image4]\n # Attack: [image1, image2, image3]\n # Run: [image1, image2, image3, image4, image5, image6]\n # Etc...\n # ]\n\n def generate_snake_animation_list(self):\n # animation images\n # 0: Idle\n # 1: Attack\n # 2: Walk\n # 3: Hurt\n # 4: Death\n\n\n frame_steps = [4, 6, 4, 2, 4]\n\n # Idle\n idle_images = pygame.image.load('img/snake/Snake_idle.png').convert_alpha()\n idle_spritesheet = spritesheet.SpriteSheet(idle_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[0]):\n img = idle_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # Attack\n attack_images = pygame.image.load('img/snake/Snake_attack.png').convert_alpha()\n attack_spritesheet = spritesheet.SpriteSheet(attack_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[1]):\n img = attack_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # Walk\n walk_images = pygame.image.load('img/snake/Snake_walk.png').convert_alpha()\n walk_spritesheet = spritesheet.SpriteSheet(walk_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[2]):\n img = walk_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # Hurt\n hurt_images = pygame.image.load('img/snake/Snake_hurt.png').convert_alpha()\n hurt_spritesheet = spritesheet.SpriteSheet(hurt_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[3]):\n img = hurt_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n # Death\n death_images = pygame.image.load('img/snake/Snake_death.png').convert_alpha()\n death_spritesheet = spritesheet.SpriteSheet(death_images)\n temp_list = []\n step_counter = 0\n for _ in range(frame_steps[4]):\n img = death_spritesheet.get_image(step_counter, 48, 48, 3, BLACK)\n temp_list.append(img)\n step_counter += 1\n self.animation_list.append(temp_list)\n\n\n # frame list = [\n # Idle: [image1, image2, image3, image4]\n # Attack: [image1, image2, image3]\n # Run: [image1, image2, image3, image4, image5, image6]\n # Etc...\n # ]\n\n def update(self):\n self.update_animation()\n if self.attacking and self.name == \"adventurer\":\n self.update_action(1)\n enemies_attacked = pygame.sprite.spritecollide(player, enemy_group, False)\n if len(enemies_attacked) > 0:\n if enemies_attacked[0].alive:\n self.attack(enemies_attacked[0])\n self.check_alive()\n\n def update_animation(self):\n ANIMATION_COOLDOWN = 100\n if self.action == 3 and self.name == 'snake':\n ANIMATION_COOLDOWN = 40\n\n # update image depending on current frame\n self.img = self.animation_list[self.action][self.frame_index]\n\n # check if enough time has passed since the last update\n if pygame.time.get_ticks() - self.update_time > ANIMATION_COOLDOWN:\n self.update_time = pygame.time.get_ticks()\n 
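# ---------------------------------------------------------------------------
# Aside (illustrative): spritesheet.py is not included in this record, but the
# SpriteSheet.get_image(frame, width, height, scale, colorkey) calls above are
# consistent with the usual helper that carves one cell out of a horizontal
# strip, roughly as below. The exact implementation is an assumption.
import pygame

def get_image(sheet, frame, width, height, scale, colorkey):
    image = pygame.Surface((width, height))
    # copy the frame-th tile of the strip onto the blank surface
    image.blit(sheet, (0, 0), (frame * width, 0, width, height))
    image = pygame.transform.scale(image, (width * scale, height * scale))
    image.set_colorkey(colorkey)  # make the key colour (BLACK above) transparent
    return image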
self.frame_index += 1\n\n # if animation has run out then reset back to the start\n if self.frame_index >= len(self.animation_list[self.action]):\n # only remove dying snake from its group after the animation is finished\n if self.action == 4 and self.name == \"snake\":\n enemy_group.remove(self)\n if self.action == 3 and self.name == 'snake':\n self.action = 0\n if self.action == 1 and self.name == \"adventurer\":\n self.attacking = False\n self.action = 0\n if self.action == len(self.animation_list) - 1 and self.name == 'snake':\n self.frame_index = len(self.animation_list[self.action]) - 1\n else:\n self.frame_index = 0\n\n def update_action(self, new_action):\n if new_action != self.action:\n self.action = new_action\n # update animation settings\n self.frame_index = 0\n self.update_time = pygame.time.get_ticks()\n\n def move(self):\n # reset movement variables\n screen_scroll = 0\n # dx and dy represent the CHANGE in x and y\n dx = 0\n dy = 0\n\n # slow down if player is sliding\n if self.sliding:\n self.acceleration = 8\n else:\n self.acceleration = 10\n\n\n # assign movement variables if moving left or right\n if self.moving_left:\n dx = -self.speed\n self.flipped = True\n self.direction = -1\n if self.moving_right:\n dx = self.speed\n self.flipped = False\n self.direction = 1\n\n\n\n\n # jump\n if self.jump and not self.in_air:\n self.velocity.y = -11\n self.jump = False\n self.in_air = True\n\n # falling\n if self.velocity.y >= 0 and self.in_air:\n self.update_action(5) # Fall\n\n\n # apply gravity\n self.velocity.y += GRAVITY\n dy += self.velocity.y\n\n # check collision with floor\n if self.rect.bottom + dy > FLOOR_POSITION:\n if self.in_air:\n if self.moving_left or self.moving_right:\n player.update_action(2) # moving\n else:\n player.update_action(0) # idle\n dy = FLOOR_POSITION - self.rect.bottom\n self.in_air = False\n\n\n # check if going off the edges of the screen\n if self.rect.left + dx < 0 or self.rect.right + dx > SCREEN_WIDTH:\n dx = 0\n\n # update player rectangle position\n self.rect.x += dx\n self.rect.y += dy\n\n # update scroll based on player position\n if self.rect.right > SCREEN_WIDTH - SCROLL_THRESH or (self.rect.left < SCROLL_THRESH and bg_scroll > abs(dx)):\n self.rect.x -= dx\n screen_scroll = -dx\n\n return screen_scroll\n\n def enemy_move(self):\n # dx and dy represent the CHANGE in x and y\n dx = 0\n dy = 0\n\n # assign movement variables if moving left or right\n if self.moving_left:\n dx = -self.speed\n self.flipped = True\n self.direction = -1\n if self.moving_right:\n dx = self.speed\n self.flipped = False\n self.direction = 1\n\n # check for recoil\n if self.name == \"snake\" and self.action == 3:\n self.hurt = True\n self.hit_count += 1\n\n # jump\n if self.jump and not self.in_air:\n self.velocity.y = -11\n self.jump = False\n self.in_air = True\n\n\n # Bounce\n if self.hit_count >= 3 and not self.in_air:\n #recoil vertically\n self.velocity.x = 2 * self.direction * (random.uniform(0.1, 3))\n self.velocity.y = -9 * (random.uniform(0.5, 1))\n self.bounce = False\n self.in_air = True\n self.hit_count = 0\n\n\n\n\n\n # apply gravity and recoil\n self.velocity.y += GRAVITY\n dx += self.velocity.x\n dy += self.velocity.y\n\n # check collision with floor\n if self.rect.bottom + dy > FLOOR_POSITION:\n dy = FLOOR_POSITION - self.rect.bottom\n self.in_air = False\n self.bounce = False\n self.velocity.y = 0\n self.velocity.x = 0\n\n # check if going off the edges of the screen\n if self.rect.left + dx < 0 or self.rect.right + dx > SCREEN_WIDTH:\n dx = 
0\n\n # update ai rectangle position\n self.rect.x += dx + screen_scroll\n self.rect.y += dy\n\n def enemy(self):\n # Hit recoil\n if self.name == \"snake\" and self.action == 3:\n self.recoil = True\n self.rect.x += 10 * self.direction\n self.rect.y -= 5\n\n if self.alive and player.alive:\n if random.randint(1, 200) == 1 and not self.idling:\n self.update_action(0) # 0: idle\n self.idling = True\n self.idling_counter = 50\n\n # check if the ai is near the player\n if self.vision.colliderect(player.rect):\n # stop running and shoot the player \n self.update_action(1) # 0: Idle\n # if ai doesnt see player\n else:\n if not self.idling:\n if self.direction == 1:\n self.moving_right = True\n elif self.direction == -1:\n self.moving_right = False\n self.moving_left = not self.moving_right\n self.enemy_move()\n self.update_action(2) # walk\n self.move_counter += 1\n\n # update ai vision as the enemy moves\n self.vision.center = (self.rect.centerx + 75 * self.direction, self.rect.centery)\n\n if self.move_counter > 50 :\n self.direction *= -1\n self.move_counter *= -1\n else:\n self.idling_counter -= 1\n self.update_action(0)\n if self.idling_counter <= 0:\n self.idling = False\n\n # scroll\n self.rect.x += screen_scroll\n\n def npc(self):\n if self.alive and player.alive:\n if random.randint(1, 200) == 1 and not self.idling:\n self.update_action(0) # 0: idle\n self.idling = True\n self.idling_counter = 200\n\n\n if not self.idling:\n if self.direction == 1:\n self.moving_right = True\n elif self.direction == -1:\n self.moving_right = False\n self.moving_left = not self.moving_right\n self.enemy_move()\n self.update_action(2)\n self.move_counter += 1\n\n if self.move_counter > 30 :\n self.direction *= -1\n self.move_counter *= -1\n else:\n self.idling_counter -= 1\n if self.idling_counter <= 0:\n self.idling = False\n\n # scroll\n self.rect.x += screen_scroll\n\n def attack(self, target):\n hit_range_rect = pygame.Rect(self.rect.left - 10, self.rect.top - 10, self.rect.width + 10, self.rect.height + 10)\n if hit_range_rect.colliderect(target.rect):\n target.update_action(3)\n target.health -= 3\n target.direction = random.randint(-1, 1)\n\n def draw(self):\n if self.flipped:\n screen.blit(pygame.transform.flip(self.img, True, False).convert_alpha(), self.rect)\n else:\n screen.blit(self.img, self.rect)\n\n def check_alive(self):\n if self.health <= 0:\n self.health = 0 \n self.acceleration = 0\n self.alive = 0\n self.action = len(self.animation_list) - 1\n return False\n else:\n return True\n\n def end_movement(self):\n player.update_action(0)\n player.moving_right = False\n player.moving_left = False\n\nclass HealthBar():\n def __init__(self, x, y, hp, max_hp):\n self.x = x\n self.y = y\n self.hp = hp\n self.max_hp = hp\n\n def draw(self, hp):\n # update with new health\n self.hp = hp\n # calculate health ratio\n ratio = self.hp / self.max_hp\n pygame.draw.rect(screen, RED, (self.x, self.y, 150, 20))\n pygame.draw.rect(screen, GREEN, (self.x, self.y, 150 * ratio, 20))\n\nclass DamageText(pygame.sprite.Sprite):\n def __init__(self, x, y, damage, color):\n super().__init__()\n self.image = pokeFont.render(damage, True, color)\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n self.counter = 0\n\n\n def update(self):\n # move damage text up\n self.rect.y -= 1\n\n # delete the text after a few seconds\n self.counter += 1\n\n if self.counter > 30:\n self.kill()\n\n# BUTTONS \n# -----------------------------------------------------------------------------------------\n# create 
buttons\nstart_button = button.Button(SCREEN_WIDTH // 2 - 130, SCREEN_HEIGHT // 2 + 50, start_img, 1)\nexit_button = button.Button(SCREEN_WIDTH // 2 - 110, SCREEN_HEIGHT // 2 + 200, exit_img, 1)\nrestart_button = button.Button(330, 150, restart_img, 0.6)\nrestart_button.rect.centerx = SCREEN_WIDTH // 2\nvictory_button = button.Button(330, 10, victory_img, 1 )\nvictory_button.rect.centerx = SCREEN_WIDTH // 2\n\n\n\n# MENU \n# -----------------------------------------------------------------------------------------\nstart_game = False\nwhile not start_game:\n draw_menu_bg()\n draw_centered_text(\"Welcome to Forest Invaders!\", arial, (255, 255, 0), 40, 50, True)\n draw_centered_text(\"Created by Yorick-Ntwari Niyonkuru\", arial, (255, 255, 0), 40, 175, False)\n draw_centered_text(\"You must save the magic forest from the \", arial, (255, 255, 0), 40, 200, False)\n draw_centered_text(\"invasion of snakes before time runs out!\", arial, (255, 255, 0), 40, 225, False)\n draw_centered_text(\"W: Jump, D: Move Right, S: Crouch, A: Move Left\", arial, (255, 255, 0), 40, 250, False)\n draw_centered_text(\"SPACEBAR: Attack\", arial, (255, 255, 0), 40, 275, False)\n\n # detect mouse clicks\n if start_button.draw(screen):\n start_game = True\n if exit_button.draw(screen):\n pygame.quit()\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n pygame.display.update()\n\n\n\n# GAME LOOP\n# -----------------------------------------------------------------------------------------\n\nquit = False\nlevel = 1\n# Creating players and enemies\nplayer = Player('adventurer', 200, 200, 100, 100, 100, False)\nplayer_healthbar = HealthBar(SCREEN_WIDTH - 250, 40 , player.health, player.max_health)\n# Adding enemies to the group\nenemy_group = pygame.sprite.Group()\nnumber_of_enemies = 10\nfor i in range(number_of_enemies):\n snake = Player('snake', random.randint(10, 10000), 600, 100, 100, 100, False)\n enemy_group.add(snake) \n\nwhile not quit:\n clock.tick(FPS)\n draw_bg()\n if game_over == 0:\n draw_text(str(len(enemy_group)), pygame.font.Font(\"Pokemon GB.ttf\", 64), (255, 255, 0), SCREEN_WIDTH // 2 - 30, 20)\n elif game_over == 1:\n draw_text(\"Press the victory button for the next level\", pygame.font.Font(\"Pokemon GB.ttf\", 16), (255, 255, 0), 75, 250)\n draw_text(\"Level \" + str(level), pygame.font.Font(\"Pokemon GB.ttf\", 32), (255, 200, 0), 10, 20)\n draw_text(\"Time Remaining:\", pygame.font.Font(\"Pokemon GB.ttf\", 20), (255, 200, 0), 10, 75)\n if seconds_left == 1:\n draw_text(str(seconds_left) + \" second left\", pygame.font.Font(\"Pokemon GB.ttf\", 20), (255, 200, 0), 10, 100)\n else:\n draw_text(str(seconds_left) + \" seconds left\", pygame.font.Font(\"Pokemon GB.ttf\", 20), (255, 200, 0), 10, 100)\n player_healthbar.draw(player.health)\n draw_text(\"Health: \" + str(player.health), pygame.font.Font(\"Pokemon GB.ttf\", 20), (255, 200, 0), SCREEN_WIDTH - 250, 10)\n\n # check if a second has passed to reduce seconds left\n if pygame.time.get_ticks() - timer > 1000 and game_over == 0:\n seconds_left -= 1\n timer = pygame.time.get_ticks()\n if seconds_left <= 0:\n game_over = -1\n \n\n\n # check if game is over\n if len(enemy_group) <= 0 and game_over == 0:\n game_over = 1\n if player.health <= 0:\n game_over = -1\n\n\n if game_over != 0:\n if game_over == 1:\n player.end_movement()\n if victory_button.draw(screen):\n level = level + 1\n timer = pygame.time.get_ticks()\n seconds_left = 30 * level * (1 - (level//10))\n player = Player('adventurer', 200, 200, 100, 100, 
100, False)\n enemy_group = pygame.sprite.Group()\n number_of_enemies = number_of_enemies + 10\n for i in range(number_of_enemies):\n snake = Player('snake', random.randint(10, 10000), 600, 100, 100, 100, False)\n enemy_group.add(snake)\n game_over = 0\n if game_over == -1:\n player.end_movement()\n defeat_rect = defeat_img.get_rect()\n defeat_rect.centerx = SCREEN_WIDTH // 2\n defeat_rect.y = 10\n screen.blit(defeat_img, (defeat_rect.x, defeat_rect.y))\n if restart_button.draw(screen):\n timer = pygame.time.get_ticks()\n seconds_left = 30 * level * (1 - (level//10))\n screen_scroll = 0\n player = Player('adventurer', 200, 200, 100, 100, 100, False)\n enemy_group = pygame.sprite.Group()\n for i in range(number_of_enemies):\n snake = Player('snake', random.randint(10, 10000), 600, 100, 100, 100, False)\n enemy_group.add(snake)\n game_over = 0\n\n player.update()\n screen_scroll = player.move()\n bg_scroll -= screen_scroll\n player.draw()\n\n for enemy in enemy_group:\n enemy.update()\n enemy.enemy_move()\n enemy.draw()\n\n # Event Handling\n for event in pygame.event.get():\n # quit game\n if event.type == pygame.QUIT:\n quit = True\n # Player action\n if game_over == 0:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_d:\n player.moving_right = True\n player.moving_left = False\n player.direction = 1\n player.flipped = False\n player.update_action(2) # Run\n if event.key == pygame.K_a:\n player.moving_left = True\n player.moving_right = False\n player.direction = -1\n player.flipped = True\n player.update_action(2) # Run\n if event.key == pygame.K_w:\n player.jump = True\n player.update_action(4) # Jump\n if event.key == pygame.K_s and not player.in_air:\n if player.moving_left or player.moving_right:\n player.sliding = True\n player.update_action(6) # Slide\n else:\n player.update_action(3) # Crouch\n if event.key == pygame.K_SPACE:\n player.update_action(1) # Attack\n player.attacking = True\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_d:\n player.moving_right = False\n player.moving_left = False\n player.update_action(0) # Idle\n if event.key == pygame.K_a:\n player.moving_right = False\n player.moving_left = False\n player.update_action(0) # Idle\n if event.key == pygame.K_s:\n if player.sliding:\n player.update_action(2)\n player.sliding = False\n else:\n player.update_action(0)\n if event.key == pygame.K_SPACE:\n player.update_action(0) # Idle\n player.moving_right = False\n player.moving_left = False\n player.attacking = False\n\n pygame.display.update()\n\n\npygame.quit()\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":29247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"315464990","text":"# Author: Jishi Tong\r\n# Email: tjishi@adobe.com\r\n# Last update: 2011-01-05\r\n# Getting project status from skippy.adobe.com\r\n\r\nfilename='list.txt' #default file name of agenda.\r\nlog='log.txt' #the file name to save the report\r\nretry=10 #retry times if fail connection\r\nt1=['FR','DE','JP','UK','SE']\r\nt2=['IT','NO','DK','FI','NL','ES']\r\nt3=['BR','KR','CN','TW','HK_ZH','LA','MX','CAFR','MENA','EEUROPE','SEA','AP','AFRICA']\r\nt4=['EE','LT','LV','SK','HU','BG','CZ','HR','PL','RO','RS','RU','SI','TR','UA']\r\n\r\ndef unescape(s):\r\n s = s.replace(\"<\", \"<\")\r\n s = s.replace(\">\", \">\")\r\n s = s.replace(\""\", '\"')\r\n s = s.replace(\"\\\\r\\\\n\",'\\n')\r\n s = s.replace(\"\\\\n\",'\\n')\r\n s = s.replace(\"\\\\t\", \"\\t\")\r\n # this has to be 
last:\r\n s = s.replace("&amp;", "&")\r\n return s\r\n\r\nimport urllib.request,urllib.parse,re,http.cookiejar,getpass, codecs\r\nimport xlwt3 as xlwt\r\nfrom datetime import datetime\r\n\r\n_monthnames = ['Jan', 'Feb', 'Mar', 'April', 'May', 'June', 'July',\r\n 'Aug', 'Sept', 'Oct', 'Nov', 'Dec']\r\nwhile True:\r\n try:\r\n f=open(filename,'rb')\r\n content=f.read()\r\n pt=re.compile(r'Project No.\s*([\d\w-]+)')\r\n project_list=list(re.findall(pt,str(content)))\r\n total=len(project_list)\r\n if total==0:\r\n print('No project name found in "'+filename+'", please check and input the agenda file name: ')\r\n filename=input().strip()\r\n continue\r\n print(str(total)+' projects in total.')\r\n break\r\n except Exception as e:\r\n filename=input().strip()\r\n\r\ndescription_needed=True\r\nlog=open(log,'w')\r\n\r\nwb = xlwt.Workbook()\r\nws = wb.add_sheet('Result')\r\nstrStyle = xlwt.easyxf('font: name Calibri')\r\ndateStyle = xlwt.easyxf('font: name Calibri',num_format_str='YY/MM/DD')\r\n\r\nws.write(0, 0, 'Project Id', strStyle)\r\nws.write(0, 1, 'Status', strStyle)\r\nws.write(0, 2, 'Start Date', strStyle)\r\nws.write(0, 3, 'Due Date', strStyle)\r\nws.write(0, 4, 'Countries involved', strStyle)\r\nws.write(0, 5, 'Countries done', strStyle)\r\nws.write(0, 6, 'Countries staged', strStyle)\r\nws.write(0, 7, 'In progress', strStyle)\r\nws.write(0, 8, 'Idiom', strStyle)\r\nws.write(0, 9, 'Description', strStyle)\r\nws.write(0, 10, 'Notes', strStyle)\r\nws.write(0, 11, 'Priority', strStyle)\r\nws.write(0, 12, 'Link', strStyle)\r\n\r\ntotal=len(project_list)\r\n\r\ncookie=http.cookiejar.CookieJar()\r\nopener=urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))\r\nurllib.request.install_opener(opener)\r\n\r\nwhile True:\r\n print('Username: ')\r\n username=input().strip()\r\n psd=getpass.getpass().strip()\r\n params=urllib.parse.urlencode({"username":username,"password":psd,"login":1,"submit":"login"})\r\n params=params.encode()\r\n print('Connecting and authenticating to skippy...')\r\n\r\n i=0\r\n while i<retry:\r\n try:countries_involve=re.search(r'<B>Countries Involved</B>.*?(?:<td>)?(.*?)</td>',content).group(1)\r\n except:countries_involve=''\r\n try:priority=re.search(r'name="priority" value="(..)" checked>',content).group(1)\r\n except:priority='N/A'\r\n if '<B>' in countries_involve:\r\n countries_involve=countries_involve.replace('<B>','')\r\n countries_involve=countries_involve.replace('</B>','')\r\n \r\n if_done=re.search(r'No more changes can be made to the project.',content)!=None\r\n try:status=re.search(r'name="status" value="([^<>]*?)" checked="checked" />',content).group(1)\r\n except:status='Done' \r\n try:title=re.search(r'<title>(.*?)</title>',content).group(1)\r\n except:title=re.search(r'.*?\\n\s*(.*?)\\n ',content).group(1)\r\n start_date=re.search(r'.*\n.*\n.*\n(.*)\n',unescape(content)).group(1).strip()\r\n month_due=re.search(r'name="monthDue".*?selected="selected".*?>(.*?)<',content).group(1)\r\n day_due=re.search(r'name="dayDue".*?selected="selected".*?>(.*?)<',content).group(1)\r\n year_due=re.search(r'name="yearDue".*?selected="selected".*?>(.*?)<',content).group(1)\r\n due_date=month_due+' '+day_due+' '+year_due\r\n description=re.search(r'<td>Description</td>.*?textarea.*?>(.*?)</textarea>',content).group(1)\r\n remarks=re.search(r'<td>Remarks:</td>.*?textarea.*?>(.*?)</textarea>',content).group(1)\r\n try:countries_ready=re.search(r'Countries Ready.*?\\n\s*(.*?)
',content,re.S).group(1)\r\n except:countries_ready=''\r\n countries_staged=str(re.findall(r'\\d{8}.*?>([^\\\\]*?)staged?',content,re.S))\r\n countries_staged=re.split(r\"['|\\[|\\]|\\s|,| ]\",countries_staged)\r\n countries_ready_temp=countries_ready.split(',')\r\n if 'T1' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t1\r\n if 'T2' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t2\r\n if 'T3' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t3\r\n if 'T4' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t4\r\n countries_staged=[i for i in countries_staged if i!='' and i not in countries_ready_temp]\r\n countries_staged=re.sub(r\"[\\[|\\]|']\",'',str(countries_staged))\r\n \r\n countries_ready_temp=countries_staged.split(', ')+countries_ready.split(',')\r\n if 'T1' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t1\r\n if 'T2' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t2\r\n if 'T3' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t3\r\n if 'T4' in countries_ready_temp: countries_ready_temp=countries_ready_temp+t4\r\n \r\n countries_involve_temp=countries_involve.replace(' ','').split(',')\r\n if 'T1' in countries_involve_temp: countries_involve_temp=countries_involve_temp+t1\r\n if 'T2' in countries_involve_temp: countries_involve_temp=countries_involve_temp+t2\r\n if 'T3' in countries_involve_temp: countries_involve_temp=countries_involve_temp+t3\r\n if 'T4' in countries_involve_temp: countries_involve_temp=countries_involve_temp+t4\r\n countries_not_ready=[i for i in countries_involve_temp if i not in countries_ready_temp]\r\n countries_not_ready=re.sub(r\"[\\[|\\]|']\",'',str(countries_not_ready))\r\n countries_not_ready=re.sub(r'(T1,|T2,|T3,|T4,)','',countries_not_ready)\r\n \r\n try:idiom_name=re.search(r'(?:WorldServer|Idiom project name).*?([\\d\\w_-]+)',unescape(content),re.I|re.S).group(1)\r\n except:idiom_name=''\r\n #print(idiom_name)\r\n\r\n\r\n ########## write to file ############\r\n log.write(p+' - '+unescape(title)+'\\n')\r\n ws.write(count, 0, p+' - '+unescape(title), strStyle)\r\n if if_done:\r\n log.write('**CLOSED**'+'\\n')\r\n ws.write(count, 1, 'Done', strStyle)\r\n else:\r\n ws.write(count, 1, status, strStyle)\r\n log.write('Priority: '+priority+'\\n')\r\n log.write('Start Date: '+ start_date+'\\n')\r\n try:[mm, dd, yy] = start_date.split(' ')\r\n except Exception as e:\r\n err=open('error.log','a')\r\n err.write('https://skippy.adobe.com/cgi-bin/admin.cgi?Tabs=project&jumpto='+p)\r\n \r\n ws.write(count, 2, datetime(int(yy), _monthnames.index(mm)+1, int(dd)), dateStyle)\r\n\r\n log.write('Due Date: '+due_date+'\\n') \r\n [mm, dd, yy] = due_date.split(' ')\r\n ws.write(count, 3, datetime(int(yy), _monthnames.index(mm)+1, int(dd)), dateStyle)\r\n \r\n log.write('Countries involved: '+ str(countries_involve)+'\\n')\r\n ws.write(count, 4, str(countries_involve), strStyle)\r\n log.write('Countries Ready: '+ str(countries_ready)+'\\n') \r\n ws.write(count, 5, str(countries_ready), strStyle)\r\n log.write('Countries Staged: '+ str(countries_staged)+'\\n')\r\n ws.write(count, 6, str(countries_staged), strStyle)\r\n log.write('Countries NOT ready nor staged: '+str(countries_not_ready)+'\\n')\r\n ws.write(count, 7, str(countries_not_ready), strStyle)\r\n log.write('Idiom Name: '+idiom_name+'\\n')\r\n ws.write(count, 8, idiom_name, strStyle)\r\n log.write('-'*10+'\\n')\r\n log.write(unescape(description)+'\\n')\r\n ws.write(count, 9, 
unescape(description),strStyle)\r\n log.write('-'*10+'\\n')\r\n log.write(unescape(remarks)+'\\n')\r\n ws.write(count, 10, unescape(remarks),strStyle)\r\n ws.write(count, 11, priority, strStyle)\r\n ws.write(count, 12, 'https://skippy.adobe.com/cgi-bin/admin.cgi?Tabs=project&jumpto='+p)\r\n log.write('\\n'+'='*80+'\\n')\r\n log.flush()\r\n wb.save('result.xls')\r\n\r\n","sub_path":"rz/grab_skippy.py","file_name":"grab_skippy.py","file_ext":"py","file_size_in_byte":9413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"3157798","text":"import numpy \nimport logging \n\nfrom apgl.util.Parameter import Parameter\nfrom exp.sandbox.GraphMatch import GraphMatch \n\nclass HIVGraphMetrics2(object): \n def __init__(self, realGraph, breakDist=0.2, matcher=None, T=1000):\n \"\"\"\n A class to model metrics about and between HIVGraphs such as summary \n statistics and distances. In this case we perform graph matching \n using the PATH algorithm and other graph matching methods. \n \n :param realGraph: The target epidemic graph \n \n :param epsilon: The max mean distance before we break the simulation\n \n :param matcher: The graph matcher object to compute graph distances. \n \n :param T: The end time of the simulation. If the simulation quits before T, then distance = 1.\n \"\"\"\n \n self.dists = [] \n self.graphDists = []\n self.labelDists = []\n self.realGraph = realGraph\n self.breakDist = breakDist \n self.breakIgnore = 3 \n self.T = T \n self.times = []\n \n if matcher == None: \n self.matcher = GraphMatch(\"U\")\n else: \n self.matcher = matcher \n \n def addGraph(self, graph): \n \"\"\"\n Compute the distance between this graph and the realGraph at the time \n of the last event of this one. \n \"\"\"\n t = graph.endTime()\n subgraph = graph.subgraph(graph.removedIndsAt(t)) \n subRealGraph = self.realGraph.subgraph(self.realGraph.removedIndsAt(t)) \n \n #Only add distance if the real graph has nonzero size\n if subRealGraph.size != 0: \n permutation, distance, time = self.matcher.match(subgraph, subRealGraph)\n lastDist, lastGraphDist, lastLabelDist = self.matcher.distance(subgraph, subRealGraph, permutation, True, False, True) \n \n logging.debug(\"Distance at time \" + str(t) + \" is \" + str(lastDist) + \" with simulated size \" + str(subgraph.size) + \" and real size \" + str(subRealGraph.size)) \n \n self.dists.append(lastDist)\n self.graphDists.append(lastGraphDist)\n self.labelDists.append(lastLabelDist)\n self.times.append(t) \n else: \n logging.debug(\"Not adding distance at time \" + str(t) + \" with simulated size \" + str(subgraph.size) + \" and real size \" + str(subRealGraph.size))\n \n def distance(self): \n \"\"\"\n If we have the required number of time steps, return the mean distance \n otherwise return a distance of 1 (the max distance).\n \"\"\"\n if len(self.times) != 0 and self.times[-1] >= self.T: \n return self.meanDistance()\n else: \n return 1 \n \n def meanDistance(self):\n \"\"\"\n This is the mean distance of the graph matches so far. \n \"\"\"\n dists = numpy.array(self.dists)\n if dists.shape[0]!=0: \n return dists.mean()\n else: \n return 0\n \n def meanGraphDistance(self):\n \"\"\"\n This is the mean graph distance of the graph matches so far. \n \"\"\"\n graphDists = numpy.array(self.graphDists)\n if graphDists.shape[0]!=0: \n return graphDists.mean()\n else: \n return 0\n \n def meanLabelDistance(self):\n \"\"\"\n This is the mean label distance of the graph matches so far. 
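# ---------------------------------------------------------------------------
# Aside (illustrative, not part of the original class): shouldBreak() below is
# a plain early-stopping rule -- ignore the first breakIgnore observations,
# then abort once the running mean distance exceeds breakDist:
def should_break(dists, break_ignore=3, break_dist=0.2):
    if len(dists) < break_ignore:
        return False
    return sum(dists) / len(dists) > break_dist

assert should_break([0.5, 0.5]) is False       # too few samples to judge
assert should_break([0.1, 0.1, 0.1]) is False  # mean 0.1 <= 0.2
assert should_break([0.3, 0.3, 0.3]) is True   # mean 0.3 >  0.2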
\n """\n labelDists = numpy.array(self.labelDists)\n if labelDists.shape[0]!=0: \n return labelDists.mean()\n else: \n return 0\n \n def shouldBreak(self): \n if len(self.dists) < self.breakIgnore: \n return False \n else:\n return self.meanDistance() > self.breakDist \n ","sub_path":"exp/viroscopy/model/HIVGraphMetrics2.py","file_name":"HIVGraphMetrics2.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"467647200","text":"import netCDF4\nimport h5py\nimport numpy as np\nimport datetime\nfrom math import isnan, pi\n\nfrom qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsGeometry, QgsMessageLog, Qgis, QgsPointXY\nfrom PyQt5.QtCore import QVariant\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMessageBox\n\n\nclass hydroDataset():\n def __init__(self, file, type = 'nc'):\n if file is None:\n quit('A netcdf or hdf5 file is needed')\n else:\n self.file = file\n self.type = type\n if self.type != 'nc' and self.type !='hdf5':\n quit('type must be nc or hdf5')\n\n def read(self):\n if self.type=='nc':\n self.ds = netCDF4.Dataset(self.file)\n times_in = getvar_standardname(self.ds, ['time'])\n self.time = netCDF4.num2date(times_in[:], units=times_in.units)[0]\n self.lat = getvar_standardname(self.ds, ['latitude'])[:]\n self.lon = getvar_standardname(self.ds, ['longitude'])[:]\n self.u = getvar_standardname(self.ds, ['surface_eastward_sea_water_velocity',\n 'eastward_sea_water_velocity'])[:]\n self.v = getvar_standardname(self.ds, ['surface_northward_sea_water_velocity',\n 'northward_sea_water_velocity'])[:]\n else:\n self.ds = h5py.File(self.file, 'r')\n self.time = self.ds.get('time')\n self.lat = self.ds.get('latitude')\n self.lon = self.ds.get('longitude')\n self.u = self.ds.get('uo')\n self.v = self.ds.get('vo')\n\n def uv2md(self):\n self.mod = pow((pow(self.u, 2) + pow(self.v, 2)), .5)\n self.dir = (180 * np.arctan2(self.u, self.v)) / pi\n\n\ndef add_current_layer(h_ds):\n\n project = QgsProject.instance()\n\n layer = QgsVectorLayer("Point?crs=epsg:4326", "currents", "memory")\n layer.dataProvider().addAttributes([QgsField("water_u", QVariant.Double),\n QgsField("water_v", QVariant.Double),\n QgsField("mod", QVariant.Double),\n QgsField("dir", QVariant.Double)])\n layer.updateFields()\n features = []\n for i, lon in enumerate(h_ds.lon):\n for j, lat in enumerate(h_ds.lat):\n if not isnan(h_ds.u[j][i]):\n feature = QgsFeature()\n feature.setFields(layer.fields())\n pt = QgsPointXY(lon, lat)\n geom = QgsGeometry.fromPointXY(pt)\n feature.setGeometry(geom)\n feature.setAttributes(\n [float(h_ds.u[j][i]), float(h_ds.v[j][i]), float(h_ds.mod[j][i]), float(h_ds.dir[j][i])])\n features.append(feature)\n layer.dataProvider().addFeatures(features)\n project.addMapLayer(layer)\n return layer\n\n\n\n\n\ndef unix_time(dt):\n """ Seconds since 01_01_1970."""\n epoch = datetime.datetime.utcfromtimestamp(0)\n delta = dt - epoch\n return delta.total_seconds()\n\n\ndef getvar_standardname(f, nome_standards):\n """Return values using the CF standard name of a variable in a netCDF file."""\n for var in f.variables:\n for atributo in (f.variables[var].ncattrs()):\n if atributo == 'standard_name':\n nome_atributo = (getattr(f.variables[var], 'standard_name'))\n for nome_standar in nome_standards:\n if nome_atributo == nome_standar:\n return f.variables[var]\n print('standard_name = {0} not found'.format(nome_standar))\n\n\ndef getvar_longname(f, nome_longs):\n """Return values using the 
CF long name of a variable in a netCDF file.\"\"\"\n for var in f.variables:\n for atributo in (f.variables[var].ncattrs()):\n if atributo == 'long_name':\n nome_atributo = (getattr(f.variables[var], 'long_name'))\n for nome_long in nome_longs:\n if nome_atributo == nome_long:\n return f.variables[var]\n print('long_name = {0} not found'.format(nome_long))\n\n\n\ndef hf():\n file_in = r'http://150.145.136.27:8080/thredds/dodsC/Ibiza_NRT212/2020/2020_02/2020_02_12/HFR-Ibiza-Total_2020_02_12_1700.nc'\n\n print('vou a ler {0}'.format(file_in))\n\n f = netCDF4.Dataset(file_in)\n\n nc_attrs = f.ncattrs()\n\n detailed_text = 'NetCDF Global Attributes:\\n\\n'\n for nc_attr in nc_attrs:\n value = '%s' % repr(f.getncattr(nc_attr), )\n spam = f'- {nc_attr}: {value}; \\n'\n detailed_text += spam\n\n # Radial or Total file\n total = False\n # if f.getncattr('data_type') == 'HF radar total data':\n total = True\n\n # Extension\n\n lat_min = float(f.getncattr('geospatial_lat_min'))\n lat_max = float(f.getncattr('geospatial_lat_max'))\n lon_min = float(f.getncattr('geospatial_lon_min'))\n lon_max = float(f.getncattr('geospatial_lon_max'))\n extension = [lat_min, lat_max, lon_min, lon_max]\n\n # Variables with time\n\n times_in = getvar_standardname(f, ['time'])\n tempos = netCDF4.num2date(times_in[:], units=times_in.units)\n lat_in = getvar_standardname(f, ['latitude'])[:]\n lon_in = getvar_standardname(f, ['longitude'])[:]\n\n u_in = getvar_standardname(f, ['surface_eastward_sea_water_velocity',\n 'eastward_sea_water_velocity'])[:]\n # if u_in is None:\n # u_in = getvar_standardname(f, 'eastward_sea_water_velocity')[:]\n v_in = getvar_standardname(f, ['surface_northward_sea_water_velocity',\n 'northward_sea_water_velocity'])[:]\n # if v_in is None:\n # v_in = getvar_standardname(f, 'northward_sea_water_velocity')[:]\n print(v_in.shape)\n\n tempo = netCDF4.num2date(times_in[:], units=times_in.units)[0]\n times = unix_time(tempo)\n print(tempo)\n\n water_u = u_in[:]\n water_v = v_in[:]\n water_u = water_u[0][0]\n water_v = water_v[0][0]\n\n mod = pow((pow(water_u, 2) + pow(water_v, 2)), .5)\n dir = (180 * np.arctan2(water_u, water_v)) / pi\n\n f.close()\n\n # msg = QMessageBox()\n # msg.setText(f'file: {file_in}')\n # msg.setInformativeText(\"fe\")\n # msg.setWindowTitle(\"Open NetCDF\")\n # msg.setDetailedText(detailed_text)\n # msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n # msg.exec_()\n\n proyecto = QgsProject.instance()\n print(proyecto)\n\n layer = QgsVectorLayer(\"Point?crs=epsg:4326\", \"capa_radar\", \"memory\")\n layer.dataProvider().addAttributes([QgsField(\"water_u\", QVariant.Double),\n QgsField(\"water_v\", QVariant.Double),\n QgsField(\"mod\", QVariant.Double),\n QgsField(\"dir\", QVariant.Double)])\n layer.updateFields()\n features = []\n\n if total:\n\n for i, lon in enumerate(lon_in):\n for j, lat in enumerate(lat_in):\n if not isnan(water_u[j][i]):\n feature = QgsFeature()\n feature.setFields(layer.fields())\n pt = QgsPointXY(lon, lat)\n geom = QgsGeometry.fromPointXY(pt)\n feature.setGeometry(geom)\n feature.setAttributes(\n [float(water_u[j][i]), float(water_v[j][i]), float(mod[j][i]), float(dir[j][i])])\n features.append(feature)\n layer.dataProvider().addFeatures(features)\n proyecto.addMapLayer(layer)\n\n return layer\n\n\ndef sinrampa(layer):\n # You can alter a single property...\n symbol = QgsMarkerSymbol.createSimple({'name': 'square', 'color': 'red'})\n renderer = QgsSingleSymbolRenderer(symbol)\n layer.setRenderer(renderer)\n\n 
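# ---------------------------------------------------------------------------
# Aside (illustrative, standalone): hydroDataset.uv2md() and hf() above both
# turn eastward/northward velocity components into the speed/heading pair the
# arrow symbols consume: modulus = sqrt(u^2 + v^2) and direction = atan2(u, v)
# in degrees, so 0 deg points north and 90 deg points east.
import numpy as np

u = np.array([0.3, 0.0])   # east-flowing, then north-flowing sample points
v = np.array([0.0, 0.3])
speed = np.hypot(u, v)
heading = np.degrees(np.arctan2(u, v))
assert np.allclose(speed, [0.3, 0.3])
assert np.allclose(heading, [90.0, 0.0])   # east = 90, north = 0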
layer.renderer().symbol().symbolLayer(0).setSize(3)\n # ... but not all properties are accessible from methods,\n # you can also replace the symbol completely:\n props = layer.renderer().symbol().symbolLayer(0).properties()\n props['color'] = 'red'\n props['name'] = 'arrow'\n layer.renderer().setSymbol(QgsMarkerSymbol.createSimple(props))\n # show the changes\n # layer.triggerRepaint()\n\n layer.triggerRepaint()\n\n\ndef rampa(layer):\n vals = []\n fld = 'mod'\n for f in layer.getFeatures():\n vals.append(f[fld])\n # If you don't like these colors, change them out for ones you do, using hexcodes,\n # RGB codes etc. as long as the items in this list are valid strings you\n # can pass to a QColor constructor\n colors = ['#0011FF', '#0061FF', '#00D4FF', '#00FF66', '#00FF00', '#E5FF32', '#FCFC0C', '#FF9F00', '#FF3F00',\n '#FF0000']\n lower = sorted(vals)[0]\n upper = sorted(vals)[-1]\n step = (upper - lower) / len(colors)\n range_list = []\n for c in colors:\n cat = [lower, lower + step, c]\n # sym = QgsSymbol.defaultSymbol(layer.geometryType())\n sym = QgsMarkerSymbol.createSimple({'name': 'arrow'})\n\n sym.setColor(QColor(cat[2]))\n rng = QgsRendererRange(cat[0], cat[1], sym, '{0:.1f}-{1:.1f}'.format(cat[0], cat[1]))\n range_list.append(rng)\n lower = (lower + step) + 0.1\n # sym.setSize(3)\n # sym.setName('arrow')\n # sym.symbolLayer(0)\n print(QgsProperty.fromField(fld).asExpression())\n sym.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertyAngle, QgsProperty.fromField('dira'))\n renderer = QgsGraduatedSymbolRenderer(fld, range_list)\n\n renderer.setSymbolSizes(0, 10)\n layer.setRenderer(renderer)\n layer.triggerRepaint()\n\n # proyecto.addMapLayer(layer)\n\n\ndef rampa2(layer):\n vals = []\n fld = 'mod'\n for f in layer.getFeatures():\n vals.append(f[fld])\n print(sorted(vals)[-1])\n # If you don't like these colors, change them out for ones you do, using hexcodes,\n # RGB codes etc. 
as long as the items in this list are valid strings you\n    # can pass to a QColor constructor\n    colors = ['#0011FF', '#0061FF', '#00D4FF', '#00FF66', '#00FF00', '#E5FF32', '#FCFC0C', '#FF9F00', '#FF3F00',\n              '#FF0000']\n    lower = sorted(vals)[0]\n    upper = sorted(vals)[-1]\n    step = (upper - lower) / len(colors)\n    print(lower, upper, step)\n    range_list = []\n    for c in colors:\n        cat = [lower, lower + step, c]\n        # sym = QgsSymbol.defaultSymbol(layer.geometryType())\n        sym = QgsMarkerSymbol.createSimple({'name': 'arrow'})\n\n        sym.setColor(QColor(cat[2]))\n        rng = QgsRendererRange(cat[0], cat[1], sym, '{0:.1f}-{1:.1f}'.format(cat[0], cat[1]))\n        print(cat[0], cat[1], cat[2])\n        range_list.append(rng)\n        lower = (lower + step) + 0.001\n    # sym.setSize(3)\n    # sym.setName('arrow')\n    # sym.symbolLayer(0)\n    sym.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertyAngle, QgsProperty.fromField('dir'))\n    renderer = QgsGraduatedSymbolRenderer(fld, range_list)\n\n    renderer.setSymbolSizes(1, 5)\n    layer.setRenderer(renderer)\n    layer.triggerRepaint()\n\n    # proyecto.addMapLayer(layer)\n\n\ndef ftle():\n    file_in = r'http://thredds-gfnl.usc.es/thredds/dodsC/MYCOASTLCS/MYCOASTLCS_Vigo_20210216.nc'\n    print('reading {0}'.format(file_in))\n\n    f = netCDF4.Dataset(file_in)\n\n    nc_attrs = f.ncattrs()\n\n    detailed_text = 'NetCDF Global Attributes:\\n\\n'\n    for nc_attr in nc_attrs:\n        value = '%s' % repr(f.getncattr(nc_attr), )\n        spam = f'- {nc_attr}: {value}; \\n'\n        detailed_text += spam\n    msg = QMessageBox()\n    msg.setText(f'file: {file_in}')\n    msg.setInformativeText(\"fe\")\n    msg.setWindowTitle(\"Open NetCDF\")\n    msg.setDetailedText(detailed_text)\n    msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n    msg.exec_()\n\n\n\n    # Variables with time\n\n    times_in = getvar_longname(f, ['time'])\n    print(times_in)\n    tempos = netCDF4.num2date(times_in[:], units=times_in.units)\n    lat_in = getvar_standardname(f, ['latitude'])[:]\n    lon_in = getvar_standardname(f, ['longitude'])[:]\n\n    ftle_in = getvar_standardname(f, ['forward_FTLE'])[:]\n\n\n    tempo = netCDF4.num2date(times_in[:], units=times_in.units)[0]\n    times = unix_time(tempo)\n    print(tempo)\n\n    ftle = ftle_in[:]\n\n    ftle = ftle[0]\n    print(ftle)\n\n\n\n    f.close()\n\n    msg = QMessageBox()\n    msg.setText(f'file: {file_in}')\n    msg.setInformativeText(\"fe\")\n    msg.setWindowTitle(\"Open NetCDF\")\n    msg.setDetailedText(detailed_text)\n    msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n    msg.exec_()\n\n    proyecto = QgsProject.instance()\n    print(proyecto)\n\n    layer = QgsVectorLayer(\"Point?crs=epsg:4326\", \"capa_radar\", \"memory\")\n    layer.dataProvider().addAttributes([QgsField(\"FTLE\", QVariant.Double)])\n    layer.updateFields()\n    features = []\n\n    for i, lon in enumerate(lon_in):\n        for j, lat in enumerate(lat_in):\n            if not isnan(ftle[j][i]):\n                feature = QgsFeature()\n                feature.setFields(layer.fields())\n                pt = QgsPointXY(lon, lat)\n                geom = QgsGeometry.fromPointXY(pt)\n                feature.setGeometry(geom)\n                feature.setAttributes(\n                    [float(ftle[j][i])])\n                features.append(feature)\n    layer.dataProvider().addFeatures(features)\n    proyecto.addMapLayer(layer)\n\n    return layer\n","sub_path":"others/vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":13219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"545609902","text":"\"\"\"\n    File: JukeBox.py\n    Purpose: This file implements the JukeBox class.\n\"\"\"\n\nimport pygame\nimport os\nfrom PersistentState import get_persistent_state\nfrom ExceptionClasses import 
InvalidPath\nfrom ExceptionClasses import LevelNotFound\nfrom FileOperations import log\n\n# Jukebox that will be used once the function get_jukebox is called\nJUKEBOX = None\n\nLOG = log(__name__)\n\nPYGAME_MIXER = pygame.mixer\n\ndef get_jukebox():\n    \"\"\"\n    This gets the jukebox if it's already created, or else creates one\n    and returns it.\n    \"\"\"\n    global JUKEBOX\n    if JUKEBOX is None:\n        LOG.info(\"Jukebox has been initialized.\")\n        JUKEBOX = JukeBox()\n\n    return JUKEBOX\n\n\ndef play_sound(path):\n    \"\"\"\n    Plays the sound in the given path.\n    \"\"\"\n    assert path is not None, \"Given path is of None type!\"\n    assert type(path) is str, \"Given path is not a string!\"\n\n    try:\n        jukebox = get_jukebox()\n        jukebox.play_sound(path)\n    except InvalidPath as error:\n        LOG.error('Path \"{0}\" invalid.'.format(error.path))\n    except Exception as error:\n        LOG.error(\"Unknown error occurred.\")\n\ndef get_song_normalization(name):\n    \"\"\"\n    Adds a default volume normalization to all music in the\n    game except the title track.\n    name: String\n        This represents the name of the music.\n    \"\"\"\n    assert name is not None, \"Given name is of None type!\"\n    assert type(name) is str, \"Given name is not a string!\"\n\n    if name == 'title':\n        normalization = 1\n    else:\n        normalization = .3\n\n    LOG.info(\"Normalization of song is: \" + str(normalization))\n\n    return normalization\n\nclass JukeBox(object):\n    \"\"\"\n    This class is responsible for managing the in-game music\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n        Initialize the jukebox with a certain sound volume if set\n        beforehand, or set it to the default of 70.\n        Also initialize all the music names in a list.\n        \"\"\"\n        # Sets the initial value of the sound\n        PYGAME_MIXER.init()\n        self.__current = None # Current sound being played\n        self.__music_volume = 0\n        self.__sfx_volume = 0\n\n        persistent_state = get_persistent_state()\n        # In case there's a pre-existing config file\n        if persistent_state.forever.get('sfx') is not None:\n            music = persistent_state.get_int_forever('music')\n            sfx = persistent_state.get_int_forever('sfx')\n            self.set_music_volume(music)\n            self.set_sfx_volume(sfx)\n        else:\n            self.set_music_volume(70)\n            self.set_sfx_volume(70)\n        self.__sounds = {} # Cache of sounds loaded in memory\n        # Map of music tracks by level\n        self.__music_map = {'intro': 'biologytake2', '12-0': 'chemistry', \\\n                            '13-0': 'chemistry', '14-0': 'chemistry', \\\n                            '17-3': 'chemistry', '18-0': 'chemistry', \\\n                            '19a-0': 'chemistry', '19b-1': 'chemistry', \\\n                            '19-0': 'chemistry', '99-0': 'bossmusic',}\n\n\n    def set_music_volume(self, percent):\n        \"\"\"\n        Sets the music volume.\n        percent: Int\n            This represents the percent of the volume to be set.\n        \"\"\"\n        assert percent is not None, \"Given percent is of None type!\"\n        assert type(percent) is int, \"Given percent is not an integer!\"\n        LOG.info(\"Percent of music to be set is: \" + str(percent))\n\n        # Clamp to at most 100 so it can be normalized later\n        upper_bound = min(100, int(percent))\n\n        # Clamp to at least 0 to get the actual percent of volume to set\n        percent = max(0, upper_bound)\n        self.__music_volume = percent\n        self.update_volume(self.__current)\n\n    def update_volume(self, song):\n        \"\"\"\n        Updates the volume of the given song.\n        song: String\n            This represents the song to be updated.\n        \"\"\"\n        if song is not None: # Check to see if there is music playing\n            # Multiplies volume by normalization to get the correct volume\n            volume = (self.__music_volume * \\\n                      get_song_normalization(song))\n\n            # This scales the volume into the range between 0 and 1
\n            volume /= 100.0\n            PYGAME_MIXER.music.set_volume(volume)\n            LOG.info(\"Volume has been updated.\")\n\n    def set_sfx_volume(self, percent):\n        \"\"\"\n        Sets the sound effects volume.\n        percent: Int\n            This represents the percent of the volume to be set.\n        \"\"\"\n        assert percent is not None, \"Given percent is of None type!\"\n        assert type(percent) is int, \"Given percent is not an integer!\"\n        LOG.info(\"Percent of sfx to be set is: \" + str(percent))\n\n        # Clamp to at most 100 so it can be normalized later\n        upper_bound = min(100, int(percent))\n\n        # Clamp to at least 0 to get the actual percent of volume to set\n        percent = max(0, upper_bound)\n        self.__sfx_volume = percent\n        self.__sounds = {}\n\n    def get_music_volume(self):\n        \"\"\"\n        This gets the current music volume.\n        \"\"\"\n        return self.__music_volume\n\n    def get_sfx_volume(self):\n        \"\"\"\n        This gets the current sound effects volume.\n        \"\"\"\n        return self.__sfx_volume\n\n    def play_sound(self, path):\n        \"\"\"\n        This plays the sound in the given path.\n        path: String\n            Path where the sound is.\n        \"\"\"\n        ###BadName###\n        assert path is not None, \"Given path is of None type!\"\n        assert type(path) is str, \"Given path is not a string!\"\n\n        LOG.debug(\"Given path of sound is: \" + path)\n\n        # This gets the sound if pygame already loaded it\n        snd = self.__sounds.get(path)\n        if snd is None:\n            ###BadName###\n            fpath = 'sound/SFX/' + path + '.ogg' # Relative path to sound file\n            fpath = fpath.replace('.wav', '.ogg')\n            fpath = fpath.replace('/', os.sep)\n            fpath = fpath.replace('\\\\', os.sep)\n            fpath = fpath.replace('.ogg.ogg', '.ogg')\n            # Check whether the .ogg file exists;\n            # if it does not, fall back to the\n            # .wav version instead\n            if not os.path.exists(fpath):\n                fpath = fpath.replace('.ogg', '.wav')\n            snd = self.__sounds.get(fpath)\n            if snd is None:\n                snd = PYGAME_MIXER.Sound(fpath)\n                volume = self.__sfx_volume / 100.0\n                if path.startswith('talk'):\n                    if 'high' in path:\n                        volume = volume / 2\n                    volume = volume / 3\n                if 'menumove' in path:\n                    volume = volume / 4\n                snd.set_volume(volume)\n                self.__sounds[path] = snd\n            else:\n                self.__sounds[path] = snd\n\n        if snd is None:\n            raise InvalidPath(path)\n\n        LOG.debug(\"Sound volume is: \" + str(snd.get_volume()))\n\n        snd.play()\n\n    def ensure_current_song(self, song):\n        \"\"\"\n        This ensures that if the current song playing is None, the\n        music is stopped; otherwise it tries to find the given song in\n        the folder.\n        song: String\n            Song to be verified.\n        \"\"\"\n        assert song is not None, \"Given song is of None type!\"\n        assert type(song) is str, \"Given song is not a string!\"\n\n        if song == 'bossmusic' and self.__current == 'stringtheory':\n            self.ensure_current_song('stringtheory')\n            return\n        if song is None: # Stops the music if there's nothing to be played\n            LOG.info(\"There is nothing to be played.\")\n            PYGAME_MIXER.music.stop()\n        else:\n            # Adds the relative path to the given song\n            song = 'sound/music/' + song + '.mp3'\n            song = song.replace('/', os.sep)\n            song = song.replace('\\\\', os.sep)\n            # Play the song if the song is not already playing\n            if self.__current != song:\n                self.__current = song\n                self.update_volume(song)\n                PYGAME_MIXER.music.load(song)\n                PYGAME_MIXER.music.play(-1)\n\n    def __get_song_for_level(self, level):\n        \"\"\"\n        Gets the song of the given level\n        level: String\n            Level in which the song resides\n        \"\"\"\n        assert level is not None, \"Given level is of None type!\"\n        assert type(level) is str, \"Given level is not a string!\"\n        try:\n            level_song = 
self.__music_map.get(level, 'astrophysics')\n if level_song == None:\n raise LevelNotFound(level)\n except LevelNotFound as error:\n level = error.level\n LOG.error('Level {0} not found in music map.'.format(level))\n\n return level_song\n\n ###BadName###\n def update(self, levelname):\n \"\"\"\n This updates the jukebox given the levelname\n levelname: String\n Name of the level to update the music in the jukebox\n \"\"\"\n assert levelname is not None, \"Given levelname is of None type!\"\n assert type(levelname) is str, \"Given levelname is not a string!\"\n song = self.__get_song_for_level(levelname)\n\n self.ensure_current_song(song)\n","sub_path":"classes/JukeBox.py","file_name":"JukeBox.py","file_ext":"py","file_size_in_byte":9471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"531990433","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\nimport abc\nimport numpy\n\nfrom LUPY import ancestry as ancestryModule\nfrom PoPs.quantities import quantity as quantityModule\n\n\"\"\"\nDefines incident-energy-dependent functions representing the contribution to the resolved region cross section\nfrom resonances external to the evaluated set.\nThe external R-Matrix is added to the R-Matrix diagonal during resonance reconstruction.\n\"\"\"\n\n\nclass ExternalRMatrix(ancestryModule.AncestryIO, metaclass=abc.ABCMeta):\n \"\"\"\n Abstract base class inherited by the Froehner and SAMMY classes.\n \"\"\"\n moniker = 'externalRMatrix'\n\n def __init__(self, **kwargs):\n\n super().__init__()\n provided_terms = set(kwargs.keys())\n required_terms = {'singularityEnergyBelow', 'singularityEnergyAbove'}\n if not required_terms.issubset(provided_terms):\n missing = required_terms.difference(provided_terms)\n raise AttributeError(\"%s external R-Matrix is missing required terms: %s\" % (self.type, \", \".join(missing)))\n extra = provided_terms.difference(self.ancestryMembers)\n if extra:\n raise AttributeError(\"%s external R-Matrix received unexpected terms: %s\" % (self.type, \", \".join(extra)))\n self._terms = kwargs\n\n @property\n @abc.abstractmethod\n def type(self): pass\n\n @property\n @abc.abstractmethod\n def terms(self): pass\n\n @abc.abstractmethod\n def evaluate(self, energies): pass\n\n def getTerm(self, key, unit):\n result = self.terms.get(key)\n if result is None:\n return 0\n return result.float(unit)\n\n def toXML_strList(self, indent='', **kwargs):\n\n indent2 = indent + ' '\n xmlString = ['%s<%s type=\"%s\">' % (indent, self.moniker, self.type)]\n for key in self.ancestryMembers:\n term = self.terms.get(key)\n if term is not None:\n xmlString += term.toXML_strList(indent=indent2, **kwargs)\n xmlString[-1] += ('' % self.moniker)\n\n return xmlString\n\n @classmethod\n def parseNodeUsingClass(cls, element, xPath, linkData, **kwargs):\n\n xPath.append(element.tag)\n\n terms = {term.get(\"label\"): quantityModule.Double.parseNodeUsingClass(term, xPath, linkData, **kwargs)\n for term in element.findall(\"double\")}\n\n class_ = {\n 'Froehner': Froehner,\n 'SAMMY': SAMMY\n }[element.get(\"type\")]\n\n result = class_(**terms) # FIXME2, this is not using cls.\n\n xPath.pop()\n\n return result\n\n\nclass Froehner(ExternalRMatrix):\n \"\"\"\n Froehner's external R-Matrix parametrization.\n \"\"\"\n ancestryMembers = ('averageRadiationWidth', 'constantExternalR', 'poleStrength', 'singularityEnergyBelow',\n 
'singularityEnergyAbove')\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    @property\n    def type(self):\n        return \"Froehner\"\n\n    @property\n    def terms(self):\n        return self._terms\n\n    def evaluate(self, energies):\n        \"\"\"\n        Evaluate Fröhner's external R-Matrix parametrization at the given energy or energies.\n        @param energies: single energy or numpy array of energies\n        @return: tuple(real part, imaginary part)\n        \"\"\"\n        R0 = self.getTerm('constantExternalR', '')\n        sc = self.getTerm('poleStrength', '')\n        Gamma = self.getTerm('averageRadiationWidth', 'eV')\n        Edown = self.getTerm('singularityEnergyBelow', 'eV')\n        Eup = self.getTerm('singularityEnergyAbove', 'eV')\n        Ebar = (Eup + Edown) / 2\n        I = Eup - Edown\n        realTerm = R0 + 2 * sc * numpy.arctan2(energies - Ebar, I/2)\n        imaginaryTerm = (Gamma * I / 4) / (I**2 / 4 - (energies - Ebar)**2)\n        return realTerm, imaginaryTerm\n\n\nclass SAMMY(ExternalRMatrix):\n    \"\"\"\n    External R-Matrix parametrization from SAMMY.\n    \"\"\"\n    ancestryMembers = ('constantExternalR', 'linearExternalR', 'quadraticExternalR',\n                       'constantLogarithmicCoefficient', 'linearLogarithmicCoefficient',\n                       'singularityEnergyBelow', 'singularityEnergyAbove')\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    @property\n    def type(self):\n        return \"SAMMY\"\n\n    @property\n    def terms(self):\n        return self._terms\n\n    def evaluate(self, energies):\n        \"\"\"\n        Evaluate the SAMMY external R-Matrix parametrization at the given energy or energies.\n        @param energies: single energy or numpy array of energies\n        @return: tuple(real part, imaginary part)\n        \"\"\"\n        Rcon = self.getTerm('constantExternalR', '')\n        Rlin = self.getTerm('linearExternalR', '1/eV')\n        Rquad = self.getTerm('quadraticExternalR', '1/eV**2')\n        scon = self.getTerm('constantLogarithmicCoefficient', '')\n        slin = self.getTerm('linearLogarithmicCoefficient', '1/eV')\n        Edown = self.getTerm('singularityEnergyBelow', 'eV')\n        Eup = self.getTerm('singularityEnergyAbove', 'eV')\n        logTerm = numpy.log((Eup - energies) / (energies - Edown))\n        realTerm = Rcon + Rlin * energies + Rquad * energies**2 - slin * (Eup - Edown) - (scon + slin * energies) * logTerm\n        return realTerm, 0\n","sub_path":"fudge/resonances/externalRMatrix.py","file_name":"externalRMatrix.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"19186285","text":"import unittest\nfrom typing import Iterable\n\nfrom thriftybuilder.checksums import DockerChecksumCalculator\nfrom thriftybuilder.build_configurations import DockerBuildConfiguration\nfrom thriftybuilder.containers import BuildConfigurationContainer\nfrom thriftybuilder.tests._common import COPY_DOCKER_COMMAND, ADD_DOCKER_COMMAND, RUN_DOCKER_COMMAND\nfrom thriftybuilder.tests._common import TestWithDockerBuildConfiguration\nfrom thriftybuilder.tests._examples import EXAMPLE_FILE_NAME_1, EXAMPLE_FILE_CONTENTS_1, \\\n    EXAMPLE_FILE_NAME_2, EXAMPLE_FILE_CONTENTS_2, EXAMPLE_RUN_COMMAND, EXAMPLE_IMAGE_NAME\n\n\nclass TestDockerChecksumCalculator(TestWithDockerBuildConfiguration):\n    \"\"\"\n    Tests for `DockerChecksumCalculator`.\n    \"\"\"\n    def setUp(self):\n        super().setUp()\n        self.checksum_calculator = DockerChecksumCalculator()\n\n    def test_calculate_checksum_with_configurations(self):\n        configurations = [\n            self.create_docker_setup()[1],\n            self.create_docker_setup(commands=(EXAMPLE_RUN_COMMAND,))[1],\n            self.create_docker_setup(commands=(EXAMPLE_RUN_COMMAND, EXAMPLE_RUN_COMMAND))[1],\n        ]\n        
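# The setups above differ only in their RUN commands, so each configuration\n        # is expected to yield a distinct checksum.\n        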
self._assert_different_checksums(configurations)\n\n def test_calculate_checksum_when_used_files(self):\n add_file_1_command = f\"{ADD_DOCKER_COMMAND} {EXAMPLE_FILE_NAME_1} files_1\"\n copy_file_2_command = f\"{COPY_DOCKER_COMMAND} {EXAMPLE_FILE_NAME_2} files_2\"\n configurations = [\n self.create_docker_setup()[1],\n self.create_docker_setup(\n commands=(add_file_1_command, ),\n context_files={EXAMPLE_FILE_NAME_1: EXAMPLE_FILE_CONTENTS_1})[1],\n self.create_docker_setup(\n commands=(copy_file_2_command, ),\n context_files={EXAMPLE_FILE_NAME_2: EXAMPLE_FILE_CONTENTS_2})[1],\n self.create_docker_setup(\n commands=(add_file_1_command, copy_file_2_command),\n context_files={EXAMPLE_FILE_NAME_1: EXAMPLE_FILE_CONTENTS_1,\n EXAMPLE_FILE_NAME_2: EXAMPLE_FILE_CONTENTS_2})[1],\n self.create_docker_setup(\n commands=(add_file_1_command, copy_file_2_command),\n context_files={EXAMPLE_FILE_NAME_1: EXAMPLE_FILE_CONTENTS_2,\n EXAMPLE_FILE_NAME_2: EXAMPLE_FILE_CONTENTS_2})[1],\n self.create_docker_setup(\n commands=(add_file_1_command, copy_file_2_command),\n context_files={EXAMPLE_FILE_NAME_1: EXAMPLE_FILE_CONTENTS_1,\n EXAMPLE_FILE_NAME_2: EXAMPLE_FILE_CONTENTS_1})[1]]\n self._assert_different_checksums(configurations)\n\n def test_calculate_checksum_with_changing_from_image(self):\n _, from_configuration_1 = self.create_docker_setup(\n image_name=EXAMPLE_IMAGE_NAME)\n _, from_configuration_2 = self.create_docker_setup(\n image_name=EXAMPLE_IMAGE_NAME, commands=(f\"{RUN_DOCKER_COMMAND} other\", ))\n\n _, configuration = self.create_docker_setup(from_image_name=EXAMPLE_IMAGE_NAME)\n\n self.checksum_calculator.managed_build_configurations.add(from_configuration_1)\n checksum_1 = self.checksum_calculator.calculate_checksum(configuration)\n self.checksum_calculator.managed_build_configurations.add(from_configuration_2)\n checksum_2 = self.checksum_calculator.calculate_checksum(configuration)\n self.assertNotEqual(checksum_1, checksum_2)\n\n def test_calculate_checksum_with_changing_from_from_image(self):\n grandparent_name = \"grandparent\"\n parent_name = \"parent\"\n\n _, grandparent_configuration_1 = self.create_docker_setup(image_name=grandparent_name)\n _, grandparent_configuration_2 = self.create_docker_setup(\n image_name=grandparent_name, commands=(EXAMPLE_RUN_COMMAND, ))\n _, parent_configuration = self.create_docker_setup(image_name=parent_name, from_image_name=grandparent_name)\n _, configuration = self.create_docker_setup(from_image_name=parent_name)\n\n self.checksum_calculator.managed_build_configurations.add(parent_configuration)\n self.checksum_calculator.managed_build_configurations.add(grandparent_configuration_1)\n checksum_1 = self.checksum_calculator.calculate_checksum(configuration)\n self.checksum_calculator.managed_build_configurations.add(grandparent_configuration_2)\n checksum_2 = self.checksum_calculator.calculate_checksum(configuration)\n self.assertNotEqual(checksum_1, checksum_2)\n\n def test_calculate_checksum_type(self):\n configuration = self.create_docker_setup()[1]\n calculator = DockerChecksumCalculator(\n managed_build_configurations=BuildConfigurationContainer([configuration]))\n self.assertIsInstance(calculator.calculate_checksum(configuration), str)\n\n def _assert_different_checksums(self, configurations: Iterable[DockerBuildConfiguration]):\n \"\"\"\n Assert that the given configurations all have different checksums.\n :param configurations: the configurations to consider\n :raises AssertionError: when the assertion fails\n \"\"\"\n checksums = set()\n i = 0\n for 
configuration in configurations:\n checksum = self.checksum_calculator.calculate_checksum(configuration)\n self.assertNotIn(checksum, checksums)\n checksums.add(checksum)\n i += 1\n if len(checksums) != i:\n raise AssertionError()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"thriftybuilder/tests/test_checksums.py","file_name":"test_checksums.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"128176205","text":"from typing import List\nclass Solution:\n def triangleNumber(self, nums: List[int]) -> int:\n nums.sort()\n count = 0\n for i in range(len(nums)-1,1,-1):\n anchor = nums[i]\n low = 0\n high = i - 1\n while low < high:\n if nums[high]+nums[low] > anchor:\n count += high-low\n high -= 1\n else:\n low += 1\n return count\n\nobj = Solution()\ntc = [[2,2,3,4], [1,2,3,4,5,6]]\nfor t in tc:\n print(obj.triangleNumber(t))\n","sub_path":"611-Valid-Triangle-Number.py","file_name":"611-Valid-Triangle-Number.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"275545956","text":"def solution(nums):\n count = 0\n nums.sort()\n big = nums[-3] + nums[-2] + nums[-1]\n table = [False] + [True] * big\n for i in range (2, len(table)):\n if table[i - 1]:\n for j in range (2 * i, len(table), i):\n table[j - 1] = False\n i = j = k = 0\n for i in range (len(nums) - 2):\n for j in range (i + 1, len(nums) -1):\n for k in range (j + 1, len(nums)):\n if table[(nums[i] + nums[j] + nums[k]) - 1]:\n count += 1\n return count","sub_path":"소수 만들기.py","file_name":"소수 만들기.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243662884","text":"import pytest\n\nimport ibis\nimport ibis.expr.datatypes as dt\n\n\ndef test_timestamp_accepts_date_literals(alltypes):\n date_string = '2009-03-01'\n param = ibis.param(dt.timestamp).name('param_0')\n expr = alltypes.mutate(param=param)\n params = {param: date_string}\n result = expr.compile(params=params)\n expected = \"\"\"\\\nSELECT *, @param AS `param`\nFROM `ibis-gbq.testing.functional_alltypes`\"\"\"\n assert result == expected\n\n\n@pytest.mark.parametrize(\n ('distinct', 'expected_keyword'),\n [\n (True, 'DISTINCT'),\n (False, 'ALL'),\n ]\n)\ndef test_union(alltypes, distinct, expected_keyword):\n expr = alltypes.union(alltypes, distinct=distinct)\n result = expr.compile()\n expected = \"\"\"\\\nSELECT *\nFROM `ibis-gbq.testing.functional_alltypes`\nUNION {}\nSELECT *\nFROM `ibis-gbq.testing.functional_alltypes`\"\"\".format(expected_keyword)\n assert result == expected\n\n\ndef test_ieee_divide(alltypes):\n expr = alltypes.double_col / 0\n result = expr.compile()\n expected = \"\"\"\\\nSELECT IEEE_DIVIDE(`double_col`, 0) AS `tmp`\nFROM `ibis-gbq.testing.functional_alltypes`\"\"\"\n assert result == expected\n\n\ndef test_identical_to(alltypes):\n t = alltypes\n pred = t.string_col.identical_to('a') & t.date_string_col.identical_to('b')\n expr = t[pred]\n result = expr.compile()\n expected = \"\"\"\\\nSELECT *\nFROM `ibis-gbq.testing.functional_alltypes`\nWHERE (((`string_col` IS NULL) AND ('a' IS NULL)) OR (`string_col` = 'a')) AND\n (((`date_string_col` IS NULL) AND ('b' IS NULL)) OR (`date_string_col` = 'b'))\"\"\" # noqa: E501\n assert result == 
expected\n","sub_path":"ibis/bigquery/tests/test_compiler.py","file_name":"test_compiler.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"635704602","text":"\n\n# class header\nclass _AWAY():\n\tdef __init__(self,): \n\t\tself.name = \"AWAY\"\n\t\tself.definitions = [u'somewhere else, or to or in a different place, position, or situation: ', u'at a distance (of or from here): ', u'in or into the usual or a suitable place, especially one that can be closed: ', u'gradually until mostly or completely gone: ', u'in the future: ', u'continuously or repeatedly, or in a busy way: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adverbs'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adverbs/_away.py","file_name":"_away.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"336813178","text":"import numpy as np\nimport open3d\nimport argparse\n\nfrom sunutils import *\n\nparser = argparse.ArgumentParser(description='VoteNet PyTorch training data visualization')\nparser.add_argument('--id', default='010335', type=str, help='id of the scene')\nparser.add_argument('--path', default='/Users/francesco/Desktop/mysunrgbd/', type=str, help='SUNRGBD processed path')\nargs = parser.parse_args()\n\nidx = args.id\n\nPATH_DATASET = args.path\n\nlabels = open(\"{}{}{}{}\".format(PATH_DATASET, \"label_dimension/\", idx, \".txt\"), \"r\")\npoints = []\nclass2color = {}\nclasses = []\n\ncalib = SUNRGBD_Calibration(\"{}{}{}{}\".format(PATH_DATASET, \"calib/\", idx, \".txt\"))\npc_upright_depth = np.loadtxt(\"{}{}{}{}\".format(PATH_DATASET, \"depth/\", idx, \".txt\"))\npc_upright_camera = np.zeros_like(pc_upright_depth)\npc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:, 0:3])\nrgb = pc_upright_camera[:, 3:]\npc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]\npc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)\n\nfor line in labels:\n    obj = SUNObject3d(line)\n    if obj.classname not in class2color:\n        class2color[obj.classname] = [np.random.rand(1), np.random.rand(1), np.random.rand(1)]\n\n    box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)\n    box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)\n\n    points.append(box3d_pts_3d)\n    classes.append(obj.classname)\n\nlabels.close()\n\n''' Draw 3d bounding box\n    1 -------- 0\n   /|         /|\n  2 -------- 3 .\n  | |        | |\n  . 
5 -------- 4\n |/ |/\n 6 -------- 7\n'''\nlines = [[0, 1], [0, 3], [1, 2], [2, 3], [4, 5], [5, 6], [4, 7], [6, 7],\n [0, 4], [1, 5], [2, 6], [3, 7]]\n\npcd = open3d.geometry.PointCloud()\nxyz = np.array(pc_upright_camera[:, :3])\nrgb = np.array(rgb)\n\npcd.points = open3d.utility.Vector3dVector(xyz)\npcd.colors = open3d.utility.Vector3dVector(rgb)\nline_sets = [pcd]\n\nfor i in range(len(points)):\n line_set = open3d.geometry.LineSet()\n line_set.points = open3d.utility.Vector3dVector(points[i])\n line_set.lines = open3d.utility.Vector2iVector(lines)\n colors = [class2color[classes[i]] for j in range(len(lines))]\n line_set.colors = open3d.utility.Vector3dVector(colors)\n line_sets.append(line_set)\n\n## Flip it, otherwise the pointcloud and the bounding boxes will be upside down\nfor geom in line_sets:\n geom.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n\nopen3d.visualization.draw_geometries(line_sets)\n\n","sub_path":"votenet/training_data_viz.py","file_name":"training_data_viz.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"528682904","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 28 15:53:00 2018\n\n@author: giguerf\n\"\"\"\nfrom PMG.COM.writebook import writeHDF5\nfrom PMG.COM.openbook import openHDF5\nfrom PMG.COM import table as tb, data as dat\n\ntable = tb.get('SLED')\n\nSLED = 'P:/SLED/Data/'\nchlist = ['S0SLED000000ACXD','12HEAD0000Y7ACZA',\n '12HEAD0000Y7ACXA','12HEAD0000Y6ACXA','12HEAD0000Y2ACXA',\n '12CHST0000Y7ACXC','12CHST0000Y6ACXC','12CHST0000Y2ACXC',\n '12PELV0000Y7ACXA','12PELV0000Y6ACXA','12PELV0000Y2ACXA',\n '12CHST0000Y7ACRC', '12CHST0000Y7DSXB', '12HEAD0000Y7ACRA']\n\nwriteHDF5(SLED, chlist)\n\ntime, fulldata = openHDF5(SLED, chlist)\n\n#time, clean_data = dat.import_data(SLED, chlist, check=True)\n","sub_path":"SLED/SLED.py","file_name":"SLED.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"616722044","text":"#!/usr/bin/env python3\n##################################################################\n# Usage: ATGC.py input_file output_file r/c/rc #\n# Manual: r/R:reverse c/C:complement rc/RC: reverse complement #\n##################################################################\n\n#def Complement/Reverse/Reverse_Complement algorithm\ndef seq(x,y):\n\tcomp = []\n\tfor i in x:\n\t\tif i == 'A': comp.append('T')\n\t\telif i == 'a': comp.append('t')\n\t\telif i == 'T': comp.append('A')\n\t\telif i == 't': comp.append('a')\n\t\telif i == 'G': comp.append('C')\n\t\telif i == 'g': comp.append('c')\n\t\telif i == 'C': comp.append('G')\n\t\telif i == 'c': comp.append('g')\n\t\telif i == 'U': comp.append('A')\n\t\telif i == 'u': comp.append('a')\n\tif y.upper() == 'C':\n\t\treturn ''.join(comp)\n\tif y.upper() == 'R':\n\t\treturn ''.join(x)[::-1]\n\tif y.upper() == 'RC':\n\t\treturn ''.join(comp)[::-1]\n\telse:return ''.join(x)\n\n#def base\nbase=['A','a','T','t','G','g','C','c','U','u']\t\n\t\n#def get list from input or file\ndef add(x):\n\ts = []\n\tn = len(x)\n\t#get list\n\tfor i in range(n):\n\t\tif x[i] in base:\n\t\t\ts.append(x[i])\n\t#RNA sequence \"U\" warning\n\tif 'U' in s or 'u' in s:\n\t\t\tprint('!!!The sequence contains \"U\" or \"u\", Using A:U pair!!!')\n\treturn s\n\t\t\t\n####MAIN####\n#from file (arguments)\ntry:\n\timport sys\n\tin_file = open(sys.argv[1])\n\tout_file = open(sys.argv[2], 'w')\n\tmode = 
sys.argv[3]\n\tcontent = in_file.read()\n\tsequence = add(content)\n\tout_file.writelines(seq(sequence, mode))\n\tout_file.close()\n\n#from raw input\nexcept IndexError:\n\twhile True:\n\t\tcontent = input('Enter the input sequence:')\n\t\tif not content: break\n\t\t#kind of output\n\t\tmode = input('Reverse(r) or Complement(c) or Reverse Complement(rc):')\n\t\tsequence = add(content)\n\t\tprint(seq(sequence, mode))\n\n################ END ################\n#          Created by Aone          #\n#       zhaoshuoxp@whu.edu.cn       #\n################ END ################","sub_path":"ATGC.py","file_name":"ATGC.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"406485937","text":"\"\"\"Vacancies application admin.\"\"\"\n\nfrom django.contrib import admin\n\nfrom .models import City, Vacancy\n\n\nclass CityInline(admin.TabularInline):\n    \"\"\"City inline admin model.\"\"\"\n\n    model = City\n\n\n@admin.register(Vacancy)\nclass VacancyAdmin(admin.ModelAdmin):\n    \"\"\"Vacancies model admin.\"\"\"\n\n    list_display = ('name',)\n    filter_horizontal = ('cities',)\n\n\nadmin.site.register(City)\n","sub_path":"cleanok/vacancies/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"366434692","text":"\"\"\"\nAssignment configurations for Otter Assign\n\"\"\"\n\nimport yaml\n\nfrom .constants import BLOCK_QUOTE\nfrom .utils import get_source, get_spec\nfrom ...utils import convert_config_description_dict\n\n_DEFAULT_ASSIGNMENT_CONFIGURATIONS_WITH_DESCRIPTIONS = [\n    {\n        \"key\": \"requirements\",\n        \"description\": \"the path to a requirements.txt file\",\n        \"default\": None,\n    },\n    {\n        \"key\": \"overwrite_requirements\",\n        \"description\": \"whether to overwrite Otter's default requirements.txt in Otter Generate\",\n        \"default\": False,\n    },\n    {\n        \"key\": \"environment\",\n        \"description\": \"the path to a conda environment.yml file\",\n        \"default\": None,\n    },\n    {\n        \"key\": \"run_tests\",\n        \"description\": \"whether to run the assignment tests against the autograder notebook\",\n        \"default\": True,\n    },\n    {\n        \"key\": \"solutions_pdf\",\n        \"description\": \"whether to generate a PDF of the solutions notebook\",\n        \"default\": False,\n    },\n    {\n        \"key\": \"template_pdf\",\n        \"description\": \"whether to generate a filtered Gradescope assignment template PDF\",\n        \"default\": False,\n    },\n    {\n        \"key\": \"init_cell\",\n        \"description\": \"whether to include an Otter initialization cell in the output notebooks\",\n        \"default\": True,\n    },\n    {\n        \"key\": \"check_all_cell\",\n        \"description\": \"whether to include an Otter check-all cell in the output notebooks\",\n        \"default\": True,\n    },\n    {\n        \"key\": \"export_cell\",\n        \"description\": \"whether to include an Otter export cell in the output notebooks\",\n        \"default\": [\n            {\n                \"key\": \"instructions\",\n                \"description\": \"additional submission instructions to include in the export cell\",\n                \"default\": \"\",\n            },\n            {\n                \"key\": \"pdf\",\n                \"description\": \"whether to include a PDF of the notebook in the generated zip file\",\n                \"default\": True,\n            },\n            {\n                \"key\": \"filtering\",\n                \"description\": \"whether the generated PDF should be filtered\",\n                \"default\": True,\n            },\n            {\n                \"key\": \"force_save\",\n                \"description\": \"whether to force-save the notebook with JavaScript (only works in \" \\\n                    \"classic notebook)\",\n                \"default\": 
False,\n            },\n            {\n                \"key\": \"run_tests\",\n                \"description\": \"whether to run student submissions against local tests during export\",\n                \"default\": False,\n            },\n        ],\n    },\n    {\n        \"key\": \"seed\",\n        \"description\": \"a seed for intercell seeding\",\n        \"default\": None,\n    },\n    {\n        \"key\": \"generate\",\n        \"description\": \"grading configurations to be passed to Otter Generate as an \"\\\n            \"otter_config.json; if false, Otter Generate is disabled\",\n        \"default\": False,\n    },\n    {\n        \"key\": \"save_environment\",\n        \"description\": \"whether to save the student's environment in the log\",\n        \"default\": False,\n    },\n    {\n        \"key\": \"variables\",\n        \"description\": \"a mapping of variable names to type strings for serializing environments\",\n        \"default\": {},\n    },\n    {\n        \"key\": \"ignore_modules\",\n        \"description\": \"a list of modules to ignore variables from during environment serialization\",\n        \"default\": [],\n    },\n    {\n        \"key\": \"files\",\n        \"description\": \"a list of other files to include in the output directories and autograder\",\n        \"default\": [],\n    },\n    {\n        \"key\": \"autograder_files\",\n        \"description\": \"a list of other files only to include in the autograder\",\n        \"default\": [],\n    },\n    {\n        \"key\": \"plugins\",\n        \"description\": \"a list of plugin names and configurations\",\n        \"default\": [],\n    },\n    {\n        \"key\": \"test_files\",\n        \"description\": \"whether to store tests in separate .py files rather than in the notebook \" \\\n            \"metadata\",\n        \"default\": True,\n    },\n    {\n        \"key\": \"colab\",\n        \"description\": \"whether this assignment will be run on Google Colab\",\n        \"default\": False,\n    },\n]\n\nclass Assignment:\n    \"\"\"\n    A class that houses configurations for an assignment. Contains a dictionary of default arguments\n    that can be updated in an instance using the ``update()`` method. Functions similarly to an\n    ``AttrDict`` in that keys of the configuration can be accessed as ``assignment.``.\n\n    To access a configuration value, use the dot syntax. For example, to access the ``generate`` key\n    of an ``Assignment`` instance ``assignment``:\n\n    .. code-block:: python\n\n        assignment.generate\n\n    If ``generate`` is present in ``assignment.config``, then the value in that dictionary will be\n    returned. If it is not, the value in ``Assignment.defaults`` will be returned instead. Configurations\n    can also be updated using dot syntax:\n\n    .. 
code-block:: python\n \n assignment.generate = True\n\n If a key not present in ``Assignment.defaults`` is attempted to be accessed or set, an \n ``AttributeError`` will be thrown.\n\n Attributes:\n config (``dict``): the configurations specific to this assignment; keys in this dictionary\n are used before the defaults if present.\n \"\"\"\n defaults = {\n \"master\": None,\n \"result\": None,\n \"seed_required\": False,\n \"_otter_config\": None,\n \"lang\": None,\n \"_temp_test_dir\": None, # path to a temp dir for tests for otter generate\n **convert_config_description_dict(_DEFAULT_ASSIGNMENT_CONFIGURATIONS_WITH_DESCRIPTIONS),\n }\n\n def __init__(self):\n self.config = type(self).defaults.copy()\n\n def __getattr__(self, attr):\n if attr in type(self).defaults:\n return self.config.get(attr, type(self).defaults[attr])\n raise AttributeError(f\"Assignment has no attribute {attr}\")\n\n def __setattr__(self, attr, value):\n if attr == \"config\":\n self.__dict__[attr] = value\n elif attr in type(self).defaults:\n self.config[attr] = value\n else:\n raise AttributeError(f\"Assignment has no attribute {attr}\")\n\n def update(self, config):\n \"\"\"\n Updates the configuration stored in this assignment using keys and values in the dictionary\n ``config``\n\n Args:\n config (``dict``): new configurations\n \"\"\"\n for k in config.keys():\n if k not in self.allowed_configs:\n raise ValueError(f\"Unexpected assignment config: '{k}'\")\n self.config.update(config)\n\n @property\n def is_r(self):\n \"\"\"\n Whether the language of the assignment is R\n \"\"\"\n return self.lang == \"r\"\n \n @property\n def is_python(self):\n \"\"\"\n Whether the language of the assignment is Python\n \"\"\"\n return self.lang == \"python\"\n\n @property\n def is_rmd(self):\n \"\"\"\n Whether the input file is an RMarkdown document\n \"\"\"\n return self.master.suffix.lower() == \".rmd\"\n \n @property\n def allowed_configs(self):\n \"\"\"\n The list of allowed configuration keys\n \"\"\"\n return type(self).defaults.keys()\n\ndef read_assignment_metadata(cell):\n \"\"\"\n Return assignment metadata from an assignment cell\n \n Args:\n cell (``nbformat.NotebookNode``): the assignment cell\n \n Returns:\n ``dict``: assignment metadata\n \"\"\"\n source = get_source(cell)\n begin_assignment_line = get_spec(source, \"assignment\")\n i, lines = begin_assignment_line + 1, []\n while source[i].strip() != BLOCK_QUOTE:\n lines.append(source[i])\n i = i + 1\n metadata = yaml.full_load('\\n'.join(lines))\n return metadata\n\ndef is_assignment_cell(cell):\n \"\"\"\n Returns whether cell contains BEGIN ASSIGNMENT in a block quote\n \n Args:\n cell (``nbformat.NotebookNode``): notebook cell\n \n Returns:\n ``bool``: whether the current cell is an assignment definition cell\n \"\"\"\n if cell.cell_type != 'markdown':\n return False\n return get_spec(get_source(cell), \"assignment\") is not None\n","sub_path":"otter/assign/v0/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":8346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614454291","text":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. 
See license.txt\n\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\n\nclass Note(Document):\n\tdef autoname(self):\n\t\t# replace forbidden characters\n\t\timport re\n\t\tself.name = re.sub(\"[%'\\\"#*?`]\", \"\", self.title.strip())\n\n\tdef before_print(self):\n\t\tself.print_heading = self.name\n\t\tself.sub_heading = \"\"\n\ndef get_permission_query_conditions(user):\n\tif not user: user = frappe.session.user\n\n\tif user == \"Administrator\":\n\t\treturn \"\"\n\n\treturn \"`tabNote`.public=1\"\n\ndef has_permission(doc, ptype, user):\n\tif doc.public == 1 or user == \"Administrator\":\n\t\treturn True\n\n\tif user == doc.owner:\n\t\treturn True\n\n\tnote_user_map = dict((d.user, d) for d in doc.get(\"share_with\"))\n\tif user in note_user_map:\n\t\tif ptype == \"read\":\n\t\t\treturn True\n\t\telif note_user_map.get(user).permission == \"Edit\":\n\t\t\treturn True\n\n\treturn False\n","sub_path":"python/erpnext/2015/4/note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"623336197","text":"\nimport numpy as np #importing library\nl1 = float (input(\"Enter the length end-to-end(in m)->\")) #taking data from user\nl2 = float (input(\"Enter second length end-to-end(in m)->\"))\nlm = max (l1 , l2)\nln = min (l1 , l2)\nd = float (input(\"Enter the Depth of slab(in m)->\"))\nw = float (input(\"Enter the width of beam(in m)->\"))\nl = float (input(\"Enter the U.D.Live Load(in kN/m^2)->\"))\nprint(\"\\nEnter the conditions available from the given data \\n\\n 1.Interior Panel \\n 2.One short Edge continuous \\n 3.One Long Edge Discontinuous \\n 4.Two Adjacent edges Discontinuous \\n 5.Two short Edges Discontinuous \\n 6.Two long Edges Discontinuous \\n 7.Three edges Discontinuous(One long Edge Discontinuous) \\n 8.Three edges discontinuous(One short Edge Continuous) \\n 9.Four edges discontinuous \\n\")\nl3 = int(input(\"Enter the Condition->\"))\n\nif l3 == 1: #different conditions for effective length\n ly = lm - w\n lx = ln - w\nelif l3 == 2:\n ly = lm - w\n lx = ln - (0.5*w+w)\nelif l3 == 3:\n ly = lm - 1.5*w\n lx = ln-w\nelif l3 == 4:\n ly = lm - 1.5*w\n lx = ln - 1.5*w\nelif l3 == 5:\n ly = lm-w\n lx = ln-w\nelif l3 == 6:\n ly = lm-w\n lx = ln-w\nelif l3 == 7:\n ly = lm-1.5*w\n lx = ln-w\nelif l3 == 8:\n ly = lm-w\n lx = ln-1.5*w\nelif l3 == 9:\n ly = lm-w\n lx = ln-w\n\nprint(\"Effective length(ly)= \", ly) #printing the value of effective length\nprint(\"Effective length(lx)= \", lx)\n\nratio = ly/lx #calculating the ratio for alpha value or type of slab\nvolume = lm*ln*d #calculating volume\ndead_load = 2.5*volume #calculating dead load\ntotal_load = dead_load+l #total load\nw = 1.5*total_load\n\nprint(\"W is \",w) #Weight of the slab\n\nwith open(\"ffs.txt\") as f: #open file where the value of alpha is stored\n a = f.readlines()\n\nb = []\nfor line in a:\n array = [float(x) for x in line[:-1].split(\" \")] #add data to matrix\n b.append(array)\nb = np.asarray(b)\n\nr=len(b)\nc=len(b[0])\nans=-1\n\nfor i in range(0,c):\n if b[0][i] >= ratio:\n ans=i #getting the value of condition\n break;\n\nrn = 2*l3-1\nrp = 2*l3\n\nif ans==-1:\n print(\"One way slab i.e. 
ratio is greater than 2.0\")\nelse:\n    if b[0][ans]==ratio:\n        alphaxn = b[rn][ans]\n        #print(alphaxn)\n        alphaxp = b[rp][ans]\n    else:\n        alphaxn2=b[rn][ans]\n        alphaxn1=b[rn][ans-1] #getting the value of alpha\n        alphaxp2=b[rp][ans]\n        alphaxp1=b[rp][ans-1]\n        alphaxn=alphaxn1 + (alphaxn2-alphaxn1)*(ratio-b[0][ans-1])/(b[0][ans]-b[0][ans-1]) #interpolating the value of alpha\n        alphaxp=alphaxp1 + (alphaxp2-alphaxp1)*(ratio-b[0][ans-1])/(b[0][ans]-b[0][ans-1])\n\n    Mxn = round(alphaxn*w*lx*lx, 2) #calculating moment for alpha x\n    Mxp = round(alphaxp*w*lx*lx, 2)\n\n    print(\"\\nAlpha x(-) = \"+str(alphaxn))\n    print(\"Alpha x(+) = \"+str(alphaxp))\n\n    if Mxn == 0:\n        print(\"Mx(-) condition does not exist\") #Special conditions of moment\n    else:\n        print(\"Mx(-) =\", Mxn)\n    if Mxp == 0:\n        print(\"Mx(+) condition does not exist\")\n    else:\n        print(\"Mx(+) =\", Mxp)\nansy=c-1\nalphayn = b[rn][ansy]\nalphayp = b[rp][ansy]\n\nMyn = round(alphayn*w*lx*lx, 2) #calculating moment for alpha y\nMyp = round(alphayp*w*lx*lx, 2)\n\nif Myn == 0:\n    print(\"My(-) condition does not exist\")\nelse:\n    print(\"My(-) =\", Myn)\nif Myp == 0:\n    print(\"My(+) condition does not exist\") #printing the value of moment\nelse:\n    print(\"My(+) =\", Myp)\nif ratio <=2:\n    print(\"It's a two way slab\")\nelse:\n    print(\"One way slab\")\n\nprint(\"Ratio is =\",ratio)\n\nlx1 = 100*ln #defining the pixels of graph\nly1 = 100*lm\n\nfrom tkinter import Tk, Canvas, Frame, BOTH, W #importing library for graph\nclass Example(Frame):\n\n    def __init__(self):\n        super().__init__()\n\n        self.initUI()\n\n\n    def initUI(self):\n\n        self.master.title(\"Colours\")\n        self.pack(fill=BOTH, expand=1)\n\n        canvas = Canvas(self)\n        canvas.create_rectangle(20, lx1+20, ly1+20, 20, #drawing the slab\n            outline=\"#fb0\", fill=\"#fb0\")\n        canvas.create_text(0, 10, anchor=W, font=\"Purisa\",\n            text=\"(0,\"+str(ln)+\")\")\n        canvas.create_text(0, 30+lx1, anchor=W, font=\"Purisa\",\n            text=\"(0,0)\")\n        canvas.create_text(ly1, 10, anchor=W, font=\"Purisa\",\n            text=\"(\"+str(lm)+\",\"+str(ln)+\")\")\n        canvas.create_text(ly1, 30+lx1, anchor=W, font=\"Purisa\",\n            text=\"(\"+str(lm)+\",0)\")\n        canvas.create_line(ly1/2+10, lx1/2+20, ly1/2+30, lx1/2+20)\n        canvas.create_line( ly1/2+20,lx1/2 + 10, ly1/2+20, lx1/2+30)\n        canvas.create_text(ly1/2+30, lx1/2+20, anchor=W, font=\"Purisa\",\n            text=\"My+ =(\"+str(Myp)+\")\")\n        #canvas.create_text(ly1/2+70, lx1/2+20, anchor=W, font=\"Purisa\",\n        #    text=\"(\"+str(Mxp)+\")\")\n        canvas.create_text(ly1+30, lx1/2+20, anchor=W, font=\"Purisa\", #printing the values of moment\n            text=\"My- =(\"+str(Myn)+\")\")\n        canvas.create_text(ly1/2+10, lx1/2, anchor=W, font=\"Purisa\",\n            text=\"Mx+ =(\"+str(Mxp)+\")\")\n        canvas.create_text(ly1/2, lx1+40, anchor=W, font=\"Purisa\",\n            text=\"Mx- =(\"+str(Mxn)+\")\")\n        canvas.pack(fill=BOTH, expand=1)\n\n\ndef main():\n\n    root = Tk()\n    ex = Example()\n    root.geometry(\"400x100+300+300\")\n    root.mainloop()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"slab.py","file_name":"slab.py","file_ext":"py","file_size_in_byte":6236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"211963788","text":"print('Enter the poem, separating lines by pressing Enter; enter the word Конец as the last line')\nwhile True:\n    s=str(input())\n    if (s=='Конец') or (s=='конец'):\n        break\n    k=0\n    for x in s:\n        if x in 'аеёиоуыэюя':  # Russian vowels\n            k+=1\n    
print(k)","sub_path":"count_letters.py","file_name":"count_letters.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"423386976","text":"\"\"\"\na is the arrival instant of the request\nd is the latest instant the request can tolerate\nm is the identifier of the content (message) asked for by the request\ne is the energy needed to answer the request\n\n\nA partition contains one or more sendings\n\"\"\"\n\n\n\nimport numpy as np\n\n\ndef energy(reqs):\n    \"\"\"Maximum energy of a set of requests\n\n    Args:\n        reqs ([](a, d, m, e)): A set of requests\n\n    Returns:\n        [float]: The energy it takes to send this set\n    \"\"\"\n    return np.max(reqs[:,3])\n\n\n\n\ndef delais(reqs):\n    \"\"\"The delay incurred to serve a set of requests\n\n    Args:\n        reqs ([][(a, d, m, e)]): A set of requests\n\n    Returns:\n        [float]: The accumulated delay for sending these requests\n    \"\"\"\n    def exceed_by(req, t_envoie):\n        ex = t_envoie - req[1]\n        return ex if ex > 0 else 0\n    # Transmission can only happen once all the\n    # requests have arrived\n    t_envoie = np.max(reqs[:,0])\n    acc = 0\n    for r in reqs:\n        acc = acc + exceed_by(r, t_envoie)\n    return acc\n\n\ndef partition(collection):\n    \"\"\"A generator of all the partitions of a set\n    Source : https://stackoverflow.com/a/30134039\n\n    Args:\n        collection ([]): [description]\n\n    Yields:\n        [type]: one partition\n    \"\"\"\n    if len(collection) == 1:\n        yield [ collection ]\n        return\n\n    first = collection[0]\n    for smaller in partition(collection[1:]):\n        # insert `first` in each of the subpartition's subsets\n        for n, subset in enumerate(smaller):\n            yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]\n        # put `first` in its own subset\n        yield [ [ first ] ] + smaller\n\n\n\ndef is_partition_ok(p):\n    \"\"\"Check that each sending answers only a single content\n    (the one-message-at-a-time condition)\n\n    Args:\n        p ([][](a, d, m, e))): a partition, i.e. a set of sendings\n\n    Returns:\n        [bool]:\n    \"\"\"\n    chck = []\n    for envoie in p:\n        un = np.array(envoie)\n        # A single content per sending\n        chck += [len(np.unique(un[:,2]))==1]\n\n    # All the sendings must be OK\n    return all(chck)\n\n\n\ndef ok_partition(entries):\n    \"\"\"Generate and filter the partitions, keeping\n    only those that respect one sending at a time\n\n    Args:\n        entries ([](a, d, m, e))): The set of requests\n\n    Yields:\n        [type]: a valid partition\n    \"\"\"\n    for p in partition(entries):\n        if is_partition_ok(p):\n            yield p\n    return","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"647006517","text":"# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport datetime as dt\nimport numpy as np\nimport ctypes as ct\nimport time\nimport talib as ta\nimport argparse\nfrom utility import round_series, get_realtime_all_st, st_pattern, sendmail\nimport logging\nimport sys\n\nfrom shutil import copyfile\n\n\nashare_pattern = r'^0|^3|^6'\n\n\ndef prepare():\n    t1 = time.clock()\n    day = pd.read_hdf('d:/hdf5_data/dailydata.hdf', columns=['open', 'high', 'low', 'close', 'hfqratio', 'stflag'], where='date > \\'2007-1-1\\'')\n    day = day[day.open > 0]\n    day['openorg'] = day.open\n    day['open'] = day.open * day.hfqratio\n    day['high'] = day.high * day.hfqratio\n    day['low'] = day.low * day.hfqratio\n    day['close'] = 
day.close * day.hfqratio\n day['ocmax'] = day[['open', 'close']].max(axis=1).groupby(level=0, group_keys=False).rolling(window=67).max()\n day['ocmin'] = day[['open', 'close']].min(axis=1).groupby(level=0, group_keys=False).rolling(window=67).min()\n day['ocrate'] = day.ocmax / day.ocmin\n\n fd = pd.read_hdf('d:/hdf5_data/fundamental.hdf')\n day['eps'] = fd['每股收益_调整后(元)']\n day['kama'] = day.groupby(level=0).apply(\n lambda x: pd.Series(ta.KAMA(x.close.values, timeperiod=22), x.index.get_level_values(1)))\n day['kamapct'] = day.kama.groupby(level=0).pct_change()+1\n day['kamaind'] = day.kamapct.groupby(level=0, group_keys=False).rolling(window=2).max()\n\n a = day.groupby(level=0).last()\n a['date'] = dt.datetime.today().date()\n a = a.set_index([a.index, 'date'])\n a['open'] = 0\n day = pd.concat([day, a])\n\n pday = day.groupby(level=0, group_keys=False).rolling(window=2).apply(lambda x: x[0])\n day['phigh'] = pday.high\n day['popen'] = pday.open\n day['plow'] = pday.low\n day['pclose'] = pday.close\n day['pkamaind'] = pday.kamaind\n day['highlimit'] = round_series(pday.close / day.hfqratio * 1.09)\n day['lowlimit'] = round_series(pday.close / day.hfqratio * 0.906)\n\n day['ppocrate'] = day.ocrate.groupby(level=0, group_keys=False).rolling(window=3).apply(lambda x: x[0])\n day['ppocmax'] = day.ocmax.groupby(level=0, group_keys=False).rolling(window=3).apply(lambda x: x[0])\n\n\n day = day.reset_index()\n day = day.set_index(['date', 'code'], drop=False)\n day['date'] = day.date.apply(lambda x: np.int64(time.mktime(x.timetuple())))\n day['code'] = day.code.apply(lambda x: np.int64(x))\n day = day.rename(columns={'date': 'idate', 'code': 'icode'})\n day = day.groupby(level=0, group_keys=False).apply(lambda x: x.sort_values('ppocrate')).dropna()\n day.to_hdf('d:/hdf5_data/pttp.hdf', 'day', mode='w', format='t', complib='blosc')\n logging.info('all done...' 
+ str(time.clock()-t1))\n\ndef initializeholding(type, prjname):\n BLSHdll = ct.cdll.LoadLibrary('D:/pttp.dll')\n\n BLSHdll.initialize.argtypes = [ct.c_void_p, ct.POINTER(ct.c_double), ct.POINTER(ct.c_double), ct.c_void_p,ct.POINTER(ct.c_double), ct.POINTER(ct.c_double), ct.c_void_p, ct.c_int, ct.c_double,ct.c_double, ct.c_int, ct.c_char_p]\n\n if type == 0:\n ll = 0\n cash = 300000\n total = 300000\n\n BLSHdll.initialize(ct.c_void_p(), ct.POINTER(ct.c_double)(), ct.POINTER(ct.c_double)(), ct.c_void_p(), ct.POINTER(ct.c_double)(), ct.POINTER(ct.c_double)(), ct.c_void_p(), ct.c_int(ll), ct.c_double(cash), ct.c_double(total), ct.c_int(type), ct.c_char_p(''.encode('ascii')))\n return\n\n initholding = pd.read_csv('d:/trade/%s/holding_pttp.csv' % (prjname), header=None, parse_dates=True, names=['date', 'code', 'buyprc','buyhfqratio', 'vol', 'daystosell', 'historyhigh', 'amount', 'cash', 'total'], dtype={'code': np.int64, 'buyprc': np.float64, 'buyhfqratio': np.float64, 'vol': np.int64, 'daystosell': np.int64, 'historyhigh': np.float64, 'amount': np.float64, 'cash': np.float64, 'total': np.float64}, index_col='date')\n\n if len(initholding) > 1:\n initholding = initholding.loc[initholding.index[-1]]\n\n ccode = initholding.code.get_values().ctypes.data_as(ct.c_void_p)\n cbuyprc = initholding.buyprc.get_values().ctypes.data_as(ct.POINTER(ct.c_double))\n cbuyhfqratio = initholding.buyhfqratio.get_values().ctypes.data_as(ct.POINTER(ct.c_double))\n cvol = initholding.vol.get_values().ctypes.data_as(ct.c_void_p)\n chistoryhigh = initholding.historyhigh.get_values().ctypes.data_as(ct.POINTER(ct.c_double))\n camount = initholding.amount.get_values().ctypes.data_as(ct.POINTER(ct.c_double))\n cdaystosell = initholding.daystosell.get_values().ctypes.data_as(ct.c_void_p)\n\n ll = len(initholding)\n cash = 200000\n total = 200000\n if ll > 0:\n cash = initholding.cash.get_values()[0]\n total = initholding.total.get_values()[0]\n\n BLSHdll.initialize(ccode, cbuyprc, cbuyhfqratio, cvol, chistoryhigh, camount, cdaystosell, int(ll), ct.c_double(cash), ct.c_double(total), int(type), ct.c_char_p(prjname.encode('ascii')))\n\ndef doProcessing(df, params):\n\n dll = ct.cdll.LoadLibrary('d:/pttp.dll')\n\n c_double_p = ct.POINTER(ct.c_double)\n\n # process\n dll.process.restype = ct.c_double\n dll.process.argtypes = [ct.c_void_p, ct.c_void_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, c_double_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_int64, c_double_p]\n\n cdate = df.idate.get_values().ctypes.data_as(ct.c_void_p)\n ccode = df.icode.get_values().ctypes.data_as(ct.c_void_p)\n cp1 = df.eps.get_values().ctypes.data_as(c_double_p)\n cp2 = df.openorg.get_values().ctypes.data_as(c_double_p)\n cp3 = df.close.get_values().ctypes.data_as(c_double_p)\n cp4 = df.high.get_values().ctypes.data_as(c_double_p)\n cp5 = df.low.get_values().ctypes.data_as(c_double_p)\n cp6 = df.pkamaind.get_values().ctypes.data_as(c_double_p)\n cp7 = df.ppocrate.get_values().ctypes.data_as(c_double_p)\n cp8 = df.pclose.get_values().ctypes.data_as(c_double_p)\n cp9 = df.phigh.get_values().ctypes.data_as(c_double_p)\n cp10 = df.ppocmax.get_values().ctypes.data_as(c_double_p)\n cp11 = df.highlimit.get_values().ctypes.data_as(c_double_p)\n cp12 = df.lowlimit.get_values().ctypes.data_as(c_double_p)\n hfq = df.hfqratio.get_values().ctypes.data_as(c_double_p)\n cstflag = df.stflag.get_values().ctypes.data_as(ct.c_void_p)\n cactiveparam = 
params.get_values().ctypes.data_as(c_double_p)\n\n ret = dll.process(cdate, ccode, cp1, cp2, cp3, cp4, cp5, cp6, cp7, cp8, cp9, cp10, cp11, cp12, hfq, cstflag, cstflag, cstflag, len(df), cactiveparam)\n return ret\n\ndef regressionTest():\n logging.info('reading dayk tmp...' + str(dt.datetime.now()))\n df = pd.read_hdf('d:/HDF5_Data/pttp.hdf', where='date > \\'2008-1-1\\'')\n\n\n\n t1 = time.clock()\n '''\n g_maxfallback = activeparam[0]\n epsflag = activeparam[1]\n ocrateflag = activeparam[2]\n buyselladj = activeparam[3]\n g_DELAYNOSELL = int64_t(activeparam[4])\n '''\n params = pd.Series([0.92, 1.2, 1.19, 0.999, 12, 1199116800])\n\n for g_maxfallback in [0.92,]:\n params[0] = g_maxfallback\n for epsflag in [1.2,]:\n params[1] = epsflag\n for ocrateflag in [1.19, ]:\n params[2] = ocrateflag\n for buyselladj in [0]:\n params[3] = buyselladj\n for g_DELAYNOSELL in [12,]:\n params[4] = g_DELAYNOSELL\n for startdate in [1199116800]:\n params[5] = startdate\n\n initializeholding(0, '')\n ret = doProcessing(df, params)\n hfile = 'h_' + '_'.join(str(x) for x in params) + '.csv'\n tfile = 't_' + '_'.join(str(x) for x in params) + '.csv'\n logging.info(hfile + str(ret))\n copyfile('d:/tradelog/transaction_pttp_c.csv', 'd:/tradelog/pttp/' + tfile)\n copyfile('d:/tradelog/holding_pttp_c.csv', 'd:/tradelog/pttp/' + hfile)\n logging.info('doProcessing...'+str(time.clock()-t1))\n logging.info('finished...' + str(ret))\n\ndef morningTrade(prjname):\n logging.info('retrieving today all...'+ str(dt.datetime.now()))\n realtime = pd.DataFrame()\n retry = 0\n get = False\n while not get and retry < 15:\n try:\n retry += 1\n # today = get_today_all()\n realtime = get_realtime_all_st()\n realtime = realtime.set_index('code')\n if realtime.index.is_unique and len(realtime[realtime.open > 0]) > 500:\n get = True\n except Exception:\n logging.error('retrying...')\n time.sleep(1)\n\n if realtime.sort_values('date').date.iloc[-1].date() < dt.date.today():\n logging.info('today ' + str(dt.date.today()) + ' is holiday, no trading...')\n return\n\n logging.info('reading temp file...' + str(dt.datetime.now()))\n df = pd.read_hdf('d:/HDF5_Data/pttp.hdf', 'day', where='date = \\''+str(dt.date.today()) + '\\'')\n\n realtime = realtime[realtime.pre_close > 0]\n df = df.reset_index(0)\n df = df.reindex(realtime.index, fill_value=0)\n\n df.date = dt.date.today()\n df.idate = np.int64(time.mktime(dt.date.today().timetuple()))\n df.open = realtime.open\n df.low = 0.0\n df.high = 9999.0\n df.hfqratio = df.pclose / realtime.pre_close\n df.loc[realtime.name.str.contains(st_pattern), 'stflag'] = 1\n\n df = df[df.hfqratio > 1]\n df = df.sort_values('ppocrate')\n df.set_index([df.index, 'date']).to_hdf('d:/trade/%s/today.hdf'%prjname, 'day', format='t')\n\n logging.info('initializing holding...' + str(dt.datetime.now()))\n initializeholding(1, prjname)\n\n logging.info('doProcessing...' + str(dt.datetime.now()))\n params = pd.Series([0.92, 1.2, 1.19, 0.999, 12, 1199116800])\n doProcessing(df, params)\n\n logging.info('sending mail...' 
+ str(dt.datetime.now()))\n transactions = pd.read_csv('d:/trade/%s/transaction_pttp.csv'%(prjname), header=None, parse_dates=True, names=['date', 'type', 'code', 'buyprc', 'sellprc', 'vol', 'amount', 'fee', 'cash'], index_col='date')\n\n try:\n transactions.type.replace({0:'buy', 9:'fallback', 8:'kama', 7:'st'}, inplace=True)\n transactions = transactions.loc[dt.date.today()]\n except KeyError:\n sendmail(\"no transaction today...\", prjname)\n else:\n sendmail(transactions.to_string(), prjname)\n logging.info('finished %s...' % prjname)\n\n\n\ndef getArgs():\n parse=argparse.ArgumentParser()\n parse.add_argument('-t', type=str)\n parse.add_argument('-n', type=str)\n\n args=parse.parse_args()\n return vars(args)\n\nif __name__==\"__main__\":\n args = getArgs()\n type = args['t']\n prjname = args['n']\n\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='d:/tradelog/pttp.log'\n )\n log = logging.getLogger()\n stdout_handler = logging.StreamHandler(sys.stdout)\n log.addHandler(stdout_handler)\n\n if (type == 'regression'):\n regressionTest()\n elif (type == 'prepare'):\n prepare()\n elif (type == 'trade'):\n morningTrade(prjname)\n\n\n","sub_path":"pttp.py","file_name":"pttp.py","file_ext":"py","file_size_in_byte":11303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"476789315","text":"from __future__ import absolute_import\n\nimport maya.cmds as cmds\n\nfrom rigging.library.utils import snapJoint as rlu_snapJoint\n\n\nclass MessageAttribute:\n def __init__(self, fkik_ctrl, ball=False):\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'middle_ref_jnt')):\n self.middle_ref_message_jnt = self.add_attr_transform(obj=fkik_ctrl, attr_name='middle_ref_jnt',\n attr_type='message')\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'lower_ref_jnt')):\n self.lower_ref_message_jnt = self.add_attr_transform(obj=fkik_ctrl, attr_name='lower_ref_jnt',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'upper_limb_jnt')):\n self.upper_limb_jnt = self.add_attr_transform(obj=fkik_ctrl, attr_name='upper_limb_jnt',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'middle_limb_jnt')):\n self.middle_limb_jnt = self.add_attr_transform(obj=fkik_ctrl, attr_name='middle_limb_jnt',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'lower_limb_jnt')):\n self.lower_limb_jnt = self.add_attr_transform(obj=fkik_ctrl, attr_name='lower_limb_jnt',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'upper_limb_fk_ctrl')):\n self.upper_limb_fk_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='upper_limb_fk_ctrl',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'middle_limb_fk_ctrl')):\n self.middle_limb_fk_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='middle_limb_fk_ctrl',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'lower_limb_fk_ctrl')):\n self.lower_limb_fk_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='lower_limb_fk_ctrl',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'upper_limb_ik_ctrl')):\n self.upper_limb_ik_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='upper_limb_ik_ctrl',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'poleVector_ctrl')):\n self.poleVector_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='poleVector_ctrl',\n 
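A note on the pttp.py record above: it calls `time.clock()` (removed in Python 3.8) and pandas' `Series.get_values()` (removed in pandas 1.0) before handing raw buffers to a C DLL. A minimal sketch of the modern equivalents, assuming NumPy and pandas >= 1.0; the DataFrame here is illustrative, not the record's data:

```python
import ctypes as ct
import time

import numpy as np
import pandas as pd

t1 = time.perf_counter()            # time.clock() was removed in Python 3.8

df = pd.DataFrame({"close": [10.0, 10.5, 11.0]})

# Series.get_values() was removed in pandas 1.0; to_numpy() replaces it.
# ascontiguousarray guards against non-contiguous buffers before the
# pointer is handed to a C library.
values = np.ascontiguousarray(df.close.to_numpy(dtype=np.float64))
c_ptr = values.ctypes.data_as(ct.POINTER(ct.c_double))

print("elapsed:", time.perf_counter() - t1)
```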
attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'lower_limb_ik_ctrl')):\n self.lower_limb_ik_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='lower_limb_ik_ctrl',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'fk_ik_arm_ctrl')):\n self.fk_ik_arm_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='fk_ik_arm_ctrl',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'fk_ik_leg_ctrl')):\n self.fk_ik_leg_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='fk_ik_leg_ctrl',\n attr_type='message')\n if ball:\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'end_limb_fk_jnt')):\n self.end_limb_fk_jnt = self.add_attr_transform(obj=fkik_ctrl, attr_name='end_limb_fk_jnt',\n attr_type='message')\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'end_limb_jnt')):\n self.end_limb_jnt = self.add_attr_transform(obj=fkik_ctrl, attr_name='end_limb_jnt',\n attr_type='message')\n\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'end_limb_fk_ctrl')):\n self.end_limb_fk_ctrl = self.add_attr_transform(obj=fkik_ctrl, attr_name='end_limb_fk_ctrl',\n attr_type='message')\n if not cmds.objExists('%s.%s' % (fkik_ctrl, 'toe_wiggle_attr')):\n self.toe_wiggle_attr = self.add_attr_transform(obj=fkik_ctrl, attr_name='toe_wiggle_attr',\n attr_type='message')\n\n def connect_message_to_attribute(self, object_target, fkik_ctrl, object_connector):\n cmds.connectAttr('%s.message' % (object_target), '%s.%s' % (fkik_ctrl, object_connector))\n\n # add attribute on transform\n def add_attr_transform(self, obj, attr_name, attr_type, edit=False, keyable=False, channel_box=False, **kwargs):\n if cmds.nodeType(obj) == \"transform\":\n cmds.addAttr(obj, ln=attr_name, at=attr_type, **kwargs)\n cmds.setAttr('%s.%s' % (obj, attr_name), e=edit, k=keyable, cb=channel_box)\n return attr_name\n else:\n cmds.error('object is not transform')\n\n def add_joint_reference(self, upper_limb_jnt, middle_limb_jnt, lower_limb_jnt, side_LFT, side_RGT, side):\n upper_ref_joint = rlu_snapJoint.joint(limb_jnt=upper_limb_jnt, side_LFT=side_LFT, side_RGT=side_RGT, side=side)\n self.middle_ref_joint = rlu_snapJoint.joint(limb_jnt=middle_limb_jnt, side_LFT=side_LFT, side_RGT=side_RGT,\n side=side)\n self.lower_ref_joint = rlu_snapJoint.joint(limb_jnt=lower_limb_jnt, side_LFT=side_LFT, side_RGT=side_RGT,\n side=side)\n\n # parent\n cmds.parent(self.lower_ref_joint, self.middle_ref_joint)\n cmds.parent(self.middle_ref_joint, upper_ref_joint)\n\n # lock attribute\n","sub_path":"rigging/library/utils/addAttrMessage.py","file_name":"addAttrMessage.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"409570780","text":"# %%\nimport json\nfrom datetime import datetime\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport pickle\n\n# %%\ndef load_json(path):\n data = []\n with open(path) as json_file:\n data = json.load(json_file)\n return data\n\n\ndef nytimes_analysis(nytimes, analyzer):\n result = []\n count = 0\n\n for news in nytimes:\n try:\n pub_date = datetime.strptime(news['pub_date'], '%Y-%m-%dT%H:%M:%S%z')\n sentiment_headline = analyzer.polarity_scores(news['headline']['main'])\n sentiment_abstract = analyzer.polarity_scores(news['abstract'])\n\n data = dict(\n pub_date=pub_date,\n headline=news['headline']['main'],\n abstract=news['abstract'],\n source=\"nytimes\",\n sentiment_headline=sentiment_headline,\n sentiment_abstract=sentiment_abstract\n )\n result.append(data)\n 
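The MessageAttribute constructor in addAttrMessage.py above repeats the same objExists/addAttr guard sixteen-odd times. A possible table-driven refactor, assuming it runs inside Maya (maya.cmds is only importable there); the attribute names are taken from the record, while the helper name `ensure_message_attrs` is hypothetical:

```python
import maya.cmds as cmds  # importable only inside a Maya session

MESSAGE_ATTRS = [
    'middle_ref_jnt', 'lower_ref_jnt',
    'upper_limb_jnt', 'middle_limb_jnt', 'lower_limb_jnt',
    'upper_limb_fk_ctrl', 'middle_limb_fk_ctrl', 'lower_limb_fk_ctrl',
    'upper_limb_ik_ctrl', 'poleVector_ctrl', 'lower_limb_ik_ctrl',
    'fk_ik_arm_ctrl', 'fk_ik_leg_ctrl',
]
BALL_ATTRS = ['end_limb_fk_jnt', 'end_limb_jnt',
              'end_limb_fk_ctrl', 'toe_wiggle_attr']


def ensure_message_attrs(fkik_ctrl, ball=False):
    """Add any message attribute the control is still missing, in one pass."""
    for attr in MESSAGE_ATTRS + (BALL_ATTRS if ball else []):
        if not cmds.objExists('%s.%s' % (fkik_ctrl, attr)):
            cmds.addAttr(fkik_ctrl, ln=attr, at='message')
```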
count += 1\n if count % 1000 == 0:\n print(\"%s %s/%s\" % (\"nytimes\", count, len(nytimes)))\n except Exception as ex:\n print(\"%s-%s\" % (news, ex))\n\n return result\n\n\ndef dj_wsj_analysis(data_list, analyzer, type):\n result = []\n count = 0\n\n for news in data_list:\n try:\n pub_date = datetime.strptime(news['date'], '%d %B %Y')\n sentiment_headline = analyzer.polarity_scores(news['header'])\n sentiment_abstract = analyzer.polarity_scores(news['abstract'])\n\n data = dict(\n pub_date=pub_date,\n headline=news['header'],\n abstract=news['abstract'],\n source=type,\n sentiment_headline=sentiment_headline,\n sentiment_abstract=sentiment_abstract\n )\n result.append(data)\n count += 1\n if count % 1000 == 0:\n print(\"%s %s/%s\" % (type, count, len(data_list)))\n except Exception as ex:\n print(\"%s-%s\" % (news, ex))\n\n return result\n\n\ndef main():\n nytimes_path = \"./data/201001_201912_nytimes_tech_news.json\"\n dj_path = \"./data/201001_202007_DJ_tech_news.json\"\n wsj_path = \"./data/201001_202007_WSJ_Tech_News.json\"\n out_path = \"./data/out.dat\"\n\n analyzer = SentimentIntensityAnalyzer()\n nytimes = load_json(nytimes_path)\n dj = load_json(dj_path)\n wsj = load_json(wsj_path)\n\n result = []\n nytimes_s = nytimes_analysis(nytimes, analyzer)\n dj_s = dj_wsj_analysis(dj, analyzer, \"dj\")\n wsj_s = dj_wsj_analysis(wsj, analyzer, \"wsj\")\n\n result.extend(nytimes_s)\n result.extend(dj_s)\n result.extend(wsj_s)\n\n with open(out_path, 'wb') as outfile:\n pickle.dump(result, outfile)\n\n ## Get result\n # with open(out_path, \"rb\") as f:\n # result = pickle.load(f)\n# %%\n\nif __name__ == '__main__':\n main()\n","sub_path":"news_nlp.py","file_name":"news_nlp.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"592547997","text":"# %%\nimport pandas as pd\nimport numpy as np\ndata = pd.read_csv('/home/DS-DH/notebooks/digitalHouse/properatti.csv')\n\n# %%\nmatriz = pd.read_csv('/home/DS-DH/notebooks/digitalHouse/matriz.csv',sep='|')\n\n# %%\ndata_= pd.read_csv('/home/DS-DH/notebooks/digitalHouse/matriz.csv',sep='|')\n\n# %%\n\n\n# %%\ndef outliers(p_data):\n\n\n data_modificada = p_data\n\n # GENERO CULUMNA DE MEDIAS AGRUPANDO POR PCIA, BARRIO, TIPO DE PROPIEDAD\n data_modificada ['media_supTotal'] = data_modificada.groupby(['localidad','barrio','propiedad'])['superficie_total'].transform('mean')\n data_modificada ['media_supCubierta'] = data_modificada.groupby(['localidad','barrio','propiedad'])['superficie_cubierta_m2'].transform('mean')\n data_modificada ['media_PrecioM2'] = data_modificada.groupby(['localidad','barrio','propiedad'])['precio_m2'].transform('mean')\n data_modificada ['media_PrecioAproxUSD'] = data_modificada.groupby(['localidad','barrio','propiedad'])['precio_aprox_usd'].transform('mean')\n\n\n #GENERO COLUMNA DE STD AGRUPANDO POR PCIA, BARRIO, TIPO DE PROPIEDAD\n data_modificada ['std_supTotal'] = data_modificada.groupby(['localidad','barrio','propiedad'])['superficie_total'].transform('std')\n data_modificada ['std_supCubierta'] = data_modificada.groupby(['localidad','barrio','propiedad'])['superficie_cubierta_m2'].transform('std')\n data_modificada ['std_PrecioM2'] = data_modificada.groupby(['localidad','barrio','propiedad'])['precio_m2'].transform('std')\n data_modificada ['std_PrecioAproxUSD'] = data_modificada.groupby(['localidad','barrio','propiedad'])['precio_aprox_usd'].transform('std')\n\n\n #GENERO COLUMNA CON LA FORMULA DE CHEUVENET PARA EL CALCULO DE 
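news_nlp.py above instantiates SentimentIntensityAnalyzer directly; NLTK raises a LookupError unless the VADER lexicon has been downloaded once. A small guard, assuming a standard NLTK install:

```python
import nltk

# The analyzer raises LookupError if the lexicon was never downloaded.
try:
    nltk.data.find('sentiment/vader_lexicon.zip')
except LookupError:
    nltk.download('vader_lexicon')

from nltk.sentiment.vader import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()
# polarity_scores returns a dict with 'neg', 'neu', 'pos' and 'compound' keys.
print(analyzer.polarity_scores("Markets rallied on strong tech earnings"))
```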
OUTLIERS\n data_modificada ['supTotal_criterio_cheuvenet'] = (abs(data_modificada.superficie_total-data_modificada.media_supTotal))/(data_modificada.std_supTotal)\n data_modificada ['supCubierta_criterio_cheuvenet'] = (abs(data_modificada.superficie_cubierta_m2-data_modificada.media_supCubierta))/(data_modificada.std_supCubierta)\n data_modificada ['priceM2_criterio_cheuvenet'] = (abs(data_modificada.precio_m2-data_modificada.media_PrecioM2))/(data_modificada.std_PrecioM2)\n data_modificada ['priceAprox_criterio_cheuvenet'] = (abs(data_modificada.precio_aprox_usd-data_modificada.media_PrecioAproxUSD))/(data_modificada.std_PrecioAproxUSD)\n\n \n \n # % OUTLIERS SUPERFICIES TOTALES\n data_modificada.loc[data_modificada.supTotal_criterio_cheuvenet>2].shape[0]/data_modificada.shape[0] \n data_modificada.superficie_total.loc[data_modificada.supTotal_criterio_cheuvenet>2] = np.nan\n \n \n # % OUTLIERS SUPERFICIES CUBIERTAS\n data_modificada.loc[data_modificada.supCubierta_criterio_cheuvenet>2].shape[0]/data_modificada.shape[0] \n data_modificada.superficie_cubierta_m2.loc[data_modificada.supCubierta_criterio_cheuvenet>2] = np.nan\n \n \n # % OUTLIERS DE PRECIOS APROX USD\n data_modificada.precio_aprox_usd.loc[data_modificada.priceAprox_criterio_cheuvenet>2].shape[0]/data_modificada.shape[0] \n data_modificada.precio_aprox_usd.loc[data_modificada.priceAprox_criterio_cheuvenet>2] = np.nan\n \n # % OUTLIERS DE PRECIOS POR M2\n data_modificada.loc[data_modificada.priceM2_criterio_cheuvenet>2].shape[0]/data_modificada.shape[0]\n data_modificada.precio_m2.loc[data_modificada.priceM2_criterio_cheuvenet>2] = np.nan\n \n \n \n return data_modificada \n\n# %%\ndata_ = outliers(matriz)\n\n# %%\n\n\n# %%\n#GENERO AMBIENTES DE MATRIZ\n#data_['ambientes']=data_.merge(matriz,how='left', left_index=True, right_index=True)['ambientes']\n\n#GENERO DUMMYS DE AMBIENTES\ndata_['1_AMBIENTE'] = (data_.ambientes>=1)&(data_.ambientes<2)\ndata_['2_AMBIENTE'] = (data_.ambientes>=2)&(data_.ambientes<3)\ndata_['3_AMBIENTE'] = (data_.ambientes>=3)&(data_.ambientes<4)\ndata_['4_AMBIENTE'] = (data_.ambientes>=4)&(data_.ambientes<5)\ndata_['5_AMBIENTE'] = (data_.ambientes>=5)&(data_.ambientes<6)\ndata_['6_AMBIENTE'] = (data_.ambientes>=6)&(data_.ambientes<7)\ndata_['7_AMBIENTE'] = (data_.ambientes>=7)&(data_.ambientes<8)\n\n\ndata_[['1_AMBIENTE','2_AMBIENTE','3_AMBIENTE','4_AMBIENTE', '5_AMBIENTE','6_AMBIENTE','7_AMBIENTE']] = data_[['1_AMBIENTE','2_AMBIENTE','3_AMBIENTE','4_AMBIENTE', '5_AMBIENTE','6_AMBIENTE','7_AMBIENTE']].applymap(lambda x : 1 if (x) else 0)\n\n\n#GENERO DUMMYS TIPO DE PROPIEDAD \ndata_['CASA'] = data_.propiedad.str.contains('house')\ndata_['PH'] = data_.propiedad.str.contains('PH')\ndata_['DTO'] = data_.propiedad.str.contains('apartment')\ndata_[['CASA','PH','DTO']] = data_[['CASA','PH','DTO']].applymap(lambda x : 1 if x else 0)\n\n\n#ELIMINO REGISTROS NULOS DE VARIABLES A UTILIZAR EN EL MODELO\ndata_=data_[data_.precio_m2.notnull()]\ndata_=data_[data_.superficie_total.notnull()]\ndata_=data_[data_.ambientes.notnull()]\n\n\n#GENERO DUMMYS DE BARRIOS\n\n#QUITO NULOS DE LA COLUMNA STATE_NAME\ndata_ = data_[data_.barrio.notnull()]\n\n\n#CREO LISTA DE BARRIOS \nbarrios = data_[data_.localidad.str.contains('capital')].barrio.unique()\n\n\n#GENERO DUMMYS\nfor barrio in barrios:\n indices_barrios = (data_.index[data_.barrio.str.contains(barrio)])\n barrio = barrio.lower().replace(' ','_')\n df = data_\n df.barrio = df.barrio.apply(lambda x : x.lower().replace(' ','_'))\n df[barrio] = 
df.barrio.str.contains(barrio)\n\n\n\nnumero_barrios = len(data_.barrio[data_.localidad.str.contains('capital')].unique())\nindices_dummys_barrios = data_.shape[1]-numero_barrios\n\n#CREO EL DATAFRAME CON LAS DUMMYS DE BARRIOS\ndummys_barrios = data_.iloc[:,indices_dummys_barrios:]\n\n\ndummys_barrios = dummys_barrios.applymap(lambda x : 1 if (x) else 0)\n\n\n#GENERO DUMMYS DE BARRIOS EN EL DATAFRAME\ndata_.iloc[:,indices_dummys_barrios:] = dummys_barrios\n\n\n# %%\n\n\n# %%\n\n\n# %%\n\n\n# %%\n\n\n# %%\n#SKLEARN\n\n#GENERO VARIABLES INDEPENDIENTES\nx_feactures = data_.iloc[:,31:]\ndf1 = data_['superficie_total']\nxs = pd.concat([df1,x_feactures],axis=1)\n\n# %%\n\n#GENERO VARIABLE DEPENDIENTE\ny = data_.precio_m2\n\n# %%\n#TRANSFORMO VARIABLES INDEPENDIENTES EN FORMATO MATRIZ\nxs = xs.as_matrix()\n\n\n#TRANSFORMO VARIABLE DEPENDIENTE EN FORMATO MATRIZ\ny = y.as_matrix()\n\n\n# %%\n#IMPORTAR LIBRERIAS DE SKLEARN\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\n\n# %%\n#PARTICIONAR DATOS DE ENTRENAMIENTO Y TESTING\nx_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.2)\n\n# %%\n#FIT \nmodelo = linear_model.LinearRegression()\nmodelo.fit(x_train,y_train)\n\n# %%\n#PREDECIR DATOS \"Y\" DE \"X\" TEST \ny_predict = modelo.predict(x_test)\n\n\n# %%\n#PENDIENTES\npendientes = modelo.coef_\n\n# %%\n#ORDENADA \nordenada = modelo.intercept_\n\n# %%\n#R2\n'EL RESULTADO DEL MODELO ES DE {}'.format(modelo.score(x_train,y_train))\n\n# %%\n\n\n# %%\n\n\n# %%\n\n\n# %%\nimport matplotlib.pyplot as plt\n\n#GENERO EJE X -> SUPERFICIE TOTAL\nx1 = x_test[:,0]\n\n#GENERO EJE Y -> PRECIO M2 DE TEST\nx2 = y_test\n\n# EJE Y -> PRECIO M2 PREDICHO\nx3 = y_predict\n\n\n#PLOT\nplt.scatter(x1,x2,label='test modelo', color='blue')\nplt.scatter(x1,x3,label='prediccion modelo', color='red')\nplt.title('grafico modelo')\nplt.show()\n\n# %%\n\n\n# %%\n\n\n# %%\n\n\n# %%\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# %%\n\n\n# %%\n","sub_path":"py/leer_properatty.py","file_name":"leer_properatty.py","file_ext":"py","file_size_in_byte":7069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"178818822","text":"from CookieTTS.utils.text.symbols import symbols\n\ndef create_hparams(hparams_string=None, verbose=False):\n \"\"\"Create model hyperparameters. Parse nondefault from given string.\"\"\"\n\n from CookieTTS.utils.utils_hparam import HParams\n hparams = HParams(\n random_segments=True,# DONT MODIFY\n \n #################################\n ## Experiment Parameters ##\n #################################\n epochs = 1000,\n \n n_models_to_keep=4,# NOT IMPLEMENTED # if over this number, will delete oldest checkpoint(s). 
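leer_properatty.py above (its Spanish comments describe flagging outliers with Chauvenet's criterion, spelled "cheuvenet" in the column names, then building room and neighbourhood dummies by hand) also relies on `DataFrame.as_matrix()`, removed in pandas 1.0. A sketch of the modern equivalents using the record's column names and toy data:

```python
import pandas as pd

df = pd.DataFrame({
    'precio_m2': [1200.0, 950.0, 1100.0],
    'superficie_total': [45.0, 60.0, 38.0],
    'barrio': ['palermo', 'caballito', 'palermo'],
})

# .as_matrix() was removed in pandas 1.0; .to_numpy() is the replacement.
xs = df[['superficie_total']].to_numpy()
y = df['precio_m2'].to_numpy()

# pd.get_dummies replaces the manual one-column-per-barrio loop.
dummies = pd.get_dummies(df['barrio'], prefix='barrio')
print(dummies.columns.tolist())   # ['barrio_caballito', 'barrio_palermo']
```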
Will never delete \"best\" checkpoints (as shown below).\n save_best_val_model = True,# save best MFSE as a seperate checkpoint.\n # This is basically best audio quality model, it does not represent most accurate speaker\n \n dynamic_loss_scaling=True,\n fp16_run = False,# requires 20 Series or Better (e.g: RTX 2080 Ti, RTX 2060, Tesla V100, Tesla A100)\n fp16_run_optlvl = '2',\n \n distributed_run = False,\n dist_backend = \"nccl\",\n dist_url = \"tcp://127.0.0.1:54321\",\n \n cudnn_enabled = True,\n cudnn_benchmark = False,\n seed = 1234,\n \n #################################\n ## Freezing/Reseting Modules ##\n #################################\n print_layer_names_during_startup = False,# will print every modules key to be used below.\n ignore_layers = [\"layers_here\"],# for `--warm_start`-ing\n frozen_modules = [\"layers_here\"],# only the module names are required e.g: \"encoder.\" will freeze all parameters INSIDE the encoder recursively\n unfrozen_modules = [\"layers_here\"],# modules that are unfrozen\n \n #################################\n ## Logging / Verbosity ##\n #################################\n n_tensorboard_outputs=8,# number of items from validation so show in Tensorboard\n \n #################################\n ## Batch Size / Segment Length ##\n #################################\n batch_size =16,# controls num of files processed in parallel per GPU\n val_batch_size=16,# for more precise comparisons between models, constant val_batch_size is useful\n \n use_TBPTT =False,# continue processing longer files into the next training iteration\n max_segment_length=1024,# max mel length till a segment is sliced.\n \n num_workers =8,# (train) Number of threads for dataloading per GPU\n val_num_workers=8,# (eval) Number of threads for dataloading per GPU\n prefetch_factor=8,# NOT IMPLEMENTED # Number of samples loaded in advance by each worker.\n \n ###################################\n ## Dataset / Filelist Parameters ##\n ###################################\n data_source = 1,# 0 to use nvidia/tacotron2 filelists, 1 to use automatic dataset processor\n \n # if data_source is 0:\n speakerlist ='/media/cookie/Samsung 860 QVO/ClipperDatasetV2/filelists/speaker_ids.txt',\n training_files ='/media/cookie/Samsung 860 QVO/ClipperDatasetV2/filelists/train_taca2.txt',\n validation_files='/media/cookie/Samsung 860 QVO/ClipperDatasetV2/filelists/validation_taca2.txt',\n \n # if data_source is 1:\n dataset_folder = '/media/cookie/WD6TB/TTS/HiFiDatasets',\n dataset_audio_filters= ['*.wav','*.flac',],\n dataset_audio_rejects= ['*_Noisy_*','*_Very Noisy_*',],\n dataset_p_val = 0.005,# portion of dataset for Validation # default of 0.5% may be too small depending on the size of your dataset.\n dataset_min_duration = 1.5,# minimum duration in seconds for audio files to be added.\n dataset_max_duration = 30.0,# maximum duration in seconds for audio files being added.\n # use max_segment_length to control how much of each audio file can be used to fill VRAM during training.\n dataset_min_chars = 16,# min number of letters/text that a transcript should have to be added to the audiofiles list.\n dataset_max_chars = 256,# min number of letters/text that a transcript should have to be added to the audiofiles list.\n # use max_chars_length to control how much of text from each audio file can be used to fill VRAM during training.\n \n n_speakers = 2048,\n \n force_load = True,# if a file fails to load, replace it with a random other file.\n ##################################\n ## Text / Speaker 
Parameters ##\n ##################################\n use_saved_speakers = False,# use the speaker lookups saved inside the model instead of generating again\n numeric_speaker_ids = True,# sort speaker_ids in filelist numerically, rather than alphabetically.\n # e.g:\n # [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n # instead of,\n # [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] -> [0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9]\n # Mellotron repo has this off by default, but ON makes the most logical sense to me.\n \n check_files= True,# check all files exist, aren't corrupted, have text, good length, and other stuff before training.\n # This can take a while as it has to load the entire dataset once.\n ##################################\n ## Audio Parameters ##\n ##################################\n sampling_rate= 44100,\n target_lufs = -27.0,# Loudness each file is rescaled to, use None for original file loudness.\n \n trim_enable = True,# set to False to disable trimming completely\n trim_cache_audio = False,# save trimmed audio to disk to load later. Saves CPU usage, uses more disk space.\n # modifications to params below do not apply to already cached files.\n trim_margin_left = [0.0125]*3,\n trim_margin_right = [0.0125]*3,\n trim_ref = ['amax']*3,\n trim_top_db = [ 48, 46, 46],\n trim_window_length= [16384, 4096, 2048],\n trim_hop_length = [ 2048, 1024, 512],\n trim_emphasis_str = [ 0.0, 0.0, 0.0],\n \n ##################################\n ## Spectrogram Parameters ##\n ##################################\n filter_length = 2048,\n hop_length = 512,\n win_length = 2048,\n n_mel_channels = 80,\n mel_fmin = 20.0,\n mel_fmax = 11025.0,\n stft_clamp_val = 1e-5,# 1e-5 = original\n \n cache_mel=False,# save spectrograms to disk to load later. Saves CPU usage, uses more disk space.\n # modifications to params below do not apply to already cached files.\n \n silence_value = -11.5129,# = ln(1e-5)\n silence_pad_start = 0,# frames to pad the start of each spectrogram\n silence_pad_end = 0,# frames to pad the end of each spectrogram\n # These frames will be added to the loss functions and Tacotron must predict and generate the padded silence.\n \n ######################################\n ## Synthesis / Inference Parameters ##\n ######################################\n gate_threshold = 0.5, # to be removed\n gate_delay = 10, # to be removed\n max_decoder_steps = 3000, # to be removed\n n_symbols=len(symbols), # to be removed\n symbols_embedding_dim=512, # to be removed\n gate_positive_weight =10, # to be removed\n p_teacher_forcing = 1.00,# to be removed\n teacher_force_till = 20, # to be removed\n val_p_teacher_forcing = 0.80,# to be removed\n val_teacher_force_till = 20, # to be removed\n \n ##################################\n ## Model Parameters ##\n ##################################\n \n # (Misc)\n use_causal_convs=False,# this will remove the ability for any conv layers to use information from the future\n # which will roughly half the inference latency of this network, but may negatively affect audio quality\n \n # (Encoder) Encoder parameters\n speaker_encoder_dim = 256,# <- don't change this\n \n enc_conv_dim = 512,\n n_enc_layers = 3,\n \n bottleneck_dim = 32,# the quantity of information the encoder is able to give.\n # Must be very restrictive to force the network to seperate the speaker and content internally.\n # Too low will cause the model outputs to be very blurred and unintelligible\n # Too high will allow the model to ignore the speaker encoder and the model will not 
attempt to change it's speaker during inference.\n freq = 16,# number of frames between encoder samples\n # larger values mean the model gets more freedom to flow on it's own, but decreases the audio quality and intelligibility of the outputs\n # smaller values force the network to directly clone the input speech, and likely outputs will not resemble the target speaker as well.\n \n # (Decoder) Decoder parameters\n decoder_conv_dim = 512,\n decoder_n_conv_layers = 3,\n \n decoder_lstm_dim = 1024,\n decoder_n_lstm_layers = 2,\n \n # (Postnet) Mel-post processing network parameters\n postnet_embedding_dim = 512,\n \n ##################################\n ## Optimization Hyperparameters ##\n ##################################\n use_saved_learning_rate=False,\n learning_rate = 0.1e-5,# overriden by 'run_every_epoch.py'\n grad_clip_thresh=1.0, # overriden by 'run_every_epoch.py'\n weight_decay = 1e-6,\n )\n\n if hparams_string:\n print('Parsing command line hparams: %s', hparams_string)\n hparams.parse(hparams_string)\n\n if verbose:\n print('Final parsed hparams: %s', hparams.values())\n\n return hparams\n","sub_path":"CookieTTS/_2_ttm/auto_vc/hparams.py","file_name":"hparams.py","file_ext":"py","file_size_in_byte":10431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"214077256","text":"import requests\nimport re\nimport os\nimport html\nimport urllib.request\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n\nfrom .models import Laptop, VGAInfo, CPUInfo\nfrom logs.my_log import AnphatLogger\nfrom anphat_pc.settings import BASE_DIR, MEDIA_ROOT, DOWNLOAD_ROOT\n\nutils_log = AnphatLogger('AP_UTILS')\nerror_urls_path = os.path.join(BASE_DIR, 'products', 'error_url.txt')\nif not os.path.exists(error_urls_path):\n f = open(error_urls_path, 'w+')\n f.close()\nerror_urls_file = open(error_urls_path, 'a')\n\n\ndef test():\n # content = '08/10/2017 = 747 days old'\n # soup = BeautifulSoup(content, 'lxml')\n # a = re.findall(r'\\d\\d/\\d\\d/\\d\\d\\d\\d', soup.text)[0]\n # release_date = datetime.strptime(a, '%m/%d/%Y')\n # print(release_date)\n\n # get_gpu_url_list()\n # crawl_cpu_spec()\n crawl_anphat_laptop('https://www.anphatpc.com.vn/laptop-dell-xps15-9570-70158746_id26442.html')\n\n\ndef count_laptops():\n return Laptop.objects.count()\n\n\ndef get_gpu_url_list():\n url_list = []\n base_url = 'https://www.notebookcheck.net/NVIDIA-GeForce-RTX-2080-Ti-Desktop-Graphics-Card.386296.0.html'\n r = requests.get(url=base_url, verify=False)\n soup = BeautifulSoup(r.text, 'lxml')\n classes = soup.find(id=\"c5498226\").div.div.find_all('span')\n for cls in classes:\n divs = cls.find_all('div')\n for div in divs:\n try:\n url_list.append(div.a['href'])\n except TypeError as e:\n utils_log.info(e)\n continue\n error_url_list = []\n for url in url_list:\n try:\n crawl_gpu_spec(url)\n except Exception as e:\n utils_log.error(e)\n error_url_list.append(url)\n error_urls_file.write(url + '\\n')\n continue\n utils_log.info('-----------------LIST OF ERROR URL-----------------')\n if error_url_list:\n for url in error_url_list:\n utils_log.info(url)\n\n\ndef crawl_gpu_spec(url):\n r = requests.get(url=url, verify=False)\n soup = BeautifulSoup(r.text, 'lxml')\n\n spec_divs = soup.find(id=\"content\").find_all('div')\n count = 0\n for div in spec_divs:\n try:\n name = div.find('div', {'class': 'tx-nbc2fe-pi1'}).h1.string\n vga = VGAInfo.objects.filter(name=name)\n if not vga:\n gpu_specs = div.find('div', {'class': 
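The two `print` calls at the bottom of hparams.py above use logging-style `%s` placeholders, so `print('... %s', hparams_string)` emits a tuple rather than an interpolated string. Either interpolate explicitly or keep the lazy-formatting style via the logging module:

```python
import logging

hparams_string = "batch_size=8"   # stand-in for the parsed argument

# print() does not interpolate logging-style placeholders:
print('Parsing command line hparams: %s' % hparams_string)

# or keep lazy formatting by using logging itself:
logging.info('Parsing command line hparams: %s', hparams_string)
```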
'tx-nbc2fe-pi1'}). \\\n find('table', {'class': 'gputable'}).find_all('tr')\n manufacturer, architecture, pipelines, core_speed, memory_speed, memory_bus_width, memory_type, \\\n max_memory_size, shared_memory, direct_x, technology, features, release_date,\\\n link_to_manufacture = '', '', '', '', '', '', '', '', '', '', '', '', '', ''\n for tr in gpu_specs:\n tds = tr.find_all('td')\n if tds[0].string == 'Manufacturer':\n manufacturer = tds[1].string\n elif tds[0].string == 'Architecture':\n architecture = tds[1].string\n elif tds[0].string == 'Pipelines':\n pipelines = tds[1].string\n elif tds[0].string == 'Core Speed':\n core_speed = tds[1].string\n elif tds[0].string == 'Memory Speed':\n memory_speed = tds[1].string\n elif tds[0].string == 'Memory Bus Width':\n memory_bus_width = tds[1].string\n elif tds[0].string == 'Memory Type':\n memory_type = tds[1].string\n elif tds[0].string == 'Max. Amount of Memory':\n max_memory_size = tds[1].string\n elif tds[0].string == 'Shared Memory':\n shared_memory = tds[1].string\n elif tds[0].string == 'DirectX':\n direct_x = tds[1].string\n elif tds[0].string == 'technology':\n technology = tds[1].string\n elif tds[0].string == 'Features':\n features = tds[1].string\n elif tds[0].string == 'Date of Announcement':\n a = re.findall(r'\\d\\d.\\d\\d.\\d\\d\\d\\d', str(tds[1]))[0]\n release_date = datetime.strptime(a.split(' ')[0], '%d.%m.%Y')\n elif tds[0].string == 'Link to Manufacturer Page':\n link_to_manufacture = tds[1].string\n vga = VGAInfo(\n name=name,\n use_type=get_gpu_use_for(lower_name=name.lower()),\n manufacturer=manufacturer,\n architecture=architecture,\n pipelines=pipelines,\n core_speed=core_speed,\n memory_speed=memory_speed,\n memory_bus_width=memory_bus_width,\n memory_type=memory_type,\n max_memory_size=max_memory_size,\n shared_memory=shared_memory,\n direct_x=direct_x,\n technology=technology,\n features=features,\n release_date=release_date,\n link_to_manufacture=link_to_manufacture,\n )\n vga.save()\n break\n except AttributeError as ae:\n count += 1\n utils_log.error(ae)\n continue\n if count == len(spec_divs):\n error_urls_file.write(url + '\\n')\n\n\ndef get_gpu_use_for(lower_name):\n for t in ['laptop', 'desktop']:\n if t in lower_name:\n return t\n return 'unknown'\n\n\n# ----------------CRAWL CPU SPECIFICATIONS-------------------\n\ndef crawl_cpu_spec():\n base_url = 'https://www.notebookcheck.net/Mobile-Processors-Benchmark-List.2436.0.html'\n\n soup = BeautifulSoup(requests.get(base_url, verify=False).text, 'lxml')\n cpu_tags = soup.find(id=\"sortierbare_tabelle\").find_all('tr', {'class': ['odd', 'even', 'desk_odd', 'desk_even',\n 'smartphone_odd', 'smartphone_even']})\n html_file = open(os.path.join(BASE_DIR, 'products', 'cpu_table.html'), 'w+', encoding='utf-8')\n url_list_file = open(os.path.join(BASE_DIR, 'products', 'url_list.txt'), 'w+', encoding='utf-8')\n i = 0\n count = 0\n # get_spec = False\n\n for tag in cpu_tags:\n i += 1\n count += 1\n index_tag = tag.find(lambda t: t.name == 'td' and t['class'] == ['specs', 'poslabel'])\n index = int(''.join(c for c in index_tag.label.get_text() if c.isdigit()))\n try:\n html_file.write('
{0} ({1}).

\\n'.format(count, index))\n name_tag = index_tag.findNext('td')\n url_tag = name_tag.a\n if url_tag:\n html_file.write(str(url_tag) + '
\\n')\n url_list_file.write(url_tag['href'].strip() + '\\n')\n # if url_tag['href'].strip() \\\n # == 'https://www.notebookcheck.net/Apple-A8-SoC.127992.0.html':\n # get_spec = True\n # if get_spec:\n get_cpu_spec_from_url(url_tag['href'].strip())\n else:\n html_file.write(name_tag.get_text() + '
\\n')\n pass\n if i < index:\n print(list(k for k in range(i, index)))\n i = index\n except UnicodeEncodeError or AttributeError as error:\n print(error)\n break\n\n # html_file.close()\n\n\ndef get_cpu_spec_from_url(url):\n print(url)\n soup = BeautifulSoup(requests.get(url, verify=False).text, 'lxml')\n content_div = soup.find(id=\"content\").find_all('div')\n count = 0\n for div in content_div:\n info_div = div.find('div', {'class': 'tx-nbc2fe-pi1'})\n if info_div:\n if info_div.h1:\n name = info_div.h1.get_text().strip()\n cpu = CPUInfo.objects.filter(name=name)\n if not cpu:\n manufacture = name.split(' ')[0]\n reference_link = url\n\n series, code_name, clock_rate, l1_cache, l2_cache, l3_cache, power_consumption, transistor_count, \\\n die_size, technology, max_temp, socket, features, gpu, sixty_four_bit = \\\n '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''\n core, threads, announce_date = None, None, None\n\n spec_div = info_div.find('table', {'class': 'gputable'})\n for tr in spec_div.find_all('tr'):\n tds = tr.find_all('td')\n if tds[0].text.strip().lower() == 'Series'.lower():\n series = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Codename'.lower():\n code_name = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Clock Rate'.lower():\n clock_rate = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Level 1 Cache'.lower():\n l1_cache = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Level 2 Cache'.lower():\n l2_cache = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Level 3 Cache'.lower():\n l3_cache = tds[1].text.strip()\n elif 'Power Consumption'.lower() in tds[0].text.strip().lower():\n power_consumption = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Transistor Count'.lower():\n transistor_count = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Die Size'.lower():\n die_size = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Manufacturing Technology'.lower():\n technology = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Max. 
Temperature'.lower():\n max_temp = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Socket'.lower():\n socket = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Features'.lower():\n features = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'GPU'.lower():\n gpu = tds[1].text.strip()\n elif tds[0].text.strip().lower() == '64 Bit'.lower():\n sixty_four_bit = tds[1].text.strip()\n elif tds[0].text.strip().lower() == 'Number of Cores / Threads'.lower():\n core_threads = tds[1].text.strip().split('/')\n core = int(core_threads[0])\n if len(core_threads) > 1:\n threads = int(core_threads[1])\n elif tds[0].text.strip().lower() == 'Announcement Date'.lower():\n a = re.findall(r'\\d\\d/\\d\\d/\\d\\d\\d\\d', tds[1].text)[0]\n announce_date = datetime.strptime(a, '%m/%d/%Y')\n\n print(name)\n cpu = CPUInfo(\n name=name,\n manufacture=manufacture,\n series=series,\n code_name=code_name,\n clock_rate=clock_rate,\n l1_cache=l1_cache,\n l2_cache=l2_cache,\n l3_cache=l3_cache,\n core=core,\n threads=threads,\n power_consumption=power_consumption,\n transistor_count=transistor_count,\n die_size=die_size,\n technology=technology,\n max_temp=max_temp,\n socket=socket,\n features=features,\n gpu=gpu,\n sixty_four_bit=sixty_four_bit,\n announce_date=announce_date,\n reference_link=reference_link\n )\n cpu.save()\n else:\n count += 1\n continue\n else:\n count += 1\n continue\n if count == len(content_div):\n error_urls_file.write('CASE 2: {0}\\n'.format(url))\n\n\n# -----------------------CRAWL ANPHAT LAPTOP------------------------\ndef crawl_anphat_laptop(url):\n root_url = 'https://anphatpc.com.vn'\n soup = BeautifulSoup(requests.get(url, verify=False).text, 'lxml')\n\n # get product image\n image_rel_url = soup.find(id=\"Zoomer\")['href'].strip()\n if image_rel_url:\n image_url = root_url + image_rel_url\n print(image_url)\n img_file_name = get_img_filename_from_url(image_url)\n if img_file_name:\n print(img_file_name)\n # urllib.request.urlretrieve(image_url, os.path.join(MEDIA_ROOT, img_file_name))\n\n # get product name\n product_name = soup.find('h1', {'class': 'txt_b'}).get_text().strip()\n if product_name:\n print(product_name)\n if product_name.lower().startswith('laptop '):\n product_name = product_name[product_name.index(' ') + 1:]\n print(product_name)\n brand = product_name[:product_name.index(' ')]\n print(brand)\n\n # get product information\n cpu, vga = None, None\n ram, hard_disk, screen, operation_system, pin, weight = '', '', '', '', '', ''\n product_info = soup.find(id=\"detail_summary\")\n if product_info:\n spans = product_info.find_all('span', {'class': 'item'})\n if spans:\n for span in spans:\n text = span.text\n if ':' in text:\n field = text.split(':')[0]\n content = text.split(':')[1]\n if 'cpu' in field.lower():\n pass\n elif 'vga' in field.lower():\n pass\n elif 'ram' in field.lower():\n ram = content.strip()\n elif 'hdd' in field.lower():\n hard_disk = content.strip()\n elif 'màn hình' in field.lower():\n screen = content.strip()\n elif 'os' in field.lower():\n operation_system = content.strip()\n elif 'pin' in field.lower():\n pin = content.strip()\n elif 'cân nặng' in field.lower():\n weight = content.strip()\n print(ram, hard_disk, screen, operation_system, pin, weight)\n\n\n\n\n\ndef get_img_filename_from_url(url):\n images = re.findall(r'([-\\w]+\\.(?:jpg|jpeg|gif|png))', url)\n if images:\n return images[0]\n else:\n return None\n\n\ndef down_load_product_image(url, product_id, image_name):\n urllib.request.urlretrieve(url, 
os.path.join(MEDIA_ROOT, product_id + '_' + image_name))\n","sub_path":"products/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"261341178","text":"\"\"\"myevent URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'events.views.home', name='home'),\n url(r'^events/$', 'events.views.list', name='event_list'),\n url(r'^events/(?P\\d+)/$', 'events.views.detail', name='event_detail'),\n\n url(r'^register/$', 'events.views.register', name='register'),\n url(r'^login/$', auth_view.login, name='login', kwargs={'template_name': 'users/login.html'}),\n url(r'^logout/$', auth_view.logout, name='logout', kwargs={'next_page': '/'}),\n\n url(r'^events/join/(?P\\d+)/$', 'events.views.join', name='event_join'),\n url(r'^events/cancel/(?P\\d+)/$', 'events.views.cancel', name='event_cancel'),\n url(r'^user_event/(?P\\d+)/$', 'events.views.user_event', name='user_event')\n]\n","sub_path":"myevent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"372389856","text":"class DoublyLinkedListNode:\n\n def __init__(self, next=None, prev=None, data=None):\n self.next = next\n self.prev = prev\n self.data = data\n\n\ndef sortedInsert(head, data):\n courrnt = head\n node = DoublyLinkedListNode(data)\n if not courrnt.next:\n head.next = node\n node.prev = head\n return head\n while courrnt:\n if courrnt.data < data < courrnt.next.data or courrnt.data == data:\n node.next = courrnt.next\n courrnt.next.prev = node\n node.prev = courrnt\n courrnt.next = node\n return\n courrnt = courrnt.next\n if courrnt.data < data or courrnt.data == data:\n node.prev = courrnt\n courrnt.next = node\n return head\n","sub_path":"link-list/sort-double.py","file_name":"sort-double.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"47322935","text":"\"\"\" Script de test de retropropagation : deux classes A (+1) et B (-1). On utilise le plan, et on se place\n sous les contraintes 0 <= x <= 10 et 0 <= y <= 10. 
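Three records close on this line. In urls.py, `auth_view.login` is referenced without an import (it needs `from django.contrib.auth import views as auth_view`), and its string view paths such as `'events.views.home'` stopped working in Django 1.10. In sort-double.py, `sortedInsert` returns None from its mid-list insertion branch and dereferences a None tail after the loop. A corrected sketch of the latter, keeping the record's class name:

```python
class DoublyLinkedListNode:
    def __init__(self, data=None, next=None, prev=None):
        self.data = data
        self.next = next
        self.prev = prev


def sortedInsert(head, data):
    """Insert data into an ascending doubly linked list; return the head."""
    node = DoublyLinkedListNode(data)
    if head is None:
        return node
    if data <= head.data:                 # new node becomes the head
        node.next = head
        head.prev = node
        return node
    current = head
    while current.next and current.next.data < data:
        current = current.next
    node.next = current.next
    node.prev = current
    if current.next:
        current.next.prev = node
    current.next = node
    return head
```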
Les classes A et B sont séparées par\n l'hyperplan d'équation y = 10-x .\"\"\"\n\nfrom random import *\nimport retroGradientOpt as rgOpt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport listeOperation as lo\n\nNc = 5\nu = [1,1] #vecteur normal à l'hyperplan séparant les données\nd = 2\nc1 = 10**(-8)\nc2 = 10**(-8)\nmu_0 = 10**(-3)\n\nprint(abs(lo.ps(u,[5,5]))/(lo.ps(u,u)))\n\nfor k in range(1):\n\n \" Generation des donnees \"\n y = []\n Z = []\n nbExemples = 0\n nbRejetes = 0\n\n absA, absB = [], []\n ordA, ordB = [], []\n\n cardA = 0\n cardB = 0\n\n while (nbExemples < 500):\n a = randint(0,100)/10\n b = randint(0,100)/10\n dist = abs((lo.ps(u,[a,b]))/(lo.ps(u,u))-5)\n valide = (d==0) or ((d!=0) & (dist > 2))\n if (a > 10-b) & valide:\n Z.append([1,a,b])\n absA.append(a)\n ordA.append(b)\n y.append(1)\n nbExemples += 1\n cardA += 1\n elif (b < 10-b) & valide:\n Z.append([1,a,b])\n absB.append(a)\n ordB.append(b)\n y.append(-1)\n nbExemples += 1\n cardB += 1\n else:\n nbRejetes += 1\n\n plt.scatter(absA,ordA,s=10,c='r',marker='*')\n plt.scatter(absB,ordB,s=10,c='b',marker='o')\n plt.plot([0,10],[10,0],'orange')\n plt.title(\"Demi-distance à l'hyperplan : \" + str(k/2) +\"\\n card(A) = \"+str(cardA)+\" ; card(B) = \"+str(cardB))\n plt.show()\n\n print(3*np.eye(3,3))\n\n print(\"Session n° \" + str(k+1))\n\n \" Apprentissage sur les donnees generees \"\n\n for j in range(2,Nc+1):\n print(\" Apprentissage n°1\")\n try:\n tE1 = rgOpt.retropropagation(\"/home/ray974/Learning/Data/bdd_dev.db\",2,j,c1,c2,mu_0,400,400,10,y,Z)\n abs1 = [(i+1) for i in range(len(tE1))]\n plt.plot(np.array(abs1),np.array(tE1),'red',label=str(tE1[len(tE1)-1]))\n except OverflowError:\n print(\"L'apprentissage a echoue -> erreur overflow\")\n print(\" Apprentissage n°2\")\n try:\n tE2 = rgOpt.retropropagation(\"/home/ray974/Learning/Data/bdd_dev.db\",2,j,c1,c2,mu_0,400,400,10,y,Z)\n abs2 = [(i+1) for i in range(len(tE2))]\n plt.plot(np.array(abs2),np.array(tE2),'gold',label=str(tE2[len(tE2)-1]))\n except OverflowError:\n print(\"L'apprentissage a echoue -> erreur overflow\")\n print(\" Apprentissage n°3\")\n try:\n tE3 = rgOpt.retropropagation(\"/home/ray974/Learning/Data/bdd_dev.db\",2,j,c1,c2,mu_0,400,400,10,y,Z)\n abs3 = [(i+1) for i in range(len(tE3))]\n plt.plot(np.array(abs3),np.array(tE3),'darkgreen',label=str(tE3[len(tE3)-1]))\n except OverflowError:\n print(\"L'apprentissage a echoue -> erreur overflow\")\n print(\" Apprentissage n°4\")\n try:\n tE4 = rgOpt.retropropagation(\"/home/ray974/Learning/Data/bdd_dev.db\",2,j,c1,c2,mu_0,400,400,10,y,Z)\n abs4 = [(i+1) for i in range(len(tE4))]\n plt.plot(np.array(abs4),np.array(tE4),'cyan',label=str(tE4[len(tE4)-1]))\n except OverflowError:\n print(\"L'apprentissage a echoue -> erreur overflow\")\n print(\" Apprentissage n°5\")\n try:\n tE5 = rgOpt.retropropagation(\"/home/ray974/Learning/Data/bdd_dev.db\",2,j,c1,c2,mu_0,400,400,10,y,Z)\n abs5 = [(i+1) for i in range(len(tE5))]\n plt.plot(np.array(abs5),np.array(tE5),'slategrey',label=str(tE5[len(tE5)-1]))\n except OverflowError:\n print(\"L'apprentissage a echoue -> erreur overflow\")\n plt.legend()\n plt.title('Nc = ' + str(j) + 'neurones cachés.')\n plt.xlabel(\"Nombre d'itérations\")\n plt.ylabel(\"Erreur quadratique moyenne\")\n plt.show()","sub_path":"testRetropPropagation.py","file_name":"testRetropPropagation.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"516069883","text":"import json\nimport 
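testRetropPropagation.py above (its French comments describe testing backpropagation on two classes separated by the hyperplane y = 10 - x) labels class B with `(b < 10-b)`, which compares b against 5 rather than against the line; mirroring the class-A test suggests `(a < 10-b)` was intended. A small sketch of the corrected labelling, with a hypothetical helper name:

```python
import random


def label_point(a, b):
    """+1 for class A (above y = 10 - x), -1 for class B, None on the line."""
    if a > 10 - b:
        return 1
    if a < 10 - b:   # the record tests `b < 10 - b`, i.e. b < 5
        return -1
    return None


print(label_point(random.uniform(0, 10), random.uniform(0, 10)))
```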
unittest\n# from collections import OrderedDict\n\nimport aiohttp\nfrom aiohttp.test_utils import AioHTTPTestCase\nfrom nerium import Query, ResultFormat\nfrom nerium.app import app\nfrom tests.test_setup import query_name\n\n# Fixtures\nEXPECTED = [{\n 'foo': 1.25,\n 'bar': '2017-09-09',\n 'quux': 'Hello',\n 'quuux': 'Björk Guðmundsdóttir'\n}, {\n 'foo': 42,\n 'bar': '2031-05-25',\n 'quux': 'yo',\n 'quuux': 'ƺƺƺƺ'\n}]\n\n\n\nclass TestResults(unittest.TestCase):\n def test_results_expected(self):\n loader = Query(query_name)\n result = loader.result_set()\n self.assertEqual(result, EXPECTED)\n formatter = ResultFormat(result, format_='default')\n formatted_results = formatter.formatted_results()\n self.assertEqual(formatted_results, EXPECTED)\n\n\nclass TestAPI(AioHTTPTestCase):\n async def get_application(self):\n return app\n\n def test_response(self):\n async def test_get_query():\n url = \"/v1/{}\".format(query_name)\n resp = await self.client.request(\"GET\", url)\n assert resp.status == 200\n text = await resp.text()\n self.assertEqual(EXPECTED, json.loads(text))\n\n self.loop.run_until_complete(test_get_query())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"341867063","text":"\"\"\"\nImplementation of Exercise 4.7 in Chapter 4 of Sutton and Barto's \"Reinforcement \nLearning\" \n\"\"\"\n\nimport numpy as np\nfrom jackscarrental import JacksCarRental\nimport time\nimport matplotlib.pyplot as plt\n\n#%%\n\ndef init_val_fun(max_cars_per_loc):\n '''\n Initialize state value function for iterative policy improvement\n \n Parameters\n ----------\n max_cars_per_loc : int\n maximum number of cars per location.\n Returns\n -------\n V : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n State value function\n '''\n\n # state value function\n V = np.zeros((max_cars_per_loc + 1, max_cars_per_loc + 1))#\n \n return V\n\ndef init_policy(max_cars_per_loc):\n \"\"\"\n Initialize policy for iterative policy improvement\n\n Parameters\n ----------\n max_cars_per_loc : int\n maximum number of cars per location.\n\n Returns\n -------\n pi : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n policy.\n\n \"\"\"\n\n # policy\n pi = np.zeros((max_cars_per_loc + 1, max_cars_per_loc + 1))\n\n return pi\n\n\n#%%\n\ndef eval_policy(env, pi, V, theta, gamma):\n '''\n Calculate state value function for given policy\n\n Parameters\n ----------\n env :\n JCS MDP\n pi : ndarray\n policy\n V : ndarray\n state value function\n theta : float\n treshold for policy evaluation\n gamma : float\n discount factor of DP\n \n Returns\n -------\n V : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n state value function\n '''\n\n v_tmp = np.zeros(V.shape)\n Delta = theta + 1\n \n while theta < Delta:\n Delta = 0\n for cars_A in range(0, V.shape[0]):\n for cars_B in range(0, V.shape[1]):\n \n v_tmp[cars_A, cars_B] = V[cars_A, cars_B]\n # Calculate value function for given action\n V[cars_A, cars_B] = value_function(env, (cars_A, cars_B), \n pi[cars_A, cars_B], V, gamma)\n # compare value function with previous value function\n Delta = np.max([Delta, np.abs(v_tmp[cars_A, cars_B] - \n V[cars_A, cars_B])])\n \n return V\n\ndef improve_policy(env, V, actions):\n '''\n Iterate improvement of policy\n\n Parameters\n ----------\n env :\n JCS MDP\n V : ndarray\n state value function\n actions: int \n number of cars 
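tests/default.py above drives the aiohttp test client by hand via `self.loop.run_until_complete`; aiohttp 3.x ships `unittest_run_loop` for exactly this. A self-contained sketch with a stub application standing in for the nerium app (the route and handler here are illustrative):

```python
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop


async def demo(request):
    return web.json_response([])


class TestAPI(AioHTTPTestCase):
    async def get_application(self):
        # stub application; the record returns nerium's real app here
        app = web.Application()
        app.router.add_get("/v1/demo", demo)
        return app

    @unittest_run_loop                 # replaces the manual run_until_complete
    async def test_response(self):
        resp = await self.client.request("GET", "/v1/demo")
        assert resp.status == 200
```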
moved\n Returns\n -------\n V : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n state value function\n '''\n pi = np.zeros(V.shape)\n\n for cars_A in range(0, V.shape[0]):\n for cars_B in range(0, V.shape[1]):\n Q_prev = 0\n for action in actions:\n # maximum number of cars which can be shifted\n if cars_A + action >= 0 and cars_B - action >= 0:\n # Calculate Q value function for given state (cars_A, cars_B) \n # and action a\n Q_next = value_function(env, (cars_A, cars_B), action, V, gamma)\n \n if Q_next > Q_prev:\n Q_prev = Q_next\n pi[cars_A, cars_B] = action \n \n return pi\n\ndef value_function(env, current_state, action, Value_function, gamma):\n \"\"\"\n Calculate value function for given initial state and action\n\n Parameters\n ----------\n env :\n JCS MDP\n current_state : tuple\n current state consisting of number of cars at A and B\n action :\n number of cars shifted from A to B\n Value_function : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n state value function\n\n Returns\n -------\n V_value : float\n state value for current state\n\n \"\"\"\n \n # probability transition matrix\n p = env.PTM_dict[\"p\"]\n n_B_ret = env.PTM_dict[\"n_B_ret\"] \n n_A_ret = env.PTM_dict[\"n_A_ret\"]\n n_B_req = env.PTM_dict[\"n_B_req\"]\n n_A_req = env.PTM_dict[\"n_A_req\"]\n\n # calculate how many cars can be rented currently\n # (assume that requests and returns happen at the same)\n n_A_req_2 = np.array([current_state[0] + n_A_ret + int(action),\n n_A_req]).min(axis = 0)\n n_B_req_2 = np.array([current_state[1] + n_B_ret - int(action), \n n_B_req]).min(axis = 0)\n \n # send all additional cars away\n next_state_A = np.array([current_state[0] + n_A_ret - n_A_req_2 + int(action),\n env.max_cars_per_loc*np.ones(len(n_A_ret))]).min(axis = 0)\n next_state_B = np.array([current_state[1] + n_B_ret - n_B_req_2 - int(action),\n env.max_cars_per_loc*np.ones(len(n_A_ret))]).min(axis = 0)\n \n next_state_A = next_state_A.astype(int)\n next_state_B = next_state_B.astype(int)\n \n # calculate rewards and value function\n parking_A = next_state_A > env.nr_free_parking\n parking_B = next_state_B > env.nr_free_parking\n \n reward_parking = env.reward_parking_lot*(next_state_A*parking_A + \n next_state_B*parking_B)\n reward_rent = (env.reward_req*(n_A_req_2 + n_B_req_2))\n reward = reward_rent + reward_parking\n \n reward_shift = env.reward_shift*((action - env.free_shift_AB)*(action>0) - \n (action)*(action<0)) \n \n VF_state = p*(reward + gamma*Value_function[next_state_A, next_state_B])\n V_value = VF_state.sum() + reward_shift\n\n return V_value\n\n\ndef policy_iteration(env, theta, gamma):\n '''\n Policy iteration algorithm\n\n Parameters\n ----------\n env :\n Jacks car service MDP\n theta : float\n treshold for policy evaluation\n gamma : float\n discount factor of DP\n\n Returns\n -------\n V : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n state value function\n pi : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n policy\n\n '''\n actions = env.action_space()\n \n V = init_val_fun(env.max_cars_per_loc) \n pi = init_policy(env.max_cars_per_loc)\n\n pi_tmp = np.random.rand(pi.shape[0],pi.shape[1])\n\n while not np.array_equal(pi, pi_tmp):\n pi_tmp = pi\n # evaluate value function\n V = eval_policy(env, pi_tmp, V, theta, gamma)\n # improve_policy\n pi = improve_policy(env, V, actions)\n \n # plots\n\n return V, pi\n\n\n#%%\n\ndef plot_VF(V):\n '''\n Plot value function as a function of number of cars at each location\n\n Parameters\n ----------\n V : ndarray, shape 
(max_cars_per_loc, max_cars_per_loc)\n state value function\n\n Returns\n -------\n None.\n\n '''\n plt.figure()\n plt.imshow(V)\n plt.colorbar()\n plt.title(\"Value function\")\n plt.xlabel(\"Cars at B\")\n plt.ylabel(\"Cars at A\")\n\n\ndef plot_policy(pi):\n '''\n Plot policy as a function of number of cars at each location\n\n Parameters\n ----------\n pi : ndarray, shape (max_cars_per_loc, max_cars_per_loc)\n policy\n\n Returns\n -------\n None.\n\n '''\n\n plt.figure()\n plt.imshow(pi)\n plt.colorbar()\n plt.title(\"policy function\")\n plt.xlabel(\"Cars at B\")\n plt.ylabel(\"Cars at A\")\n\n#%% \nif __name__ == '__main__':\n \"\"\"\n Set parameters and run jacks car service (Example 4.2 and Exercise 4.7\n in Chapter 4 of Sutton and Barto's \"Reinforcement Learning\" )\n \"\"\"\n max_cars_per_loc = 20 # max number of cars per location\n gamma = 0.9 # discount factor\n theta = 0.01 # treshold for policy evaluation\n \n lbd_req_A = 3 # lambda paramters of poisson distribution for\n lbd_ret_A = 3 # request and return at location A\n \n lbd_req_B = 4 # location B\n lbd_ret_B = 2\n \n max_n = 8 # defines number of considered terms in poisson distr.\n \n min_shift = -5 # max. number of shifted cars (action space)\n max_shift = 5\n \n reward_req = 10 # reward for requested car\n reward_shift = -2 # penalty for car moved over night\n \n \n nr_free_parking = 10 # number of free parking cars\n reward_parking_lot = 0 # penalty for parking more cars over night\n \n free_shift_AB = 1 # shift first car from A to B for free\n\n start = time.time()\n env_JCS = JacksCarRental(\n max_cars_per_loc, min_shift, max_shift,\n lbd_req_A, lbd_ret_A, lbd_req_B, lbd_ret_B, max_n,\n reward_req, reward_shift, reward_parking_lot, \n nr_free_parking, free_shift_AB\n )\n\n V, pi = policy_iteration(env_JCS, theta, gamma)\n \n end = time.time()\n print(end - start)\n \n plot_policy(pi)\n plot_VF(V)","sub_path":"4 Dynamic programming/Jacks car rental/policy_iteration.py","file_name":"policy_iteration.py","file_ext":"py","file_size_in_byte":8768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"11880169","text":"from RSA import RSA\nfrom RSA import *\nfrom CMDInterface import CMDInterface\n\ndef main():\n rsa = RSA()\n UI = CMDInterface(rsa)\n UI.display_menu()\n\ndef test():\n rsa = RSA()\n rsa.e = 3\n rsa.d = 2011\n rsa.n = 3127\n message = \"hello\"\n\n print(\"Test known values e={}, d={}, n={}\".format(rsa.e, rsa.d, rsa.n))\n print(\"Message to encrypt/decrypt: \" + message)\n\n enc = rsa.encrypt(message)\n print(\"Encrypted message: \" + enc)\n dec = rsa.decrypt(enc)\n print(\"Decrypted message: \" + dec)\n\ndef test2():\n rsa = RSA()\n rsa.e = 3\n rsa.d = 2011\n rsa.n = 3127\n message = \"hello\"\n\n print(\"Test known values e={}, d={}, n={}\".format(rsa.e, rsa.d, rsa.n))\n print(\"Message: \" + message)\n\n enc = rsa.encrypt(message)\n print(\"Encrypted message: \" + enc)\n dec = rsa.decrypt(enc)\n print(\"Decrypted message: \" + dec)\n\n sig = \"andres\"\n print(\"Signature: \" + sig)\n dec2 = rsa.decrypt(sig, True)\n print(\"Generated Signature: \" + dec2)\n auth = rsa.encrypt(dec2, True)\n print(\"After Authentication: \" + auth)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"43237454","text":"#!/usr/bin/env python3\n\"\"\"\nThis module will read the required VLANs from 
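policy_iteration.py above implements Sutton and Barto's Exercise 4.7 with car-rental-specific state handling; the underlying loop alternates a Bellman-expectation evaluation sweep, V(s) <- sum over (s', r) of p(s', r | s, pi(s)) * (r + gamma * V(s')), with greedy improvement until the policy stops changing. A generic tabular sketch of that loop; the transition format `P[s][a] -> list of (prob, next_state, reward)` and the toy MDP are assumptions, not the record's:

```python
import numpy as np


def policy_iteration(P, n_states, n_actions, gamma=0.9, theta=1e-4):
    """P[s][a] is a list of (prob, next_state, reward) transitions."""
    V = np.zeros(n_states)
    pi = np.zeros(n_states, dtype=int)
    stable = False
    while not stable:
        # policy evaluation: sweep until the value function stops moving
        delta = theta + 1.0
        while delta > theta:
            delta = 0.0
            for s in range(n_states):
                v_old = V[s]
                V[s] = sum(p * (r + gamma * V[s2]) for p, s2, r in P[s][pi[s]])
                delta = max(delta, abs(v_old - V[s]))
        # policy improvement: act greedily with respect to V
        stable = True
        for s in range(n_states):
            q = [sum(p * (r + gamma * V[s2]) for p, s2, r in P[s][a])
                 for a in range(n_actions)]
            best = int(np.argmax(q))
            if best != pi[s]:
                pi[s], stable = best, False
    return V, pi


# toy 2-state MDP: only action 1 taken in state 0 ever pays a reward
P = {0: {0: [(1.0, 0, 0.0)], 1: [(1.0, 1, 1.0)]},
     1: {0: [(1.0, 0, 0.0)], 1: [(1.0, 1, 0.0)]}}
print(policy_iteration(P, n_states=2, n_actions=2))
```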
comma separated file\n\nUsage:\n python3 read_vlan.py vlan.csv Where vlan.csv is the comma separated file with the VLAN information\n\"\"\"\n\nimport warnings\nimport sys\nimport collections\nfrom pprint import pprint as pp\nfrom string import Template\nimport json\nimport requests\nimport csv\nimport ipaddress\nfrom nexus_switch import Nexus9K\n\ndef main():\n \"\"\"\n The main module will verify the required input of the file\n\n :return:\n \"\"\"\n\n try:\n required_vlans = read_csv(sys.argv[1])\n except IndexError:\n sys.exit(\"\\nExiting...\\nPlease enter the input as: read_vlan.py 'csv_file' \"\n \" Where 'csv_file' is the name of hte file\")\n except FileNotFoundError:\n sys.exit(\"\\nExiting...\\nPlease verify and correct the filename: {}\".format(sys.argv[1]))\n except TypeError:\n sys.exit(\"\\nExiting due to 'TypeError'\")\n except ValueError:\n sys.exit(\"\\nExiting...\\nPlease verify the file is a CSV file\")\n\n v = Nexus9K()\n vlans, names = v.switch_vlans()\n v.print_switch_vlans(vlans, names)\n\n for vlan in required_vlans:\n _vlan_exists, _vlan_name_matches = v.verify_switch_vlan(vlan.vlan_index, vlan.vlan_name)\n data_center = \"esv1\"\n if not _vlan_exists:\n print(\"VLAN {} needs to be created.\".format(vlan.vlan_index))\n _vlan_name = data_center + vlan.vlan_name\n v.create_switch_vlan(vlan.vlan_index, _vlan_name)\n else:\n print(\"VLAN {} already exists.\".format(vlan.vlan_index))\n\n\ndef create_vlan(switch_ip, vlan):\n \"\"\"\n This module will create both the layer2 and layer3 configuration for a new vlan\n :param switch_ip: IP address of the switch where the VLAN will be created\n :param vlan: VLAN information\n :return:\n \"\"\"\n print(\"vlan will need to be created. \", vlan[0])\n\n\ndef verify_vlan(switch_ip, vlan):\n \"\"\"\n Will verify the configuraiton of the layer2 and layer3 details of the existing VLAN\n :param switch_ip: IP address of the switch where the VLAN exists\n :param vlan: VLAN information to verify\n :return:\n \"\"\"\n print(\"vlan exists\", vlan[0])\n\n\ndef check_vlan(switch_ip, required_vlan):\n \"\"\"\n This module will verify if the VLAN already exists on a switch.\n Access to the switch is passed as switch_ip and the VLAN is passed as\n :param switch_ip: This is the IP address of the switch\n :param required_vlan: This is the VLAN tag of the VLAN to verify\n :return: True (the VLAN exist) or False (the VLAN does not exist)\n \"\"\"\n\n _switch_vlans = []\n # switch_ip = row[0]\n username = 'cisco'\n password = 'cisco'\n\n my_headers = {'content-type': 'application/json-rpc'}\n\n payload = [{'jsonrpc': '2.0',\n 'method': 'cli',\n 'params': {\n 'cmd': 'show vlan',\n 'version': 1\n },\n 'id': 1\n }\n ]\n my_data = json.dumps(payload)\n\n url = \"http://\" + switch_ip + \"/ins\"\n response = requests.post(url, data=my_data, headers=my_headers, auth=(username, password))\n\n _vlan_table = response.json()['result']['body']['TABLE_mtuinfo']['ROW_mtuinfo']\n for iter in _vlan_table:\n _switch_vlans.append(int(iter['vlanshowinfo-vlanid']))\n\n seq = 0\n for i in required_vlan:\n if (i[seq] in _switch_vlans):\n return True\n else:\n return False\n\n # print('Response: ', response, 'My Data: ', my_data)\n\n # vlan_table = response.json()['result']['body']['TABLE_mtuinfo']['ROW_mtuinfo']\n # print('VLAN Table: ', vlan_table)\n\n\ndef read_csv(name):\n \"\"\"\n Read a csv file\n :param name:\n :return:\n \"\"\"\n VLANs = collections.namedtuple('VLAN', 'vlan_index, vlan_name, ipv4_address_mask,\\\n ipv6_network, ipv6_subnet, ipv6_node, 
ipv6_subnet_mask')\n required_vlans = []\n for vlan in map(VLANs._make, csv.reader(open(name, 'rt'))):\n required_vlans.append(vlan)\n print(vlan.vlan_index, vlan.vlan_name)\n if str(vlan.ipv4_address_mask) == str(ipaddress.ip_network(vlan.ipv4_address_mask)):\n print('Valid network IP Address and mask was passed: {}'\n .format(ipaddress.ip_network(vlan.ipv4_address_mask)))\n\n _ip_network = ipaddress.ip_network(vlan.ipv4_address_mask)\n\n print('HSRP VIP: {}, Switch1 IP address: {} - Switch2 IP address: {}'\n .format(_ip_network[1], _ip_network[2], _ip_network[3]))\n return required_vlans\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"NexusSwitchConfig/read_vlan.py","file_name":"read_vlan.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"646259370","text":"#목적 : 종속변수값 예측하기\nimport pandas as pd\nfrom statsmodels.formula.api import ols,glm\n\nprint(\"7.2.7 예측하기\")\nwine = pd.read_csv('winequality-both.csv', sep=',',header=0)\nwine.columns = wine.columns.str.replace(' ','_')\n\nmy_formula = 'quality ~ alcohol + chlorides + citric_acid + density + fixed_acidity' \\\n ' + free_sulfur_dioxide + pH + residual_sugar + sulphates + total_sulfur_dioxide' \\\n ' + volatile_acidity'\n# my_formula = 'quality ~ alcohol + chlorides + density + fixed_acidity' \\\n# ' + free_sulfur_dioxide + pH + residual_sugar + sulphates + total_sulfur_dioxide' \\\n# ' + volatile_acidity'\nlm = ols(my_formula, data=wine).fit()\n\ndependent_variable = wine['quality']\nindependent_variables = wine[wine.columns.difference(['quality','type'])]\n\nnew_observations = wine.loc[:, independent_variables.columns]\ny_predicted = lm.predict(new_observations)\ny_predicted_rounded = [round(score) for score in y_predicted]\n\ntotal_count = 0\nindex = 0\ntotal_number = len(y_predicted_rounded)\ntotal_correct = 0\n\nwhile index < total_number:\n print(f'{index+1} | {y_predicted_rounded[index]} | {dependent_variable[index]}')\n if y_predicted_rounded[index] == dependent_variable [index]:\n total_correct += 1\n index += 1\n\nprint(f'\\n 전체 관찰 계수: {total_number}')\nprint(f' 정답수: {total_correct}')\nprint(f' 정답률: {(total_correct/total_number)*100} %')\n","sub_path":"python_workspace/3_bigdata/03_Statistics_basic/2.Linear_Regression_Analysis/wine_quality_8.py","file_name":"wine_quality_8.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"117976959","text":"class Account(object):\n '''\n This is the parent class. 
It takes two arguments,\n    name of type str and balance of type int.\n    It has 3 functions, create_account, withdraw and \n    interest_earned\n    '''\n\n    def __init__(self, name, balance):\n        self.name = name\n        self.balance = balance\n        self._BalA_Interest = 0.03\n        self._BalB_Interest = 0.05\n        self.accounts = {}\n\n    def create_account(self):\n        '''\n        Function takes no arguments.\n        It creates a key value pair from the account name and balance and \n        adds it to the accounts dictionary\n        '''\n        self.accounts[self.name] = self.balance\n        return self.accounts\n\n    def withdraw(self, amount):\n        '''\n        Function takes one argument amount of type int.\n        It checks if amount is less than balance and if it\n        is, deducts amount from the balance and saves the \n        new balance which becomes the new value\n        '''\n        if self.name in self.accounts:\n            if self.accounts[self.name] < amount:\n                return 'Insufficient funds'\n            else:\n                new_balance = self.accounts[self.name] - amount\n                self.accounts[self.name] = new_balance\n                return \"{}'s new balance is {}\".format(self.name, self.accounts[self.name])\n        else:\n            return 'Account does not exist'\n\n    def interest_earned(self):\n        '''\n        function takes no arguments.\n        It calculates the interest earned on the current balance\n        '''\n        if self.name in self.accounts:\n            # Check the higher balance tier first; otherwise the 5% rate is unreachable\n            if self.accounts[self.name] > 5000:\n                interest = self.accounts[self.name] * self._BalB_Interest\n                new_balance = self.accounts[self.name] + interest\n                self.accounts[self.name] = new_balance\n                return \"{} earned interest of {} their new balance is {}\".format(self.name, interest, self.accounts[self.name])\n\n            elif self.accounts[self.name] > 1000:\n                interest = self.accounts[self.name] * self._BalA_Interest\n                new_balance = self.accounts[self.name] + interest\n                self.accounts[self.name] = new_balance\n                return \"{} earned interest of {}, their new balance is {}\".format(self.name, interest, self.accounts[self.name])\n        else:\n            return 'Account does not exist'\n\n\nclass AccountType(Account):\n    '''\n    Class AccountType inherits from Account class.\n    It overrides the create_account function changing its functionality\n    to add the created accounts to either savings account or current \n    account depending on the balance amount. \n    '''\n\n    def __init__(self, name, balance):\n        Account.__init__(self, name, balance)\n        self.sav_min_balance = 100\n        self.savings_account = {}\n        self.current_account = {}\n\n    def create_account(self):\n        '''\n        function overrides the Account class create_account function\n        It adds the created account to either savings account or \n        current account dictionaries. \n        If the balance is more than the savings account minimum\n        balance, the account is added to the savings account dictionary;\n        otherwise it is added to the current account dictionary.\n        '''\n\n        if self.balance > self.sav_min_balance:\n            self.savings_account[self.name] = self.balance\n            return \"We opened a savings account for {} with a balance of {}\".format(self.name, self.savings_account[self.name])\n        self.current_account[self.name] = self.balance\n        return \"We opened a current account for {} with a balance of {}\".format(self.name, self.current_account[self.name])\n\n\nnew_accounts = Account('John', 4000)\naccount_types = AccountType('John', 4000)\n\nprint(new_accounts.create_account())\nprint(account_types.create_account())\nprint(new_accounts.withdraw(30))\nprint(new_accounts.interest_earned())\n","sub_path":"Banking.py","file_name":"Banking.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"268130445","text":"# collectd-activemq-python\n# ========================\n#\n# Python-based plugin to put ActiveMQ stats to collectd\n#\n# https://github.com/powdahound/redis-collectd-plugin - was used as template\n# https://github.com/kipsnak/munin-activemq-plugin - was used as inspiration\n\nimport collectd\nfrom xml.dom import minidom\nimport urllib\n\n\nclass AMQMonitor(object):\n\n    def __init__(self):\n        self.plugin_name = \"activemq_info\"\n        self.amq_admin_host = 'localhost'\n        self.amq_admin_port = 8161\n        self.verbose_logging = False\n\n    def log_verbose(self, msg):\n        if not self.verbose_logging:\n            return\n        collectd.info('activemq_info plugin [verbose]: %s' % msg)\n\n    def configure_callback(self, conf):\n        \"\"\"Receive configuration block\"\"\"\n        for node in conf.children:\n            if node.key == 'Host':\n                self.amq_admin_host = node.values[0]\n            elif node.key == 'Port':\n                self.amq_admin_port = int(node.values[0])\n            elif node.key == 'Verbose':\n                self.verbose_logging = bool(node.values[0])\n            else:\n                collectd.warning('activemq_info plugin: Unknown config key: %s.' 
% node.key)\n self.log_verbose('Configured with host=%s, port=%s' % (self.amq_admin_host, self.amq_admin_port))\n\n\n def dispatch_value(self, plugin_instance, value_type, instance, value):\n \"\"\"Dispatch a value to collectd\"\"\"\n self.log_verbose('Sending value: %s.%s.%s=%s' % (self.plugin_name, plugin_instance, instance, value))\n val = collectd.Values()\n val.plugin = self.plugin_name\n val.plugin_instance = plugin_instance\n val.type = value_type\n val.type_instance = instance\n val.values = [value, ]\n val.dispatch()\n\n\n def fetch_info(self):\n \"\"\"Connect to ActiveMQ admin webpage and return DOM object\"\"\"\n url = 'http://%s:%s/admin/xml/queues.jsp' % (self.amq_admin_host, self.amq_admin_port)\n dom = None\n try:\n dom = minidom.parse(urllib.urlopen(url))\n #dom = minidom.parse(open('queues.xml', 'r'))\n except Exception:\n pass\n return dom\n\n\n def read_callback(self):\n \"\"\"Collectd read callback\"\"\"\n self.log_verbose('Read callback called')\n dom = self.fetch_info()\n if not dom:\n self.log_verbose('activemq_info plugin: No info received, offline node or turned off ActiveMQ')\n return\n\n queuenodes = dom.getElementsByTagName(\"queue\")\n for node in queuenodes:\n queue = node.attributes.item(0).value.replace('.', '_')\n size = node.getElementsByTagName(\"stats\").item(0).getAttribute(\"size\")\n consumerCount = node.getElementsByTagName(\"stats\").item(0).getAttribute(\"consumerCount\")\n enqueueCount = node.getElementsByTagName(\"stats\").item(0).getAttribute(\"enqueueCount\")\n dequeueCount = node.getElementsByTagName(\"stats\").item(0).getAttribute(\"dequeueCount\")\n self.dispatch_value(queue, 'gauge', 'size', size)\n self.dispatch_value(queue, 'gauge', 'consumerCount', consumerCount)\n self.dispatch_value(queue, 'counter', 'enqueueCount', enqueueCount)\n self.dispatch_value(queue, 'counter', 'dequeueCount', dequeueCount)\n\n\namq = AMQMonitor()\n# register callbacks\ncollectd.register_config(amq.configure_callback)\ncollectd.register_read(amq.read_callback)\n","sub_path":"activemq_info.py","file_name":"activemq_info.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"18595811","text":"import pylab as pl\nimport numpy as np\nimport tensorflow as tf\n\ndsize = 500\nmsize = 100\nx = np.linspace(-1, 1, dsize)\ny = x**2 + np.random.normal(size=dsize)/4\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(2, activation=\"tanh\", kernel_initializer=tf.keras.initializers.GlorotNormal(), input_shape=(1,) ),\n tf.keras.layers.Dense(1, activation=\"linear\", kernel_initializer=tf.keras.initializers.GlorotNormal())\n])\n\nmodel.compile(optimizer=\"adam\",\n loss = tf.keras.losses.MeanSquaredError())\n\nmodel.fit(x, y, epochs=1000)\npl.plot(np.linspace(-1, 1, msize), model.predict(np.linspace(-1, 1, msize)), \"r-\")\n\npl.scatter(x, y)\npl.show()\n","sub_path":"Lecture02/non-linear.py","file_name":"non-linear.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"327334673","text":"hi = []\ns = 0\nmi = 0\nmv = ''\nfor i in range(1,5):\n print(f'- - - {i}ª PESSOA - - - ')\n nome = str(input('Nome: ')).upper().strip()\n idade = int(input('Idade: '))\n sexo = str(input('Sexo [F/M]: ')).upper()\n s += idade\n if sexo == 'M':\n hi.append(idade)\n if idade == max(hi):\n mv = nome\n else:\n if idade < 21:\n mi += 1\nprint('- ' * 20)\nprint(f'A média de idade é: {s / 4} 
anos')\nprint(f'O nome do homem mais velho tem {max(hi)} e se chama {mv}')\nif mi == 1:\n print(f'Foi digitada {mi} mulher menor de idade')\nelse:\n print(f'Foram digitadas {mi} mulheres menores de idade')\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Desafios/des056.py","file_name":"des056.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"302330949","text":"\"\"\" Administración del Hogar en Flask \"\"\"\nfrom flask import Flask, render_template\n\nfrom src.shared.db import db\nfrom src.pages.cuentas import cuentas\nfrom src.pages.departamentos import departamentos\nfrom src.pages.registros import registros\nfrom src.pages.reportes import reportes\n\nAPP = Flask(__name__)\nAPP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/home.db'\ndb.app = APP\ndb.init_app(APP)\ndb.create_all()\n\nAPP.register_blueprint(cuentas)\nAPP.register_blueprint(departamentos)\nAPP.register_blueprint(registros)\nAPP.register_blueprint(reportes)\n\n\n@APP.route('/')\ndef home():\n \"\"\" Página de Inicio \"\"\"\n return render_template('home.html')\n\n\nif __name__ == \"__main__\":\n APP.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"217589619","text":"# THIS IS A FUNCTION WHICH WILL FIND THE CUBES UPTO A GIVEN NUMBER STARTING FROM 1.\r\n\r\ndef cube_finder(num):\r\n cubes = {}\r\n for cube in range(1,num+1):\r\n cubes[cube] = cube**3\r\n # print(f\"{cube}:{cubes}\")\r\n return cubes\r\n\r\nnumber = int(input(\"Enter a number to find the cubes upto that number: \"))\r\nprint(cube_finder(number))\r\n\r\n","sub_path":"dict_ex1.py","file_name":"dict_ex1.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"75298777","text":"# input\n# 1 2\n\n# output\n# <\n\ndef compare(x, y) :\n if x > y :\n print(\">\")\n elif x == y :\n print(\"==\")\n else :\n print(\"<\")\n\ninputs = list(map(int, input().split(' ')))\n\nx = inputs[0]\ny = inputs[1]\ncompare(x, y)\n\n","sub_path":"baekjoon/step by step/if statement/Compare two numbers.py","file_name":"Compare two numbers.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"526142590","text":"from os.path import join, isdir\r\nfrom os import mkdir, makedirs\r\nfrom io import StringIO\r\nfrom sys import stdout\r\n\r\nbasedir = r'/home/david/scripts/codejam'\r\nif not isdir(basedir):\r\n makedirs(basedir)\r\n\r\nclass CodeJam(object):\r\n def __init__(self, year, cround, problem, processor, debug=False):\r\n path = join(basedir, str(year), cround)\r\n if not isdir(path):\r\n makedirs(path)\r\n self.loc = path\r\n self.problem = problem\r\n self.debug = debug\r\n self.processor = processor\r\n def processtext(self, text):\r\n self.process(StringIO(text), stdout)\r\n def processfile(self, filename):\r\n with open(join(self.loc, filename)) as fin, \\\r\n open(join(self.loc, filename + '.out'), 'w') as fout:\r\n self.process(fin, fout)\r\n def process(self, fin, fout):\r\n tests = int(fin.readline().strip())\r\n if self.debug:\r\n print('Total {} tests...'.format(tests))\r\n for test in range(tests):\r\n if self.debug:\r\n print(' Processing test {}...'.format(test))\r\n print('Case #{}: {}'.format(test+1, self.processor(fin)), 
file=fout)\r\n","sub_path":"solutions_5652388522229760_0/Python/Fornax/codejam.py","file_name":"codejam.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"407759217","text":"\n# coding: utf-8\n# Goal: Store donation amounts in a custom AVL order statistic Tree,\n# allowing O(log(n)) runtimes\n\n#Details:\n#AVL Tree: A balanced binary tree that has O(log(n)) complexity for append\n#operations.\n#Statistic Order Tree: A tree where each node stores the number of elements in\n#it's branches. This allows O(log(n)) complexity for rank search and percentile\n#retrieval.\n#Duplicate values are not added as nodes. Instead, the corresponding node's\n#variable \"count\" is incremented. This requires some changes to the algorithims\n#for finding percentile, adding values, rotating the tree, etc\n\n#Unit testing is shown in AVL_Order_Statistic_Tree.ipynb. I observe ~5x to >10x\n#speed increases by using AVL trees as opposed to arrays (O(n)) with large data.\n\n#References and Citations\n# AVL Tree\n# Problem Solving with Algorithms and Data Structures using Python\n# Brad Miller and David Ranum, Luther College\n# http://interactivepython.org/runestone/static/pythonds/Trees/AVLTreeImplementation.html\n#\n# Order Statistic Tree\n# James Aspnes\n# http://www.cs.yale.edu/homes/aspnes/pinewiki/OrderStatisticsTree.html\n\nimport math\n\n#Tree node\nclass Node:\n\n\n def __init__(self, val, parent = None):\n self.leftChild = None\n self.rightChild = None\n self.parent = parent\n self.value = val\n self.leftCount = 0\n self.rightCount = 0\n self.count = 1\n self.balance = 0\n\n\n def isLeftChild(self):\n return (not self.parent is None) and self.parent.leftChild is self\n\n\n def isRightChild(self):\n return (not self.parent is None) and self.parent.rightChild is self\n\n\n#AVL order statistic tree\nclass Tree:\n\n\n def __init__(self):\n self.root = None\n self.numEntries = 0\n\n\n def add(self, val):\n self.numEntries += 1\n if self.root == None:\n self.root = Node(val)\n else:\n self._add(val, self.root)\n\n #Add value to Tree\n #Use standard binary tree algorithm, then check balance and rotate if needed\n def _add(self, val, node):\n if val < node.value:\n node.leftCount += 1\n if node.leftChild is None:\n node.leftChild = Node(val, node)\n self.updateBalance(node.leftChild)\n else:\n self._add(val, node.leftChild)\n elif val > node.value:\n node.rightCount += 1\n if node.rightChild is None:\n node.rightChild = Node(val, node)\n self.updateBalance(node.rightChild)\n else:\n self._add(val, node.rightChild)\n elif val == node.value:\n node.count +=1\n\n\n #Update balance of node and parents if needed\n #Balance = left subtree height - right subtree height\n def updateBalance (self, node):\n if node.balance >1 or node.balance <-1:\n self.rebalance(node)\n return\n if not node.parent is None:\n if node.isLeftChild():\n node.parent.balance += 1\n elif node.isRightChild():\n node.parent.balance -=1\n if node.parent.balance !=0:\n self.updateBalance(node.parent)\n\n\n def rotateLeft (self, oldRoot):\n newRoot = oldRoot.rightChild\n oldRoot.rightChild = newRoot.leftChild\n if not newRoot.leftChild is None:\n newRoot.leftChild.parent = oldRoot\n newRoot.parent = oldRoot.parent\n if oldRoot is self.root:\n self.root = newRoot\n else:\n if oldRoot.isLeftChild():\n oldRoot.parent.leftChild = newRoot\n else:\n oldRoot.parent.rightChild = newRoot\n newRoot.leftChild = oldRoot\n oldRoot.parent = newRoot\n #Update balance 
factor of nodes\n oldRoot.balance = oldRoot.balance + 1 - min (newRoot.balance, 0)\n newRoot.balance = newRoot.balance + 1 + max (oldRoot.balance, 0)\n #Update leftCount and rightCount of nodes\n oldRoot.rightCount = newRoot.leftCount\n newRoot.leftCount = oldRoot.rightCount + oldRoot.leftCount + oldRoot.count\n\n\n def rotateRight (self, oldRoot):\n newRoot = oldRoot.leftChild\n oldRoot.leftChild = newRoot.rightChild\n if not newRoot.rightChild is None:\n newRoot.rightChild.parent = oldRoot\n newRoot.parent = oldRoot.parent\n if oldRoot is self.root:\n self.root = newRoot\n else:\n if oldRoot.isRightChild():\n oldRoot.parent.rightChild = newRoot\n else:\n oldRoot.parent.leftChild = newRoot\n newRoot.rightChild = oldRoot\n oldRoot.parent = newRoot\n #Update balance factors of nodes\n oldRoot.balance = oldRoot.balance - 1 - max (newRoot.balance, 0)\n newRoot.balance = newRoot.balance - 1 + min (oldRoot.balance, 0)\n #Update leftCount and rightCount of nodes\n oldRoot.leftCount = newRoot.rightCount\n newRoot.rightCount = oldRoot.leftCount + oldRoot.rightCount +oldRoot.count\n\n\n def rebalance (self, node):\n if node.balance < 0:\n if node.rightChild.balance > 0:\n self.rotateRight (node.rightChild)\n self.rotateLeft (node)\n else:\n self.rotateLeft(node)\n elif node.balance > 0:\n if node.leftChild.balance < 0:\n self.rotateLeft (node.leftChild)\n self.rotateRight(node)\n else:\n self.rotateRight(node)\n\n\n def printTree (self):\n if not self.root is None:\n self._printTree(self.root)\n\n\n def _printTree (self, node):\n if not node is None:\n self._printTree(node.leftChild)\n print ('Value =', node.value, '. Left Count =', node.leftCount, '. Right Count = ', node.rightCount, '. Count = ', node.count)\n self._printTree (node.rightChild)\n\n\n def findRank(self, rank):\n if not self.root is None:\n return self._findRank(rank, self.root)\n else:\n return None\n\n\n def _findRank(self, rank, node):\n if node.leftChild is None:\n leftCount = 0\n else:\n leftCount = node.leftCount\n\n if rank >leftCount and rank <= leftCount + node.count:\n return node.value\n elif rank <= leftCount:\n if node.leftChild is None:\n return None\n return self._findRank(rank, node.leftChild)\n elif rank > leftCount + node.count:\n if node.rightChild is None:\n return None\n return self._findRank (rank-leftCount-node.count, node.rightChild)\n else:\n return None\n\n\n def findPercentile (self, percentile):\n #Find nearest-rank, given percentile\n\n #Special case: 0th percentile is rank 1.\n if percentile == 0:\n return self.findRank(1)\n\n rank = math.ceil(percentile/100*self.numEntries)\n return self.findRank(rank)\n","sub_path":"src/AVL_Tree.py","file_name":"AVL_Tree.py","file_ext":"py","file_size_in_byte":6979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"519305287","text":"import config\nimport requests\nimport json\nimport logging\nimport cv2\n\nclass APIDeteccao:\n \"\"\"Classe de integração com a API de Deteccao\n \"\"\"\n routes = {\n \"rosto\": config.API_DETECCAO + 'rosto' \n }\n \n def getRosto(self, frame):\n \"\"\"Obtém o usuários com faces cadastradas\n \"\"\"\n\n print(\"teste getRosto\") \n img_encoded = cv2.imencode('.jpg', frame)[1]\n\n #Monta o objeto imagem com o nome da foto\n file_img = {'imagem': ('image.jpg', img_encoded.tostring(), 'image/jpeg', {'Expires': '0'})}\n\n dataUser = { \n \"imagem\": file_img,\n \n }\n\n response = None\n try:\n response = requests.post(self.routes['rosto'], \n \n # headers=self._getHeadersBinary(), \n 
verify=False,\n data=dataUser,\n files=file_img) \n\n print(response)\n \n # logging.info('Response: {}'.format(response))\n # print('response', response)\n except:\n print('Não foi possível obter comunicação com a API de detecção')\n logging.info('Não foi possível obter comunicação com a API de detecção') \n return None \n\n if(response and response.status_code == 200):\n resultObject = json.loads(response.content.decode())\n\n if(resultObject):\n logging.debug('resultObject: {:}'.format(resultObject))\n return resultObject\n\n return None\n\n \n","sub_path":"apis/api_deteccao_bkp.py","file_name":"api_deteccao_bkp.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"277970363","text":"# -*- coding: utf-8 -*-\nimport mechanize\nfrom mechanize import Browser\nfrom bs4 import BeautifulSoup\nimport urllib2\nimport cookielib\nimport time\nimport os\nfrom progressbar import ProgressBar\n\npbar = ProgressBar()\n\nbr = Browser()\n\n# Prompt user file Input\nprint('vvic.com stock_checker version 0.1')\nprint('--------------------------------------------------------------------')\nuser_input = raw_input(\"Enter the full path of your file including its extension: \")\nassert os.path.exists(user_input), \"I did not find the file at, \"+str(user_input)\n\nwith open(user_input, 'r+') as f:\n lines = f.read().splitlines()\n\nprint(\"File found!\"+\"\\n\"+\"Now checking for OOS items...\")\n\n\n# Test file,do not remove\n# with open('productid.txt', 'r') as f:\n# lines = f.read().splitlines()\n\n\n# Browser options\nbr.set_handle_equiv(True)\n# br.set_handle_gzip(True)\nbr.set_handle_redirect(True)\nbr.set_handle_referer(True)\nbr.set_handle_robots(False)\ncj = cookielib.LWPCookieJar()\nbr.set_cookiejar(cj)\n\n\n# Follows refresh\nbr.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n\n\n# Do not mess this\n# for j in lines:\n# r = br.open(j)\n\n# Open first link to make sure no redirection\nr = br.open('http://www.vvic.com/item.htm?uuid=441807575491')\n\nno_stock = []\nfor i in pbar(lines):\n r = br.open(i)\n r = r.read()\n soup = BeautifulSoup(r,'html.parser')\n table = soup.find_all('div', {'class' : \"empty_result\"})\n results = soup.find_all('strong', style = 'color: red;')\n if table or results:\n no_stock.append(i)\n else:\n continue\n\n# Assurance to avoid duplicates\nno_stock = list(set(no_stock))\n\n# Print assurance if there is 'no' empty stock\nif len(no_stock) == 0:\n # Save no_stock into .txt and write no stock found at this time.\n with open('no_stock_lists_vvic.txt','w') as f :\n f.write('No. of out of stock items : '+str(len(no_stock))+'\\n'+'\\n')\n f.write('Yay! All interested/checked stocks are still available. Check back again later for any updates on availability.')\nelse:\n # Save no_stock into .txt\n with open('no_stock_lists_vvic.txt','w') as f :\n f.write('No. of out of stock items : '+str(len(no_stock))+'\\n'+'\\n')\n for i in no_stock:\n f.write(i + '\\n')\n print(\"OOS items successfully saved in 'no_stock_lists_vvic.txt'. 
Please Check.\")\n","sub_path":"stock_checker/vvic_stock_checker_v1.0.0.py","file_name":"vvic_stock_checker_v1.0.0.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"579049795","text":"import datetime\nimport html\nfrom test_plus.test import TestCase\nfrom django.utils import timezone\n\nfrom tempo.events import models\nfrom tempo.events import suggestions\n\nfrom . import factories\n\n\nclass TestSuggestions(TestCase):\n\n def test_can_rank_events_by_time(self):\n now = timezone.now()\n thirty_minutes_ago = now - datetime.timedelta(minutes=30)\n two_hours_ago = now - datetime.timedelta(hours=2)\n\n e1 = factories.Entry(\n start=now,\n )\n e2 = factories.Entry(\n start=thirty_minutes_ago,\n )\n e3 = factories.Entry(\n start=two_hours_ago,\n )\n\n qs = models.Entry.objects.all()\n s = suggestions.rank_by_closest(qs, 'start', now)\n self.assertEqual(\n [e for score, e in s],\n [e1, e2, e3]\n )\n\n s = suggestions.rank_by_closest(qs, 'start', thirty_minutes_ago)\n\n self.assertEqual(\n [e for score, e in s],\n [e2, e1, e3]\n )\n\n s = suggestions.rank_by_closest(qs, 'start', two_hours_ago)\n self.assertEqual(\n [e for score, e in s],\n [e3, e2, e1]\n )\n","sub_path":"tempo/events/tests/test_suggestions.py","file_name":"test_suggestions.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"436050799","text":"\ndef main():\n\n phrase = input(\"Enter a phrase you wish to acronimize: \")\n acronym = phrase[0].upper()\n for i in range(len(phrase)):\n if(phrase[i] == \" \"):\n acronym = acronym + phrase[i+1].upper()\n print(acronym)\n\nmain()\n","sub_path":"Lab6/acronimize.py","file_name":"acronimize.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243887302","text":"from django.test import TestCase\nfrom core.models import Project, ActivityJournal\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nimport datetime as dt\n# Create your tests here.\n\n\nclass TimeCalculator(TestCase):\n\n def test_calculate_one_activity(self):\n project = Project.objects.create()\n user = User.objects.create()\n activity_journal = ActivityJournal.objects.create(\n project=project,\n user=user,\n start=dt.datetime(2018, 4, 18, 14, 0, 0),\n end=dt.datetime(2018, 4, 18, 15, 0, 0),\n )\n \n total_time = project.time_calculator(user=user)\n self.assertEqual(dt.timedelta(hours=1), total_time)\n\n\nclass ActivityJournalModelTest(TestCase):\n\n def test_close_activity(self):\n project = Project.objects.create()\n user = User.objects.create()\n start_date = timezone.now()-timezone.timedelta(hours=3)\n activity_journal = ActivityJournal.objects.create(\n project=project,\n user=user,\n start=start_date,\n )\n activity_journal.close_activity()\n self.assertEqual(activity_journal.time_lapse,\n timezone.timedelta(hours=3).total_seconds())\n","sub_path":"core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"216528222","text":"from django.shortcuts import render\nfrom django.http.response import JsonResponse\nfrom django.db.models import Sum\nfrom rest_framework.parsers import JSONParser, MultiPartParser\nfrom rest_framework import status\nfrom rest_framework.decorators import 
api_view, parser_classes, permission_classes\nfrom api.serializers import UserSerializer, SongsSerializer, ArtistLikedSerializer, ArtistPlayTotalSerializer, ArtistSearchSerializer\nfrom api.models import User, Songs, ArtistLiked\n\n# Rendering response\nfrom rest_framework.renderers import JSONRenderer\n\n# Pandas stuff\nimport pandas as pd\nimport numpy as np\nimport random\n\n# Logging\nimport sys\nimport logging\n\n@api_view(['POST'])\ndef login(request):\n\ttry:\n\t\tuser = User.objects.get(user_id = request.data['user_id']) \n\t\treturn JsonResponse('Valid user', safe=False,status=status.HTTP_200_OK)\n\texcept:\n\t\treturn JsonResponse('Not a valid user', safe=False,status=status.HTTP_404_NOT_FOUND)\n\t\n@api_view(['GET'])\ndef get_user_data(request, user_query_id):\n\ttry:\n\t\tuser = User.objects.get(user_id = user_query_id)\n\t\tserialized_user = UserSerializer(user)\n\t\treturn JsonResponse(serialized_user.data, safe=False,status=status.HTTP_200_OK)\n\texcept User.DoesNotExist:\n\t\treturn JsonResponse('Not found', safe=False,status=status.HTTP_404_NOT_FOUND)\n\n@api_view(['GET'])\ndef get_all_users(request):\n\ttry:\n\t\tusers_query = User.objects.all()\n\t\tprint(users_query)\n\t\tserialized_users = UserSerializer(users_query, many=True)\n\t\treturn JsonResponse(serialized_users.data, safe=False,status=status.HTTP_200_OK)\n\texcept User.DoesNotExist:\n\t\treturn JsonResponse('Not found', safe=False,status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['POST'])\ndef register(request):\n\tserializer = UserSerializer(data = request.data)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\t\treturn JsonResponse(serializer.data, safe=False, status=status.HTTP_202_ACCEPTED)\n\telse:\n\t\treturn JsonResponse({'error': 'El usuario que se quiere crear ya existe'}, safe=False, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET'])\ndef get_recommendations(request, user_id):\n\ttry:\n\t\tuser = User.objects.get(user_id = user_id)\n\n\t\tif user.is_old_user:\n\t\t\tdf_top_for_user = pd.read_csv(f'./Export/{user_id}_top_100.csv')\n\t\t\tmin_id, max_id = (10*user.recommendation_frame, 10*(user.recommendation_frame + 1))\n\t\t\tsample = df_top_for_user[['iid']][min_id:max_id].to_dict()\n\t\t\t\n\t\t\tartistsKnown = ArtistLiked.objects.all().filter(user_id=user_id)\n\t\t\tknown_aid = []\n\t\t\tfor x in artistsKnown:\n\t\t\t\tknown_aid.append(x.artist_id)\n\n\t\t\tres_list = []\n\t\t\tfor x in sample['iid'].values():\n\t\t\t\tif x in known_aid:\n\t\t\t\t\tprint(f'Disliked hit found on aid={x}')\n\t\t\t\telse:\n\t\t\t\t\tres_list.append(x)\n\n\t\t\tif len(res_list) > 2:\n\t\t\t\treturn JsonResponse({'results': res_list}, safe=False, status=status.HTTP_200_OK)\n\t\t\telse:\n\t\t\t\tres = get_top_artists_helper(user.user_id, user.recommendation_frame)\n\t\t\t\treturn JsonResponse({'results': res}, safe=False, status=status.HTTP_200_OK)\n\t\telse:\n\t\t\tres = get_top_artists_helper(user.user_id, user.recommendation_frame)\n\t\t\treturn JsonResponse({'results': res}, safe=False, status=status.HTTP_200_OK)\n\n\texcept User.DoesNotExist:\n\t\treturn JsonResponse({'error': 'User does not exist'}, safe=False,status=status.HTTP_404_NOT_FOUND)\n\n@api_view(['POST'])\ndef play_song(request):\n\tlog = []\n\tpayload = request.data\n\ttry:\n\t\tuid = payload['user_id']\n\t\ttid = payload['track_id']\n\t\tsong_obj = Songs.objects.get(track_id=tid)\n\t\tsong_obj.play_count = song_obj.play_count + 1\n\t\tsong_obj.save()\n\n\t\ttry:\n\t\t\tuser = User.objects.get(user_id = uid)\n\t\t\tif not 
like_artist_helper(user.user_id, song_obj.artist_id, True, True):\n\t\t\t\tlog.append('user was new but failed to like artist')\n\t\texcept User.DoesNotExist:\n\t\t\tlog.append('user entered did not exist')\n\t\texcept:\n\t\t\tlogging.exception('Unknown reason logging')\n\t\t\tlog.append('user like artist failed for unknown reason')\n\t\tserialized_song = SongsSerializer(song_obj)\n\t\treturn JsonResponse({'song_update': serialized_song.data, 'log_out': log}, safe=False, status=status.HTTP_202_ACCEPTED)\n\texcept:\n\t\treturn JsonResponse({'error': 'an error occurred, could not update the song play count'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view(['POST'])\ndef like_artist(request):\n\tpayload = request.data\n\ttry:\n\t\tuid = payload['user_id']\n\t\taid = payload['artist_id']\n\t\tvalid = like_artist_helper(uid, aid, False, True)\n\t\tif valid:\n\t\t\treturn JsonResponse({'msg': \"was able to update the user's preferences\"}, safe=False, status=status.HTTP_202_ACCEPTED)\n\t\telse:\n\t\t\treturn JsonResponse({'error': \"an error occurred, could not update the user's preferences\"}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\texcept:\n\t\treturn JsonResponse({'error': \"an error occurred, could not update the user's preferences\"}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view(['POST'])\ndef dislike_artist(request):\n\tpayload = request.data\n\ttry:\n\t\tuid = payload['user_id']\n\t\taid = payload['artist_id']\n\t\tvalid = like_artist_helper(uid, aid, False, False)\n\t\tif valid:\n\t\t\treturn JsonResponse({'msg': \"was able to update the user's preferences\"}, safe=False, status=status.HTTP_202_ACCEPTED)\n\t\telse:\n\t\t\treturn JsonResponse({'error': \"an error occurred, could not update the user's preferences\"}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\texcept:\n\t\treturn JsonResponse({'error': \"an error occurred, could not update the user's preferences\"}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\ndef push_recommendation_window(request, user_id):\n\ttry:\n\t\tuser = User.objects.get(user_id = user_id)\n\t\tif user.recommendation_frame == 9:\n\t\t\tuser.recommendation_frame = 0\n\t\telse:\n\t\t\tuser.recommendation_frame = user.recommendation_frame+1\n\t\tuser.save()\n\t\treturn JsonResponse({'msg': 'Successfully updated recommendation window'}, safe=False,status=status.HTTP_200_OK)\n\texcept:\n\t\tlogging.exception('Error for push_recommendation_window')\n\t\treturn JsonResponse({'error': \"an error occurred, could not update the user's recommendation window\"}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\ndef like_artist_helper(uid, aid, counts, liked):\n\ttry:\n\t\tal, created = ArtistLiked.objects.get_or_create(user_id=uid, artist_id=aid)\n\t\tif counts:\n\t\t\tal.play_count = al.play_count+1\n\t\tal.liked = liked\n\t\tal.save()\n\t\treturn True\n\texcept:\n\t\tlogging.exception('Error for like_artist_helper')\n\t\treturn False\n\ndef get_top_artists_helper(uid, recommendation_frame):\n\titems_per_artist = 3\n\tartists_to_poll = 5\n\texpected_items = 10\n\ttry: \n\t\tartistsLiked = ArtistLiked.objects.all().filter(user_id=uid, liked=True)\n\t\tartistsDisliked = ArtistLiked.objects.all().filter(user_id=uid, liked=False)\n\n\t\taid_list = []\n\t\tfor x in artistsLiked:\n\t\t\taid_list.append(x.artist_id)\n\t\t\n\t\tgeneral_aid_list = aid_list.copy()\n\t\tfor x in artistsDisliked:\n\t\t\tgeneral_aid_list.append(x.artist_id)\n\t\t\n\t\tif 
(len(aid_list) > 0):\n\t\t\trandom.seed(recommendation_frame)\n\t\t\trandom.shuffle(aid_list)\n\n\t\t\trecommended_aid_list = []\n\t\t\tdf_neighbors = pd.read_csv('./Export/webapp_neighbors_map.csv')\n\t\t\tfor i in range(0, min(len(aid_list), artists_to_poll)):\n\t\t\t\taid=aid_list[i]\n\t\t\t\treq = items_per_artist\n\t\t\t\tdf_filtered = df_neighbors[aid]\n\t\t\t\tvalid = df_filtered.loc[np.bitwise_not(np.bitwise_or(np.isin(df_filtered, general_aid_list), np.isin(df_filtered, recommended_aid_list)))]\n\t\t\t\tprint(f'valid length: {len(valid)}')\n\t\t\t\tfor x in valid:\n\t\t\t\t\treq -= 1\n\t\t\t\t\trecommended_aid_list.append(x)\n\t\t\t\t\tif req == 0:\n\t\t\t\t\t\tbreak\n\t\t\tprint(f'recommended_aid_list length: {len(recommended_aid_list)}')\n\t\t\trandom.shuffle(recommended_aid_list)\n\t\t\tfiltered_res = recommended_aid_list[0:min(len(recommended_aid_list), expected_items)]\n\t\t\treturn filtered_res\n\t\telse:\n\t\t\ttop_raw = ArtistLiked.objects.values('artist_id').annotate(play_sum=Sum('play_count')).order_by('-play_sum')\n\t\t\taid_result = []\n\t\t\tfor x in top_raw:\n\t\t\t\tif x['artist_id'] not in general_aid_list:\n\t\t\t\t\taid_result.append(x['artist_id'])\n\t\t\t\tif len(aid_result) > expected_items:\n\t\t\t\t\tbreak\n\t\t\treturn aid_result\n\texcept:\n\t\tlogging.exception('Error for get_top_artists_helper')\n\t\treturn []\n\n@api_view(['GET'])\ndef get_user_history(request, user_id):\n\tmax_length = 10000\n\ttry:\n\t\tuser_history = ArtistLiked.objects.all().filter(user_id=user_id)\n\t\tuser_history = user_history[0:min(max_length, len(user_history))]\n\t\thistory_data = ArtistLikedSerializer(user_history, many=True)\n\t\treturn JsonResponse({'history': history_data.data}, safe=False,status=status.HTTP_200_OK)\n\texcept:\n\t\tlogging.exception('Error for get_user_history')\n\t\treturn JsonResponse({'error': 'could not retrieve user history'}, safe=False,status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view(['GET'])\ndef get_top_artists(request):\n\ttry:\n\t\ttop100_raw = ArtistLiked.objects.values('artist_id').annotate(play_sum=Sum('play_count')).order_by('-play_sum')[0:100]\n\t\ttop_100 = ArtistPlayTotalSerializer(top100_raw, many=True)\n\t\treturn JsonResponse({'top': top_100.data}, safe=False,status=status.HTTP_200_OK)\n\texcept:\n\t\tlogging.exception('Error for get_top_artists')\n\t\treturn JsonResponse({'error': 'could not retrieve top 100 artists'}, safe=False,status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view(['GET'])\ndef get_artist_detail(request, artist_id):\n\ttry:\n\t\tsongs_raw = Songs.objects.all().filter(artist_id=artist_id).order_by('-play_count')\n\t\tsongs = SongsSerializer(songs_raw, many=True)\n\t\t\n\t\ttotal_play = 0\n\t\tfor x in songs_raw:\n\t\t\ttotal_play+= x.play_count\n\n\t\treturn JsonResponse({\n\t\t\t'artist_id': songs_raw[0].artist_id,\n\t\t\t'artist_name': songs_raw[0].artist_name,\n\t\t\t'total_play': total_play,\n\t\t\t'songs': songs.data[0:min(len(songs.data), 10)]\n\t\t\t}, safe=False, status=status.HTTP_200_OK)\n\texcept:\n\t\tlogging.exception('Error for get_track_detail')\n\t\treturn JsonResponse({'error': f'could not retrieve artist with id {artist_id}'}, safe=False,status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view(['GET'])\ndef get_track_detail(request, track_id):\n\ttry:\n\t\tsong_raw = Songs.objects.get(track_id=track_id)\n\t\tsong = SongsSerializer(song_raw)\n\t\treturn JsonResponse(song.data, safe=False, status=status.HTTP_200_OK)\n\texcept:\n\t\tlogging.exception('Error for 
get_track_detail')\n\t\treturn JsonResponse({'error': f'could not retrieve song with id {track_id}'}, safe=False,status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view(['GET'])\ndef get_artists_with_filter(request):\n\tquery_dict = request.GET.dict()\n\tartist_name_prefix = '' if 'artist_name_prefix' not in query_dict else query_dict['artist_name_prefix']\n\ttry:\n\t\tartists_query = None\n\t\tif artist_name_prefix == '':\n\t\t\tartists_query = Songs.objects.raw(f\"SELECT *, SUM(play_count) AS play_total FROM api_songs GROUP BY artist_id ORDER BY artist_name\")\n\t\telse:\n\t\t\tartists_query = Songs.objects.raw(f\"SELECT *, SUM(play_count) AS play_total FROM api_songs WHERE (artist_name LIKE '%{artist_name_prefix}%') GROUP BY artist_id ORDER BY artist_name\")\n\t\tartists_search = ArtistSearchSerializer(artists_query[0:min(len(artists_query), 100)], many=True)\n\t\treturn JsonResponse(artists_search.data, safe=False, status=status.HTTP_200_OK)\n\texcept:\n\t\tlogging.exception('Error for get_artists_with_filter')\n\t\treturn JsonResponse({'error': 'could not retreive songs due to internal errors'}, safe=False,status=status.HTTP_500_INTERNAL_SERVER_ERROR)","sub_path":"Taller1/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"593695095","text":" # -*- coding: utf-8 -*-\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\n\n url(r'^guia/crear/$', views.package, name='package'),\n url(r'^guia/modificar/(?P.*)/$', views.packageChange, name='packageChange'),\n url(r'^guia/buscar/$', views.packageSearch, name='packageSearch'),\n \n url(r'^guia/origen/$', views.packageIndex, name='packageIndex'),\n url(r'^guia/transito/$', views.packageIndex, {'traveling': True}, name='packageTraveling'),\n url(r'^guia/destino/$', views.packageIndex, {'finish': True}, name='packageFinish'),\n url(r'^guia/entregado/$', views.packageIndex, {'delivered': True}, name='packageDelivered'),\n url(r'^guia/direccion/$', views.packageIndex, {'reciever': True}, name='packageReciever'),\n url(r'^guia/retirar/$', views.packageIndex, {'transmitter': True}, name='packageTransmitter'),\n url(r'^guia/estado/$', views.packageState, name='packageState'),\n url(r'^guia/tarifado/$', views.packageRate, name='packageRate'),\n url(r'^guia/formadepago/$', views.packagePay, name='packagePay'),\n\n url(r'^guia/pdf/(?P.*)/$', views.packagePdf, name='packagePdf'),\n\n url(r'^guia/flete/$', views.packageFreight, name='packageFreight'),\n url(r'^guia/(?P.*)/$', views.packageProfile, name='packageProfile'),\n\n url(r'^recogida/crear/$', views.pickup, name='pickup'), \n url(r'^recogida/esperando/$', views.pickupWaiting, name='pickupWaiting'),\n url(r'^recogida/listos/$', views.pickupReady , name='pickupReady'),\n url(r'^recogida/guia/$', views.pickupPackage, name='pickupPackage'),\n url(r'^recogida/(?P.*)/$', views.pickupProfile, name='pickupProfile'),\n\n]\n\n","sub_path":"packages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"636154768","text":"import models.ssd_resnet_50 as ssd_resnet_50\nimport tensorflow as tf\n\ndef test_multibox_predict(): # Test passed.\n input_layer = tf.zeros([1, 10, 10, 256], dtype=tf.float32)\n class_num = 50\n layer_name = 'resnet_v2_50/block3'\n weight_decay = 0.9\n\n cls, pos = ssd_resnet_50.multibox_predict(input_layer, 
class_num, layer_name, weight_decay)\n\n init = tf.global_variables_initializer()\n with tf.Session() as ss:\n ss.run(init)\n\n clsv, posv = ss.run([cls, pos])\n\n print(clsv.shape)\n print(posv.shape)\n\ndef test_predict(): # Test passed.\n image = tf.zeros([1, 300, 300, 3], dtype=tf.float32)\n class_num = 50\n weight_decay = 0.9\n\n ssd = ssd_resnet_50.init(class_num, weight_decay, False)\n logits, locations, end_feats = ssd(image)\n\n init = tf.global_variables_initializer()\n with tf.Session() as ss:\n ss.run(init)\n\n out = ss.run(locations)\n print(out.shape)\n\n\nif __name__ == '__main__':\n # test_multibox_predict()\n test_predict()\n","sub_path":"utest/test_ssd_resnet_50.py","file_name":"test_ssd_resnet_50.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"526742152","text":"# coding: utf-8\nfrom __future__ import print_function\nimport tensorflow as tf\nfrom nets import nets_factory\nfrom preprocessing import preprocessing_factory\nimport reader\nimport model\nimport time\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)\n\ntf.app.flags.DEFINE_string('loss_model', 'vgg_16', 'The name of the architecture to evaluate. '\n 'You can view all the support models in nets/nets_factory.py')\ntf.app.flags.DEFINE_integer('image_size', 256, 'Image size to train.')\n#tf.app.flags.DEFINE_string(\"model_file\", \"models.ckpt\", \"\")\ntf.app.flags.DEFINE_string(\"image_file\", \"a.jpg\", \"\")\n\nFLAGS = tf.app.flags.FLAGS\n#demo_list = ['0','11','12','15','17','18','2','5','6','7']\n#demo_list = ['0_1','11_1','12_1','15_1','16_1','17_1','18_1','2_1','5_1','6_1','7_1',\n# '0_2','11_2','12_2','15_2','16_2','17_2','18_2','2_2','5_2','6_2','7_2','11_5','11_6','11_7']\n#demo_list = ['cartoon', 'guohua', 'ice_mountain', 'moon_night', 'sand_painting',\n# 'sunset', 'wenli_06', 'shuimo']\ndemo_list = ['date_0308/sand_painting']\n\ndef main(_):\n height = 0\n width = 0\n for head in demo_list:\n model_file = 'models/%s/fast-style-model.ckpt-2000'%head\n with open(FLAGS.image_file, 'rb') as img:\n with tf.Session().as_default() as sess:\n if FLAGS.image_file.lower().endswith('png'):\n image = sess.run(tf.image.decode_png(img.read()))\n else:\n image = sess.run(tf.image.decode_jpeg(img.read()))\n height = image.shape[0]\n width = image.shape[1]\n tf.logging.info('Image size: %dx%d' % (width, height))\n\n with tf.Graph().as_default():\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)).as_default() as sess:\n image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(\n FLAGS.loss_model,\n is_training=False)\n image = reader.get_image(FLAGS.image_file, height, width, image_preprocessing_fn)\n image = tf.expand_dims(image, 0)\n generated = model.net(image, training=False)\n generated = tf.squeeze(generated, [0])\n saver = tf.train.Saver(tf.all_variables())\n sess.run([tf.initialize_all_variables(), tf.initialize_local_variables()])\n #name = FLAGS.model_file.split('/')[1]\n model_file = os.path.abspath(model_file)\n saver.restore(sess, model_file)\n\n start_time = time.time()\n generated = sess.run(generated)\n generated = tf.cast(generated, tf.uint8)\n end_time = time.time()\n tf.logging.info('Elapsed time: %fs' % (end_time - start_time))\n generated_file = 'generated/result_0338/%s.jpg'%head\n #generated_file = 'generated/%s.jpg'%head\n if os.path.exists('generated') is False:\n os.makedirs('generated')\n with 
open(generated_file, 'wb') as img:\n img.write(sess.run(tf.image.encode_jpeg(generated)))\n tf.logging.info('Done. Please check %s.' % generated_file)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"201626806","text":"from __future__ import unicode_literals\n\nimport copy\nimport datetime\n\nimport boto.redshift\nfrom moto.compat import OrderedDict\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.core.utils import iso_8601_datetime_with_milliseconds\nfrom moto.ec2 import ec2_backends\nfrom .exceptions import (\n ClusterNotFoundError,\n ClusterParameterGroupNotFoundError,\n ClusterSecurityGroupNotFoundError,\n ClusterSnapshotAlreadyExistsError,\n ClusterSnapshotNotFoundError,\n ClusterSubnetGroupNotFoundError,\n InvalidSubnetError,\n)\n\n\nclass Cluster(BaseModel):\n\n def __init__(self, redshift_backend, cluster_identifier, node_type, master_username,\n master_user_password, db_name, cluster_type, cluster_security_groups,\n vpc_security_group_ids, cluster_subnet_group_name, availability_zone,\n preferred_maintenance_window, cluster_parameter_group_name,\n automated_snapshot_retention_period, port, cluster_version,\n allow_version_upgrade, number_of_nodes, publicly_accessible,\n encrypted, region):\n self.redshift_backend = redshift_backend\n self.cluster_identifier = cluster_identifier\n self.status = 'available'\n self.node_type = node_type\n self.master_username = master_username\n self.master_user_password = master_user_password\n self.db_name = db_name if db_name else \"dev\"\n self.vpc_security_group_ids = vpc_security_group_ids\n self.cluster_subnet_group_name = cluster_subnet_group_name\n self.publicly_accessible = publicly_accessible\n self.encrypted = encrypted\n\n self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True\n self.cluster_version = cluster_version if cluster_version else \"1.0\"\n self.port = int(port) if port else 5439\n self.automated_snapshot_retention_period = int(\n automated_snapshot_retention_period) if automated_snapshot_retention_period else 1\n self.preferred_maintenance_window = preferred_maintenance_window if preferred_maintenance_window else \"Mon:03:00-Mon:03:30\"\n\n if cluster_parameter_group_name:\n self.cluster_parameter_group_name = [cluster_parameter_group_name]\n else:\n self.cluster_parameter_group_name = ['default.redshift-1.0']\n\n if cluster_security_groups:\n self.cluster_security_groups = cluster_security_groups\n else:\n self.cluster_security_groups = [\"Default\"]\n\n self.region = region\n if availability_zone:\n self.availability_zone = availability_zone\n else:\n # This could probably be smarter, but there doesn't appear to be a\n # way to pull AZs for a region in boto\n self.availability_zone = region + \"a\"\n\n if cluster_type == 'single-node':\n self.number_of_nodes = 1\n elif number_of_nodes:\n self.number_of_nodes = int(number_of_nodes)\n else:\n self.number_of_nodes = 1\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n redshift_backend = redshift_backends[region_name]\n properties = cloudformation_json['Properties']\n\n if 'ClusterSubnetGroupName' in properties:\n subnet_group_name = properties[\n 'ClusterSubnetGroupName'].cluster_subnet_group_name\n else:\n subnet_group_name = None\n 
cluster = redshift_backend.create_cluster(\n cluster_identifier=resource_name,\n node_type=properties.get('NodeType'),\n master_username=properties.get('MasterUsername'),\n master_user_password=properties.get('MasterUserPassword'),\n db_name=properties.get('DBName'),\n cluster_type=properties.get('ClusterType'),\n cluster_security_groups=properties.get(\n 'ClusterSecurityGroups', []),\n vpc_security_group_ids=properties.get('VpcSecurityGroupIds', []),\n cluster_subnet_group_name=subnet_group_name,\n availability_zone=properties.get('AvailabilityZone'),\n preferred_maintenance_window=properties.get(\n 'PreferredMaintenanceWindow'),\n cluster_parameter_group_name=properties.get(\n 'ClusterParameterGroupName'),\n automated_snapshot_retention_period=properties.get(\n 'AutomatedSnapshotRetentionPeriod'),\n port=properties.get('Port'),\n cluster_version=properties.get('ClusterVersion'),\n allow_version_upgrade=properties.get('AllowVersionUpgrade'),\n number_of_nodes=properties.get('NumberOfNodes'),\n publicly_accessible=properties.get(\"PubliclyAccessible\"),\n encrypted=properties.get(\"Encrypted\"),\n region=region_name,\n )\n return cluster\n\n def get_cfn_attribute(self, attribute_name):\n from moto.cloudformation.exceptions import UnformattedGetAttTemplateException\n if attribute_name == 'Endpoint.Address':\n return self.endpoint\n elif attribute_name == 'Endpoint.Port':\n return self.port\n raise UnformattedGetAttTemplateException()\n\n @property\n def endpoint(self):\n return \"{0}.cg034hpkmmjt.{1}.redshift.amazonaws.com\".format(\n self.cluster_identifier,\n self.region,\n )\n\n @property\n def security_groups(self):\n return [\n security_group for security_group\n in self.redshift_backend.describe_cluster_security_groups()\n if security_group.cluster_security_group_name in self.cluster_security_groups\n ]\n\n @property\n def vpc_security_groups(self):\n return [\n security_group for security_group\n in self.redshift_backend.ec2_backend.describe_security_groups()\n if security_group.id in self.vpc_security_group_ids\n ]\n\n @property\n def parameter_groups(self):\n return [\n parameter_group for parameter_group\n in self.redshift_backend.describe_cluster_parameter_groups()\n if parameter_group.cluster_parameter_group_name in self.cluster_parameter_group_name\n ]\n\n def to_json(self):\n return {\n \"MasterUsername\": self.master_username,\n \"MasterUserPassword\": \"****\",\n \"ClusterVersion\": self.cluster_version,\n \"VpcSecurityGroups\": [{\n \"Status\": \"active\",\n \"VpcSecurityGroupId\": group.id\n } for group in self.vpc_security_groups],\n \"ClusterSubnetGroupName\": self.cluster_subnet_group_name,\n \"AvailabilityZone\": self.availability_zone,\n \"ClusterStatus\": self.status,\n \"NumberOfNodes\": self.number_of_nodes,\n \"AutomatedSnapshotRetentionPeriod\": self.automated_snapshot_retention_period,\n \"PubliclyAccessible\": self.publicly_accessible,\n \"Encrypted\": self.encrypted,\n \"DBName\": self.db_name,\n \"PreferredMaintenanceWindow\": self.preferred_maintenance_window,\n \"ClusterParameterGroups\": [{\n \"ParameterApplyStatus\": \"in-sync\",\n \"ParameterGroupName\": group.cluster_parameter_group_name,\n } for group in self.parameter_groups],\n \"ClusterSecurityGroups\": [{\n \"Status\": \"active\",\n \"ClusterSecurityGroupName\": group.cluster_security_group_name,\n } for group in self.security_groups],\n \"Port\": self.port,\n \"NodeType\": self.node_type,\n \"ClusterIdentifier\": self.cluster_identifier,\n \"AllowVersionUpgrade\": self.allow_version_upgrade,\n 
\"Endpoint\": {\n \"Address\": '{}.{}.redshift.amazonaws.com'.format(\n self.cluster_identifier,\n self.region),\n \"Port\": self.port\n },\n \"PendingModifiedValues\": []\n }\n\n\nclass SubnetGroup(BaseModel):\n\n def __init__(self, ec2_backend, cluster_subnet_group_name, description, subnet_ids):\n self.ec2_backend = ec2_backend\n self.cluster_subnet_group_name = cluster_subnet_group_name\n self.description = description\n self.subnet_ids = subnet_ids\n if not self.subnets:\n raise InvalidSubnetError(subnet_ids)\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n redshift_backend = redshift_backends[region_name]\n properties = cloudformation_json['Properties']\n\n subnet_group = redshift_backend.create_cluster_subnet_group(\n cluster_subnet_group_name=resource_name,\n description=properties.get(\"Description\"),\n subnet_ids=properties.get(\"SubnetIds\", []),\n )\n return subnet_group\n\n @property\n def subnets(self):\n return self.ec2_backend.get_all_subnets(filters={'subnet-id': self.subnet_ids})\n\n @property\n def vpc_id(self):\n return self.subnets[0].vpc_id\n\n def to_json(self):\n return {\n \"VpcId\": self.vpc_id,\n \"Description\": self.description,\n \"ClusterSubnetGroupName\": self.cluster_subnet_group_name,\n \"SubnetGroupStatus\": \"Complete\",\n \"Subnets\": [{\n \"SubnetStatus\": \"Active\",\n \"SubnetIdentifier\": subnet.id,\n \"SubnetAvailabilityZone\": {\n \"Name\": subnet.availability_zone\n },\n } for subnet in self.subnets],\n }\n\n\nclass SecurityGroup(BaseModel):\n\n def __init__(self, cluster_security_group_name, description):\n self.cluster_security_group_name = cluster_security_group_name\n self.description = description\n\n def to_json(self):\n return {\n \"EC2SecurityGroups\": [],\n \"IPRanges\": [],\n \"Description\": self.description,\n \"ClusterSecurityGroupName\": self.cluster_security_group_name,\n }\n\n\nclass ParameterGroup(BaseModel):\n\n def __init__(self, cluster_parameter_group_name, group_family, description):\n self.cluster_parameter_group_name = cluster_parameter_group_name\n self.group_family = group_family\n self.description = description\n\n @classmethod\n def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):\n redshift_backend = redshift_backends[region_name]\n properties = cloudformation_json['Properties']\n\n parameter_group = redshift_backend.create_cluster_parameter_group(\n cluster_parameter_group_name=resource_name,\n description=properties.get(\"Description\"),\n group_family=properties.get(\"ParameterGroupFamily\"),\n )\n return parameter_group\n\n def to_json(self):\n return {\n \"ParameterGroupFamily\": self.group_family,\n \"Description\": self.description,\n \"ParameterGroupName\": self.cluster_parameter_group_name,\n }\n\n\nclass Snapshot(BaseModel):\n\n def __init__(self, cluster, snapshot_identifier, tags=None):\n self.cluster = copy.copy(cluster)\n self.snapshot_identifier = snapshot_identifier\n self.snapshot_type = 'manual'\n self.status = 'available'\n self.tags = tags or []\n self.create_time = iso_8601_datetime_with_milliseconds(\n datetime.datetime.now())\n\n @property\n def arn(self):\n return \"arn:aws:redshift:{0}:1234567890:snapshot:{1}/{2}\".format(\n self.cluster.region,\n self.cluster.cluster_identifier,\n self.snapshot_identifier)\n\n def to_json(self):\n return {\n 'SnapshotIdentifier': self.snapshot_identifier,\n 'ClusterIdentifier': self.cluster.cluster_identifier,\n 'SnapshotCreateTime': self.create_time,\n 
'Status': self.status,\n 'Port': self.cluster.port,\n 'AvailabilityZone': self.cluster.availability_zone,\n 'MasterUsername': self.cluster.master_username,\n 'ClusterVersion': self.cluster.cluster_version,\n 'SnapshotType': self.snapshot_type,\n 'NodeType': self.cluster.node_type,\n 'NumberOfNodes': self.cluster.number_of_nodes,\n 'DBName': self.cluster.db_name,\n 'Tags': self.tags\n }\n\n\nclass RedshiftBackend(BaseBackend):\n\n def __init__(self, ec2_backend):\n self.clusters = {}\n self.subnet_groups = {}\n self.security_groups = {\n \"Default\": SecurityGroup(\"Default\", \"Default Redshift Security Group\")\n }\n self.parameter_groups = {\n \"default.redshift-1.0\": ParameterGroup(\n \"default.redshift-1.0\",\n \"redshift-1.0\",\n \"Default Redshift parameter group\",\n )\n }\n self.ec2_backend = ec2_backend\n self.snapshots = OrderedDict()\n\n def reset(self):\n ec2_backend = self.ec2_backend\n self.__dict__ = {}\n self.__init__(ec2_backend)\n\n def create_cluster(self, **cluster_kwargs):\n cluster_identifier = cluster_kwargs['cluster_identifier']\n cluster = Cluster(self, **cluster_kwargs)\n self.clusters[cluster_identifier] = cluster\n return cluster\n\n def describe_clusters(self, cluster_identifier=None):\n clusters = self.clusters.values()\n if cluster_identifier:\n if cluster_identifier in self.clusters:\n return [self.clusters[cluster_identifier]]\n else:\n raise ClusterNotFoundError(cluster_identifier)\n return clusters\n\n def modify_cluster(self, **cluster_kwargs):\n cluster_identifier = cluster_kwargs.pop('cluster_identifier')\n new_cluster_identifier = cluster_kwargs.pop(\n 'new_cluster_identifier', None)\n\n cluster = self.describe_clusters(cluster_identifier)[0]\n\n for key, value in cluster_kwargs.items():\n setattr(cluster, key, value)\n\n if new_cluster_identifier:\n self.delete_cluster(cluster_identifier)\n cluster.cluster_identifier = new_cluster_identifier\n self.clusters[new_cluster_identifier] = cluster\n\n return cluster\n\n def delete_cluster(self, cluster_identifier):\n if cluster_identifier in self.clusters:\n return self.clusters.pop(cluster_identifier)\n raise ClusterNotFoundError(cluster_identifier)\n\n def create_cluster_subnet_group(self, cluster_subnet_group_name, description, subnet_ids):\n subnet_group = SubnetGroup(\n self.ec2_backend, cluster_subnet_group_name, description, subnet_ids)\n self.subnet_groups[cluster_subnet_group_name] = subnet_group\n return subnet_group\n\n def describe_cluster_subnet_groups(self, subnet_identifier=None):\n subnet_groups = self.subnet_groups.values()\n if subnet_identifier:\n if subnet_identifier in self.subnet_groups:\n return [self.subnet_groups[subnet_identifier]]\n else:\n raise ClusterSubnetGroupNotFoundError(subnet_identifier)\n return subnet_groups\n\n def delete_cluster_subnet_group(self, subnet_identifier):\n if subnet_identifier in self.subnet_groups:\n return self.subnet_groups.pop(subnet_identifier)\n raise ClusterSubnetGroupNotFoundError(subnet_identifier)\n\n def create_cluster_security_group(self, cluster_security_group_name, description):\n security_group = SecurityGroup(\n cluster_security_group_name, description)\n self.security_groups[cluster_security_group_name] = security_group\n return security_group\n\n def describe_cluster_security_groups(self, security_group_name=None):\n security_groups = self.security_groups.values()\n if security_group_name:\n if security_group_name in self.security_groups:\n return [self.security_groups[security_group_name]]\n else:\n raise 
ClusterSecurityGroupNotFoundError(security_group_name)\n return security_groups\n\n def delete_cluster_security_group(self, security_group_identifier):\n if security_group_identifier in self.security_groups:\n return self.security_groups.pop(security_group_identifier)\n raise ClusterSecurityGroupNotFoundError(security_group_identifier)\n\n def create_cluster_parameter_group(self, cluster_parameter_group_name,\n group_family, description):\n parameter_group = ParameterGroup(\n cluster_parameter_group_name, group_family, description)\n self.parameter_groups[cluster_parameter_group_name] = parameter_group\n\n return parameter_group\n\n def describe_cluster_parameter_groups(self, parameter_group_name=None):\n parameter_groups = self.parameter_groups.values()\n if parameter_group_name:\n if parameter_group_name in self.parameter_groups:\n return [self.parameter_groups[parameter_group_name]]\n else:\n raise ClusterParameterGroupNotFoundError(parameter_group_name)\n return parameter_groups\n\n def delete_cluster_parameter_group(self, parameter_group_name):\n if parameter_group_name in self.parameter_groups:\n return self.parameter_groups.pop(parameter_group_name)\n raise ClusterParameterGroupNotFoundError(parameter_group_name)\n\n def create_snapshot(self, cluster_identifier, snapshot_identifier, tags):\n cluster = self.clusters.get(cluster_identifier)\n if not cluster:\n raise ClusterNotFoundError(cluster_identifier)\n if self.snapshots.get(snapshot_identifier) is not None:\n raise ClusterSnapshotAlreadyExistsError(snapshot_identifier)\n snapshot = Snapshot(cluster, snapshot_identifier, tags)\n self.snapshots[snapshot_identifier] = snapshot\n return snapshot\n\n def describe_snapshots(self, cluster_identifier, snapshot_identifier):\n if cluster_identifier:\n for snapshot in self.snapshots.values():\n if snapshot.cluster.cluster_identifier == cluster_identifier:\n return [snapshot]\n raise ClusterNotFoundError(cluster_identifier)\n\n if snapshot_identifier:\n if snapshot_identifier in self.snapshots:\n return [self.snapshots[snapshot_identifier]]\n raise ClusterSnapshotNotFoundError(snapshot_identifier)\n\n return self.snapshots.values()\n\n def delete_snapshot(self, snapshot_identifier):\n if snapshot_identifier not in self.snapshots:\n raise ClusterSnapshotNotFoundError(snapshot_identifier)\n\n deleted_snapshot = self.snapshots.pop(snapshot_identifier)\n deleted_snapshot.status = 'deleted'\n return deleted_snapshot\n\n def describe_tags_for_resource_type(self, resource_type):\n tagged_resources = []\n if resource_type == 'Snapshot':\n for snapshot in self.snapshots.values():\n for tag in snapshot.tags:\n data = {\n 'ResourceName': snapshot.arn,\n 'ResourceType': 'snapshot',\n 'Tag': {\n 'Key': tag['Key'],\n 'Value': tag['Value']\n }\n }\n tagged_resources.append(data)\n return tagged_resources\n\n\nredshift_backends = {}\nfor region in boto.redshift.regions():\n redshift_backends[region.name] = RedshiftBackend(ec2_backends[region.name])\n","sub_path":"moto/redshift/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"60891748","text":"\nimport sys\nfrom queue import Queue\nfrom threading import Event, Thread\nimport better_exchook\nimport Persistence\nimport Logging\n\nkNumWorkers = 5\nkMinQueuedActions = kNumWorkers # fill workerQueue always up to N elements, via the watcher thread\n\nif not \"mainLoopQueue\" in vars():\n\tmainLoopQueue = Queue()\nif not 
\"exitEvent\" in vars():\n\texitEvent = Event()\n\n\ndef setup():\n\timport Action\n\tif not \"workerQueue\" in globals():\n\t\tglobal workerQueue\n\t\tworkerQueue = Persistence.load(\"workerQueue.db\", Queue, env=vars(Action))\n\tif not \"currentWork\" in globals():\n\t\tglobal currentWork\n\t\tcurrentWork = Persistence.load(\"currentWork.db\", set, env=vars(Action))\n\n\t_initWatcherThread()\n\t_initWorkerThreads()\n\n\ndef queueWork(func):\n\tworkerQueue.put(func)\n\tworkerQueue.save()\n\n\ndef mainLoop():\n\twhile True:\n\t\tfunc = mainLoopQueue.get()\n\t\tfunc()\n\ndef workerLoop():\n\tbetter_exchook.install()\n\twhile True:\n\t\tfunc = workerQueue.get()\n\t\tLogging.log(\"Next work item: %s\" % func)\n\t\tcurrentWork.add(func)\n\t\tcurrentWork.save()\n\t\ttry:\n\t\t\tfunc()\n\t\texcept KeyboardInterrupt:\n\t\t\treturn\n\t\texcept Exception:\n\t\t\tLogging.logException(\"Worker\", *sys.exc_info())\n\t\tfinally:\n\t\t\ttry:\n\t\t\t\tcurrentWork.remove(func)\n\t\t\texcept Exception as e:\n\t\t\t\tLogging.log(\"Error: Don't understand: %s, %r not in %r\" % (e, func, currentWork))\n\ndef watcherLoop():\n\tbetter_exchook.install()\n\twhile not exitEvent.is_set():\n\t\tif workerQueue.qsize() >= kMinQueuedActions:\n\t\t\texitEvent.wait(1)\n\t\t\tcontinue\n\n\t\timport Action\n\t\tfunc = Action.getNewAction()\n\t\tworkerQueue.put(func)\n\nif \"workers\" not in vars():\n\tworkers = []\nif \"watcher\" not in vars():\n\twatcher = None\n\ndef _initWorkerThreads():\n\tif len(workers) >= kNumWorkers: return\n\tassert not workers # needs fixing otherwise\n\t# Move all of the queued entries to the set to eliminate duplicates.\n\twhile not workerQueue.empty():\n\t\tcurrentWork.add(workerQueue.get())\n\t# Now back to the queue.\n\tfor func in currentWork:\n\t\tqueueWork(func)\n\t# And cleanup.\n\tcurrentWork.clear()\n\tcurrentWork.save()\n\t# Now init the threads.\n\tfor i in range(kNumWorkers - len(workers)):\n\t\tthread = Thread(target=workerLoop, name=\"Worker %i/%i\" % (i + 1, kNumWorkers))\n\t\tworkers.append(thread)\n\t\tthread.daemon = True\n\t\tthread.start()\n\ndef _initWatcherThread():\n\tglobal watcher\n\tif watcher: return\n\twatcher = Thread(target=watcherLoop, name=\"Watcher\")\n\twatcher.daemon = True\n\twatcher.start()\n\n\n","sub_path":"TaskSystem.py","file_name":"TaskSystem.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"568916865","text":"from random import randint\n\nuser_numbers = []\nlucky_numbers = []\n\nprint('- Enter five numbers from 1 to 10.')\n\n# Pick the user's entry numbers for the draw.\nwhile len(user_numbers) < 5:\n    input_numbers = input('> ')\n\n    try:\n        a = int(input_numbers)\n    except ValueError:\n        print(\"Invalid value. Please enter it again.\")\n        continue\n\n    if a < 1 or a > 10:\n        print(\"Enter a number from 1 to 10.\")\n        continue\n    elif a in user_numbers:\n        print(user_numbers, 'are already chosen; enter a different number.')\n        continue\n    user_numbers.append(a)\nprint(\"The numbers you picked are\", user_numbers, \"\\n\")\n\n# Draw the winning numbers.\nprint('Starting the draw.')\n\nwhile len(lucky_numbers) < 5:\n    b = randint(1, 10)\n    if b not in lucky_numbers:\n        lucky_numbers.append(b)\n    else:  # skip duplicates among the drawn numbers\n        continue\nprint(lucky_numbers, \"\\n\")\n\n# Compare the entry numbers with the winning numbers.\nuserset = set(user_numbers)\nluckyset = set(lucky_numbers)\nwinset = userset.intersection(luckyset)\nprint('The matching numbers are', winset)\nprint('You matched', len(winset), 'number(s). 
')","sub_path":"homework/3-0/3-3_lottery.py","file_name":"3-3_lottery.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"292126772","text":"class VarInt(object):\n    @classmethod\n    def _zigzag_encode(cls, num):\n        retval = num * 2 if num >= 0 else -2 * num - 1\n        return int(retval)\n\n    @classmethod\n    def _zigzag_decode(cls, num):\n        retval = - (num + 1) / 2 if num % 2 else num / 2\n        return int(retval)\n\n    @classmethod\n    def int_to_var(cls, num):\n        num = cls._zigzag_encode(num)\n        print(num)  # debug: show the zigzag-encoded value\n        result_dict = {}\n        result = i = 0\n        while num >> 7:\n            # low 7 bits (the \"tail\")\n            tail = num & 0x7f\n            if i > 0:\n                tail = tail ^ 0x80\n            # drop the tail bits\n            temp = num >> 7\n            result_dict[i] = tail\n            i = i + 1\n            num = temp\n        if i > 0:\n            result_dict[i] = num ^ 0x80\n\n        for key in result_dict:\n            result = result + (result_dict[key] << int(key) * 8)\n        return result\n\n    @classmethod\n    def var_to_int(cls, num):\n        result_dict = {}\n        i = 0\n        result = 0\n        # while more than the final 7-bit tail remains ahead\n        while num >> 8:\n            # low 7 bits (the \"tail\")\n            tail = num & 0x7f\n            result_dict[i] = tail\n            # drop the tail byte\n            temp = num >> 8\n            i = i + 1\n            num = temp\n\n        if i > 0:\n            result_dict[i] = num & 0x7f\n\n        for key in result_dict:\n            print(str(key) + \":\" + str(result_dict[key]))\n            result = result + (result_dict[key] << int(key) * 7)\n        return cls._zigzag_decode(int(result))\n\n\n\ndef two2ten(n):\n    # sum of the first n powers of two, i.e. 2**n - 1\n    total = 0\n    for i in range(n):\n        total += 2**i\n    print(total)\n\nif __name__ == \"__main__\":\n    # print(VarInt.int_to_var(10))\n    # print(VarInt.int_to_var(127))\n    print(VarInt.var_to_int(33150))\n\n\n","sub_path":"varint_demo.py","file_name":"varint_demo.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"249273705","text":"##\n# This demo is a client that connects to the Unity Simulator and\n# (1) creates a camera you can use\n# (2) registers the camera in the simulator and previews it\n# (3) changes the FPS and previews\n# (4) changes the JPEG quality and previews\n# (5) deletes the camera and disconnects from simulator\n##\nimport sys\nfrom functions.MSScam import *\nfrom functions.MSSclient import *\nfrom utils.MSSutils import *\nfrom utils.PropertyFileReader import *\nimport socket\nfrom random import randint\n\nport = 8889\n# port=-1\nsock = -1\nipAddress = \"150.244.57.171\"\ncfgpath = \"./config.ini\"\n\n# parse command line arguments (if any)\nif len(sys.argv) > 2:\n\tport = sys.argv[1]\n\tipAddress = sys.argv[2]\n\tcfgpath = sys.argv[3]\n\n# display demo information in console\nprint(\"DEMO3: SAVE & LOAD A CAMERA\\n\")\nprint(\"This demo tests basic functionality:\\n\")\nprint(\"(1) creates a camera you can use\\n\")\nprint(\"(2) registers the camera in the simulator and previews it\\n\")\nprint(\"(3) changes the FPS and previews\\n\")\nprint(\"(4) changes the JPEG quality and previews\\n\")\nprint(\"(5) deletes the camera and disconnects from simulator\\n\")\n\n# read application settings\nprint(\"\\nReading server configuration (IP & port) from config file %s\" % cfgpath)\nfilereader = PropertyFileReader(cfgpath, False)\n(found, value) = filereader.getProperty(\"SIMULATOR_IP\")\nif found:\n\tipAddress = str(value)\n(found, value) = filereader.getProperty(\"SIMULATOR_PORT\")\nif found:\n\tport = int(value)\n\n# initialize client\ncli = MSSclient()\ncli.connectTosimulator(ipAddress, port, sock)\n\n# create the camera\n# cam = MSScam(\"demo3_test\", 640, 480, 10, 15.0, -3.0, 5.0, 20.0, 10.0, 0.0) # EPS LITE\ncam = 
MSScam(\"demo3_test\", 640, 480, 10, -108.5, 20.0, -31.5, 15.0, 45.0, 0.0)\n\n# add the camera to the simulator\ncam.addTosimulator(cli.sock, ipAddress, port)\n\n# preview the camera for 10 secs\ncam.preview(10) # time in seconds\n\n# set the FPS to 1 and preview the camera for 10 secs\ncam.setFPS(1)\ncam.preview(10) # time in seconds\n\n# set PNG images (lossless codec) and preview camera for 10 secs\ncam.setTXRXformat(CAM_PNG) # 0 = jpg, 1 = png,\ncam.preview(10) # time in seconds\n\n# set JPEG quality to low and preview camera for 10 secs\ncam.setTXRXformat(CAM_JPEG) # 0 = jpg, 1 = png,\ncam.setJPEGquality(25)\ncam.preview(10) # time in seconds\n\n# set JPEG quality to high and preview camera for 10 secs\ncam.setTXRXformat(CAM_JPEG) # 0 = jpg, 1 = png,\ncam.setJPEGquality(95)\ncam.preview(10) # time in seconds\n\n# wait 1 sec before closing this demo\ntime.sleep(1) # time in seconds\n\n# unregister the camera\ncam.removeFromSimulator()\n\n# disconnect from the simulator\ncli.disconnectFromSimulator(cli.sock)","sub_path":"testbasic3.py","file_name":"testbasic3.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"605851167","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport pytest\n\nfrom pathlib import Path\nfrom jsonschema import Draft4Validator\n\n\n@pytest.fixture\ndef schema() -> Path:\n    path = Path('click391', 'schema')\n    file = path.with_suffix('.json')\n    return file\n\n\ndef test_schema_against_draft_v4(schema: Path):\n    assert schema.exists()\n    text = schema.read_text()\n    data = json.loads(text)\n    Draft4Validator.check_schema(data)\n","sub_path":"tests/unit/test_schema.py","file_name":"test_schema.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"203844529","text":"# coding=utf-8\n\nclass Lamp:\n\n    def __init__(self):\n        self.prendido = False\n\n    def prender(self):\n        self.prendido = True\n        self._display_image()\n    \n    def apagar(self):\n        self.prendido = False\n        self._display_image()\n\n    def _display_image(self):\n        if self.prendido:\n            print(\"Lamp on\")\n        else:\n            print(\"Lamp off\")\n\ndef run():\n    lamp = Lamp()\n\n    while True:\n        command = str(input('''\n        [p] Prender (turn on)\n        [a] Apagar (turn off)\n        [s] Salir (quit)\n        '''))\n\n        if command == 'p' or command == 'P':\n            lamp.prender()\n        \n        elif command == 'a' or command == 'A':\n            lamp.apagar()\n\n        else:\n            break\n\nif __name__ == '__main__':\n    run()","sub_path":"objetos.py","file_name":"objetos.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"288425154","text":"#!/usr/bin/env python3\n#\n# Internal Reporting Tool.\n#\n# This tool queries the \"news\" PostgreSQL database to answer the following:\n# 1. What are the most popular three articles of all time?\n# 2. Who are the most popular article authors of all time?\n# 3. 
On which days did more than 1% of requests lead to errors?\n\nimport psycopg2\nimport bleach\n\n# each view's table name, formatted result string, and question answered\nviews = [\n    (0, 'popular_articles',\n     '''\\n\"{0}\" - {1} views''',\n     '\\n\\nWhat are the most popular three articles of all time?'),\n    (1, 'popular_authors',\n     '''\\n{0} - {1} views''',\n     '\\n\\nWho are the most popular article authors of all time?'),\n    (2, 'error_one_percent',\n     '''\\n{0} - {1:.1f}% errors''',\n     '\\n\\nOn which days did more than 1% of requests lead to errors?')\n]\n\n\ndef get_report(view):\n    \"\"\"Returns db query results for 'view' from 'news' db\"\"\"\n    news = psycopg2.connect(\"dbname=news\")\n    cursor = news.cursor()\n    # can't use a query parameter for an identifier (table name), i.e.\n    # cursor.execute('select * from %s', (view,)) won't work, and string\n    # interpolation risks SQL injection, so only integer inputs to this\n    # method are allowed\n    cursor.execute('select * from %s' % views[view][1])\n    rows = cursor.fetchall()\n    news.close()\n    return rows\n\nif __name__ == '__main__':\n    # display reports\n    for view in views:\n        results = get_report(view[0])\n        print(view[3])\n        print(\"\".join(view[2].format(val, count) for val, count in results))\n\n    # finish\n    print(\"\\n\\n\")\n","sub_path":"reporting_tool.py","file_name":"reporting_tool.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"491477852","text":"#\n# gemini_python\n#\n# primitives_preprocess.py\n# ------------------------------------------------------------------------------\nimport math\nimport datetime\nimport numpy as np\nfrom copy import deepcopy\nfrom scipy.ndimage import binary_dilation, filters\nfrom astropy.table import Table\nfrom astropy.convolution import convolve\n\nimport astrodata\nimport gemini_instruments\nfrom astrodata.provenance import add_provenance\n\nfrom gempy.gemini import gemini_tools as gt\nfrom geminidr.gemini.lookups import DQ_definitions as DQ\n\nfrom geminidr import PrimitivesBASE\nfrom recipe_system.utils.md5 import md5sum\nfrom . import parameters_preprocess\n\nfrom recipe_system.utils.decorators import parameter_override\n\n#import os, psutil\n#def memusage(proc):\n#    return '{:9.3f}'.format(float(proc.memory_info().rss) / 1000000)\n# ------------------------------------------------------------------------------\n@parameter_override\nclass Preprocess(PrimitivesBASE):\n    \"\"\"\n    This is the class containing all of the preprocessing primitives.\n\n    \"\"\"\n    tagset = None\n\n    def __init__(self, adinputs, **kwargs):\n        super().__init__(adinputs, **kwargs)\n        self._param_update(parameters_preprocess)\n\n    def addObjectMaskToDQ(self, adinputs=None, suffix=None):\n        \"\"\"\n        Combines the object mask in a `OBJMASK` extension into the `DQ` (Data\n        Quality) plane.\n\n        Parameters\n        ----------\n        adinputs : :class:`~astrodata.AstroData`\n            Images that contain `OBJMASK`. 
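As a hedged aside (a standalone doctest-style sketch, not this\n            method's code), the combination is a plain bitwise OR of\n            equally shaped integer masks:\n\n            >>> import numpy as np\n            >>> dq = np.array([0, 1, 0], dtype=np.uint8)\n            >>> objmask = np.array([1, 0, 1], dtype=np.uint8)\n            >>> dq | objmask\n            array([1, 1, 1], dtype=uint8)\n\n            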
If `OBJMASK` does not exist, the\n extension is untouched.\n\n suffix: str/None\n Suffix to be added to output filenames.\n\n Returns\n -------\n list of :class:`~astrodata.AstroData`\n Images with updated `DQ` plane.\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n\n for ad in adinputs:\n for ext in ad:\n if hasattr(ext, 'OBJMASK'):\n if ext.mask is None:\n ext.mask = deepcopy(ext.OBJMASK)\n else:\n # CJS: This probably shouldn't just be dumped into\n # the 1-bit\n ext.mask |= ext.OBJMASK\n else:\n log.warning('No object mask present for {}:{}; cannot '\n 'apply object mask'.format(ad.filename,\n ext.hdr['EXTVER']))\n ad.update_filename(suffix=suffix, strip=True)\n return adinputs\n\n def ADUToElectrons(self, adinputs=None, suffix=None):\n \"\"\"\n This primitive will convert the units of the pixel data extensions\n of the input AstroData object from ADU to electrons by multiplying\n by the gain.\n\n Parameters\n ----------\n suffix: str/None\n suffix to be added to output filenames\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n for ad in adinputs:\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by ADUToElectrons\".\n format(ad.filename))\n continue\n\n gain_list = ad.gain()\n # Now multiply the pixel data in each science extension by the gain\n # and the pixel data in each variance extension by the gain squared\n log.status(\"Converting {} from ADU to electrons by multiplying by \"\n \"the gain\".format(ad.filename))\n for ext, gain in zip(ad, gain_list):\n extver = ext.hdr['EXTVER']\n log.stdinfo(\" gain for EXTVER {} = {}\".format(extver, gain))\n ext.multiply(gain)\n\n # Update the headers of the AstroData Object. The pixel data now\n # has units of electrons so update the physical units keyword.\n ad.hdr.set('BUNIT', 'electron', self.keyword_comments['BUNIT'])\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=suffix, strip=True)\n return adinputs\n\n def applyDQPlane(self, adinputs=None, **params):\n \"\"\"\n This primitive sets the value of pixels in the science plane according\n to flags from the DQ plane. 
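As a hedged aside (a standalone sketch, not this method's code), a\n        bitmask such as `replace_flags` decomposes into its individual DQ\n        bits like this:\n\n        >>> flags = 89\n        >>> [2**i for i in range(flags.bit_length()) if (flags >> i) & 1]\n        [1, 8, 16, 64]\n\n        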
A uniform mean/median or specific value can\n be given, or a ring filter can be used (if inner_radius and outer_radius\n are both defined, and replace_value is *not* a number).\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n replace_flags: int\n The DQ bits, of which one needs to be set for a pixel to be replaced\n replace_value: str/float\n \"median\" or \"mean\" to replace with that value of the good pixels,\n or a value\n inner_radius: float/None\n inner radius of the mean/median cleaning filter\n outer_radius: float/None\n outer radius of the cleaning filter\n max_iters: int\n maximum number of cleaning iterations to perform\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n replace_flags = params[\"replace_flags\"]\n replace_value = params[\"replace_value\"]\n inner_radius = params[\"inner\"]\n outer_radius = params[\"outer\"]\n max_iters = params[\"max_iters\"]\n footprint = None\n\n flag_list = [int(math.pow(2,i)) for i,digit in\n enumerate(str(bin(replace_flags))[2:][::-1]) if digit=='1']\n log.stdinfo(\"The flags {} will be applied\".format(flag_list))\n\n for ad in adinputs:\n for ext in ad:\n if ext.mask is None:\n log.warning(\"No DQ plane exists for {}:{}, so the correction \"\n \"cannot be applied\".format(ad.filename,\n ext.hdr['EXTVER']))\n continue\n\n # We need to know the dimensionality of the data to create the\n # footprint but, if we've done it once we can avoid creating\n # it again if the dimensionality of this extension is the same\n if inner_radius is not None and outer_radius is not None:\n ndim = len(ext.shape)\n if footprint is None or footprint.ndim != ndim:\n size = int(outer_radius)\n mgrid = np.array(np.meshgrid(*([np.arange(-size, size+1)] * ndim)))\n mgrid *= mgrid\n footprint = np.sqrt(np.sum(mgrid, axis=0))\n footprint = np.where(np.logical_and(footprint>=inner_radius,\n footprint<=outer_radius), 1, 0)\n\n try:\n rep_value = float(replace_value)\n log.fullinfo(\"Replacing bad pixels in {}:{} with the \"\n \"user value {}\".format(ad.filename,\n ext.hdr['EXTVER'], rep_value))\n except ValueError: # already validated so must be \"mean\" or \"median\"\n if footprint is not None:\n mask = (ext.mask & replace_flags) > 0\n filtered_data = ext.data\n iter = 0\n while (iter < max_iters and np.any(mask)):\n iter += 1\n if replace_value == \"median\":\n median_data = filters.median_filter(filtered_data, footprint=footprint)\n filtered_data = np.where(mask, median_data, filtered_data)\n # If we're median filtering, we can update the mask...\n # if more than half the input pixels were bad, the\n # output is still bad.\n if iter < max_iters:\n mask = filters.median_filter(mask, footprint=footprint)\n else:\n # \"Mean\" filtering is just convolution. 
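A minimal\n                            # standalone sketch of such a masked mean filter\n                            # (toy values, assumed 3x3 kernel):\n                            #   import numpy as np\n                            #   from astropy.convolution import convolve\n                            #   data = np.arange(9.0).reshape(3, 3)\n                            #   bad = np.zeros((3, 3), dtype=bool)\n                            #   bad[1, 1] = True\n                            #   smoothed = convolve(data, np.ones((3, 3)),\n                            #                       mask=bad, boundary=\"extend\")\n                            # 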
The astropy\n # version handles the mask.\n median_data = convolve(filtered_data, footprint,\n mask=mask, boundary=\"extend\")\n filtered_data = np.where(mask, median_data, filtered_data)\n # Output pixels are only bad if *all* the pixels in\n # the kernel were bad.\n if iter < max_iters:\n mask = np.where(convolve(mask, footprint,\n boundary=\"extend\")>0.9999, True, False)\n ext.data = filtered_data\n continue\n else:\n oper = getattr(np, replace_value)\n rep_value = oper(ext.data[ext.mask & replace_flags == 0])\n log.fullinfo(\"Replacing bad pixels in {}:{} with the {} \"\n \"of the good data\".format(ad.filename,\n ext.hdr['EXTVER'], replace_value))\n\n # kernel-based replacement avoids this line\n ext.data[(ext.mask & replace_flags) != 0] = rep_value\n\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=params[\"suffix\"], strip=True)\n return adinputs\n\n def associateSky(self, adinputs=None, **params):\n \"\"\"\n This primitive determines which sky AstroData objects are associated\n with each science AstroData object and puts this information in a\n Table attached to each science frame.\n\n The input sky AstroData objects can be provided by the user using the\n parameter 'sky'. Otherwise, the science AstroData objects are found in\n the main stream (as normal) and the sky AstroData objects are found in\n the sky stream.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n distance: float\n minimum separation (in arcseconds) required to use an image as sky\n max_skies: int/None\n maximum number of skies to associate to each input frame\n sky: str/list\n name(s) of sky frame(s) to associate to each input\n time: float\n number of seconds\n use_all: bool\n use everything in the \"sky\" stream?\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n sfx = params[\"suffix\"]\n min_skies = params[\"min_skies\"]\n max_skies = params[\"max_skies\"]\n min_distsq = params.get(\"distance\", 0) ** 2\n\n # Create a timedelta object using the value of the \"time\" parameter\n seconds = datetime.timedelta(seconds=params[\"time\"])\n\n if params.get('sky'):\n sky = params['sky']\n # Produce a list of AD objects from the sky frame/list\n ad_skies = sky if isinstance(sky, list) else [sky]\n ad_skies = [ad if isinstance(ad, astrodata.AstroData) else\n astrodata.open(ad) for ad in ad_skies]\n else: # get from sky stream (put there by separateSky)\n ad_skies = self.streams.get('sky', [])\n\n # Timestamp and update filenames. 
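(Aside: a hedged,\n        # standalone sketch of the timedelta window test this primitive uses:\n        #   import datetime\n        #   window = datetime.timedelta(seconds=600)\n        #   t_sci = datetime.datetime(2020, 1, 1, 3, 0, 0)\n        #   t_sky = datetime.datetime(2020, 1, 1, 3, 8, 0)\n        #   abs(t_sky - t_sci) <= window   # True -> inside the window\n        # )\n        # 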
Do now so filenames agree at end\n for ad in set(adinputs + ad_skies):\n ad.update_filename(suffix=sfx, strip=True)\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n\n if not adinputs or not ad_skies:\n log.warning(\"Cannot associate sky frames, since at least one \"\n \"science AstroData object and one sky AstroData \"\n \"object are required for associateSky\")\n else:\n # Create a dict with the observation times to aid in association\n # Allows us to select suitable skies and propagate their datetimes\n sky_times = dict(zip(ad_skies,\n [ad.ut_datetime() for ad in ad_skies]))\n\n for ad in adinputs:\n # If use_all is True, use all of the sky AstroData objects for\n # each science AstroData object\n if params[\"use_all\"]:\n log.stdinfo(\"Associating all available sky AstroData \"\n \"objects with {}\" .format(ad.filename))\n sky_list = ad_skies\n else:\n sci_time = ad.ut_datetime()\n xoffset = ad.telescope_x_offset()\n yoffset = ad.telescope_y_offset()\n\n # First, select only skies with matching configurations\n # and within the specified time and with sufficiently\n # large separation. Keep dict format\n sky_dict = {k: v for k, v in sky_times.items() if\n gt.matching_inst_config(ad1=ad, ad2=k,\n check_exposure=True)\n and ((k.telescope_x_offset() - xoffset)**2 +\n (k.telescope_y_offset() - yoffset)**2\n > min_distsq)}\n\n # Sort sky list by time difference and determine how many\n # skies will be matched by the default conditions\n sky_list = sorted(sky_dict, key=lambda x:\n abs(sky_dict[x]-sci_time))[:max_skies]\n num_matching_skies = len([k for k in sky_dict\n if abs(sky_dict[k]-sci_time)\n <= seconds])\n\n # Now create a sky list of the appropriate length\n num_skies = min(max_skies or len(sky_list),\n max(min_skies or 0, num_matching_skies))\n sky_list = sky_list[:num_skies]\n\n # Sort sky list chronologically for presentation purposes\n sky_list = sorted(sky_list,\n key=lambda sky: sky.ut_datetime())\n\n if sky_list:\n sky_table = Table(names=('SKYNAME',),\n data=[[sky.filename for sky in sky_list]])\n log.stdinfo(\"The sky frames associated with {} are:\".\n format(ad.filename))\n for sky in sky_list:\n log.stdinfo(\" {}\".format(sky.filename))\n ad.SKYTABLE = sky_table\n else:\n log.warning(\"No sky frames available for {}\".format(ad.filename))\n\n # Need to update sky stream in case it came from the \"sky\" parameter\n self.streams['sky'] = ad_skies\n return adinputs\n\n def correctBackgroundToReference(self, adinputs=None, suffix=None,\n separate_ext=True, remove_background=False):\n \"\"\"\n This primitive does an additive correction to a set\n of images to put their sky background at the same level\n as the reference image before stacking.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n remove_background: bool\n if True, set the new background level to zero in all images\n if False, set it to the level of the first image\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n if len(adinputs) <= 1:\n log.warning(\"No correction will be performed, since at least \"\n \"two input AstroData objects are required for \"\n \"correctBackgroundToReference\")\n # Check that all images have the same number of extensions\n elif not all(len(ad)==len(adinputs[0]) for ad in adinputs):\n raise OSError(\"Number of science extensions in input \"\n \"images do not match\")\n else:\n # Loop over input files\n ref_bg_list = None\n for ad in 
adinputs:\n bg_list = gt.measure_bg_from_image(ad, value_only=True,\n separate_ext=separate_ext)\n # If this is the first (reference) image, set the reference bg levels\n if ref_bg_list is None:\n if remove_background:\n ref_bg_list = ([0] * len(ad)) if separate_ext else 0.\n else:\n ref_bg_list = bg_list\n\n if separate_ext:\n for ext, bg, ref in zip(ad, bg_list, ref_bg_list):\n if bg is None:\n log.warning(\"Could not get background level from \"\n \"{}:{}\".format(ad.filename, ext.hdr['EXTVER']))\n continue\n\n # Add the appropriate value to this extension\n log.fullinfo(\"Background level is {:.0f} for {}:{}\".\n format(bg, ad.filename, ext.hdr['EXTVER']))\n difference = np.float32(ref - bg)\n log.fullinfo(\"Adding {:.0f} to match reference background \"\n \"level {:.0f}\".format(difference, ref))\n ext.add(difference)\n ext.hdr.set('SKYLEVEL', ref,\n self.keyword_comments[\"SKYLEVEL\"])\n else:\n if bg_list is None:\n log.warning(\"Could not get background level from \"\n \"{}\".format(ad.filename))\n continue\n\n # Add the appropriate value to the entire AD object\n log.fullinfo(\"Background level is {:.0f} for {}\".\n format(bg_list, ad.filename))\n difference = np.float32(ref_bg_list - bg_list)\n log.fullinfo(\"Adding {:.0f} to match reference background \"\n \"level {:.0f}\".format(difference, ref_bg_list))\n ad.add(difference)\n ad.hdr.set('SKYLEVEL', ref_bg_list,\n self.keyword_comments[\"SKYLEVEL\"])\n\n # Timestamp the header and update the filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=suffix, strip=True)\n\n return adinputs\n\n def darkCorrect(self, adinputs=None, suffix=None, dark=None, do_dark=True):\n \"\"\"\n This primitive will subtract each SCI extension of the inputs by those\n of the corresponding dark. If the inputs contain VAR or DQ frames,\n those will also be updated accordingly due to the subtraction on the\n data. 
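As a hedged aside with toy numbers: the variances of two frames with\n        independent noise add under subtraction,\n\n        >>> sci_var, dark_var = 25.0, 4.0\n        >>> sci_var + dark_var  # variance of (sci - dark)\n        29.0\n\n        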
If no dark is provided, getProcessedDark will be called to\n ensure a dark exists for every adinput.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n dark: str/list\n name(s) of the dark file(s) to be subtracted\n do_dark: bool\n perform dark correction?\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n if not do_dark:\n log.warning(\"Dark correction has been turned off.\")\n return adinputs\n\n if dark is None:\n self.getProcessedDark(adinputs, refresh=False)\n dark_list = self._get_cal(adinputs, 'processed_dark')\n else:\n dark_list = dark\n\n # Provide a dark AD object for every science frame\n for ad, dark in zip(*gt.make_lists(adinputs, dark_list,\n force_ad=True)):\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by darkCorrect\".\n format(ad.filename))\n continue\n\n if dark is None:\n if 'qa' in self.mode:\n log.warning(\"No changes will be made to {}, since no \"\n \"dark was specified\".format(ad.filename))\n continue\n else:\n raise OSError(\"No processed dark listed for {}\".\n format(ad.filename))\n\n # Check the inputs have matching binning, shapes & units\n # TODO: Check exposure time?\n try:\n gt.check_inputs_match(ad, dark, check_filter=False,\n check_units=True)\n except ValueError:\n # Else try to extract a matching region from the dark\n dark = gt.clip_auxiliary_data(ad, aux=dark, aux_type=\"cal\")\n\n # Check again, but allow it to fail if they still don't match\n gt.check_inputs_match(ad, dark, check_filter=False,\n check_units=True)\n\n log.fullinfo(\"Subtracting the dark ({}) from the input \"\n \"AstroData object {}\".\n format(dark.filename, ad.filename))\n ad.subtract(dark)\n\n # Record dark used, timestamp, and update filename\n ad.phu.set('DARKIM', dark.filename, self.keyword_comments[\"DARKIM\"])\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=suffix, strip=True)\n\n if dark.path:\n add_provenance(ad, dark.filename, md5sum(dark.path) or \"\", self.myself())\n return adinputs\n\n def dilateObjectMask(self, adinputs=None, suffix=None, dilation=1, repeat=False):\n \"\"\"\n Grows the influence of objects detected by dilating the OBJMASK using\n the binary_dilation routine\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n dilation: float\n radius of dilation circle\n repeat: bool\n allow a repeated dilation? 
Unless set, the primitive will no-op\n if the appropriate header keyword timestamp is found\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n # Nothing is going to happen so leave now!\n if dilation < 1:\n return adinputs\n\n xgrid, ygrid = np.mgrid[-int(dilation):int(dilation+1),\n -int(dilation):int(dilation+1)]\n structure = np.where(xgrid*xgrid+ygrid*ygrid <= dilation*dilation,\n True, False)\n\n for ad in adinputs:\n if timestamp_key in ad.phu and not repeat:\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by dilateObjectMask\".\n format(ad.filename))\n continue\n for ext in ad:\n if hasattr(ext, 'OBJMASK') and ext.OBJMASK is not None:\n ext.OBJMASK = binary_dilation(ext.OBJMASK,\n structure).astype(np.uint8)\n\n ad.update_filename(suffix=suffix, strip=True)\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n return adinputs\n\n def flatCorrect(self, adinputs=None, suffix=None, flat=None, do_flat=True):\n \"\"\"\n This primitive will divide each SCI extension of the inputs by those\n of the corresponding flat. If the inputs contain VAR or DQ frames,\n those will also be updated accordingly due to the division on the data.\n If no flatfield is provided, getProcessedFlat will be called\n to ensure a flat exists for every adinput.\n\n If the flatfield has had a QE correction applied, this information is\n copied into the science header to avoid the correction being applied\n twice.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n flat: str\n name of flatfield to use\n do_flat: bool\n perform flatfield correction?\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n qecorr_key = self.timestamp_keys['QECorrect']\n\n if not do_flat:\n log.warning(\"Flat correction has been turned off.\")\n return adinputs\n\n if flat is None:\n self.getProcessedFlat(adinputs, refresh=False)\n flat_list = self._get_cal(adinputs, 'processed_flat')\n else:\n flat_list = flat\n\n # Provide a flatfield AD object for every science frame\n for ad, flat in zip(*gt.make_lists(adinputs, flat_list,\n force_ad=True)):\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by flatCorrect\".\n format(ad.filename))\n continue\n\n if flat is None:\n if 'qa' in self.mode:\n log.warning(\"No changes will be made to {}, since no \"\n \"flatfield has been specified\".\n format(ad.filename))\n continue\n else:\n raise OSError(\"No processed flat listed for {}\".\n format(ad.filename))\n\n # Check the inputs have matching filters, binning, and shapes\n try:\n gt.check_inputs_match(ad, flat)\n except ValueError:\n # Else try to clip the flat frame to the size of the science\n # data (e.g., for GMOS, this allows a full frame flat to\n # be used for a CCD2-only science frame.\n flat = gt.clip_auxiliary_data(adinput=ad,\n aux=flat, aux_type=\"cal\")\n # Check again, but allow it to fail if they still don't match\n gt.check_inputs_match(ad, flat)\n\n # Do the division\n log.fullinfo(\"Dividing the input AstroData object {} by this \"\n \"flat:\\n{}\".format(ad.filename, flat.filename))\n ad.divide(flat)\n\n # Update the header and filename, copying QECORR keyword from flat\n ad.phu.set(\"FLATIM\", flat.filename, self.keyword_comments[\"FLATIM\"])\n try:\n qecorr_value = 
flat.phu[qecorr_key]\n            except KeyError:\n                pass\n            else:\n                log.fullinfo(\"Copying {} keyword from flatfield\".format(qecorr_key))\n                ad.phu.set(qecorr_key, qecorr_value, flat.phu.comments[qecorr_key])\n            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n            ad.update_filename(suffix=suffix, strip=True)\n            if flat.path:\n                add_provenance(ad, flat.filename, md5sum(flat.path) or \"\", self.myself())\n        return adinputs\n\n    def makeSky(self, adinputs=None, **params):\n        adinputs = self.separateSky(adinputs, **self._inherit_params(params, \"separateSky\"))\n        adinputs = self.associateSky(adinputs, **self._inherit_params(params, \"associateSky\"))\n        #adinputs = self.stackSkyFrames(adinputs, **self._inherit_params(params, \"stackSkyFrames\"))\n        #self.makeMaskedSky()\n        return adinputs\n\n    def nonlinearityCorrect(self, adinputs=None, suffix=None):\n        \"\"\"\n        Apply a generic non-linearity correction to data.\n        At present (based on GSAOI implementation) this assumes/requires that\n        the correction is polynomial. The ad.nonlinearity_coeffs() descriptor\n        should return the coefficients in ascending order of power\n\n        Parameters\n        ----------\n        suffix: str\n            suffix to be added to output files\n        \"\"\"\n        log = self.log\n        log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n        timestamp_key = self.timestamp_keys[self.myself()]\n\n        for ad in adinputs:\n            if ad.phu.get(timestamp_key):\n                log.warning(\"No changes will be made to {}, since it has \"\n                            \"already been processed by nonlinearityCorrect\".\n                            format(ad.filename))\n                continue\n\n            # Get the correction coefficients\n            try:\n                nonlin_coeffs = ad.nonlinearity_coeffs()\n            except Exception:\n                log.warning(\"Unable to obtain nonlinearity coefficients for \"\n                            \"{}\".format(ad.filename))\n                continue\n\n            in_adu = ad.is_in_adu()\n            # It's impossible to do this cleverly with a string of ad.mult()s\n            # so use regular maths\n            log.status(\"Applying nonlinearity correction to {}\".\n                       format(ad.filename))\n            for ext, coeffs in zip(ad, nonlin_coeffs):\n                log.status(\"   nonlinearity correction for EXTVER {} is {}\".\n                           format(ext.hdr['EXTVER'], coeffs))\n                pixel_data = np.zeros_like(ext.data)\n\n                # Convert back to ADU per exposure if coadds have been summed\n                # or if the data have been converted to electrons\n                conv_factor = 1 if in_adu else ext.gain()\n                if ext.is_coadds_summed():\n                    conv_factor *= ext.coadds()\n                for n in range(len(coeffs), 0, -1):\n                    pixel_data += coeffs[n-1]\n                    pixel_data *= ext.data / conv_factor\n                pixel_data *= conv_factor\n                # Try to do something useful with the VAR plane, if it exists\n                # Since the data are fairly pristine, VAR will simply be the\n                # Poisson noise (divided by gain if in ADU, divided by COADDS\n                # if the coadds are averaged), possibly plus read-noise**2\n                # So making an additive correction will sort this out,\n                # irrespective of whether there's read noise\n                conv_factor = ext.gain() if in_adu else 1\n                if not ext.is_coadds_summed():\n                    conv_factor *= ext.coadds()\n                if ext.variance is not None and \\\n                   'poisson' in ext.hdr.get('VARNOISE', '').lower():\n                    ext.variance += (pixel_data - ext.data) / conv_factor\n                # Now update the SCI extension\n                ext.data = pixel_data\n\n            # Timestamp the header and update the filename\n            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n            ad.update_filename(suffix=suffix, strip=True)\n        return adinputs\n\n    def normalizeFlat(self, adinputs=None, **params):\n        \"\"\"\n        This primitive normalizes each science extension of the input\n        AstroData object by its mean\n\n        Parameters\n        ----------\n        suffix: str\n            
suffix to be added to output files\n scale: str\n type of scaling to use. Must be a numpy function\n separate_ext: bool\n Scale each extension individually?\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n sfx = params[\"suffix\"]\n separate_ext = params[\"separate_ext\"]\n operator = getattr(np, params[\"scale\"])\n\n for ad in adinputs:\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by normalizeFlat\".\n format(ad.filename))\n continue\n\n if separate_ext:\n for ext in ad:\n # Normalise the input AstroData object. Calculate the\n # \"average\" value of the science extension\n if ext.mask is None:\n scaling = operator(ext.data).astype(np.float32)\n else:\n scaling = operator(ext.data[ext.mask==0]).astype(np.float32)\n # Divide the science extension by the median value\n # VAR is taken care of automatically\n log.fullinfo(\"Normalizing {} EXTVER {} by dividing by {:.2f}\".\n format(ad.filename, ext.hdr['EXTVER'], scaling))\n ext /= scaling\n else:\n # Combine pixels from all extensions, using DQ if present\n scaling = operator(np.concatenate([(ext.data.ravel()\n if ext.mask is None else ext.data[ext.mask==0].ravel())\n for ext in ad])).astype(np.float32)\n log.fullinfo(\"Normalizing {} by dividing by {:.2f}\".\n format(ad.filename, scaling))\n ad /= scaling\n\n # Timestamp and update the filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=sfx, strip=True)\n return adinputs\n\n def scaleByExposureTime(self, adinputs=None, **params):\n \"\"\"\n This primitive scales input images to have the same effective exposure\n time. 
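As a hedged aside (toy values), the factor applied to the pixel data\n        is simply the ratio of exposure times:\n\n        >>> target_time, exptime = 60.0, 30.0\n        >>> target_time / exptime\n        2.0\n\n        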
This can either be provided as a parameter, or the images will be\n scaled to match the exposure time of the first image in the input list.\n\n Parameters\n ----------\n suffix: str/None\n suffix to be added to output files\n time: float/None\n exposure time to scale to (None => use first image's exposure time)\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", \"scaleByExposureTime\", \"starting\"))\n timestamp_key = self.timestamp_keys[\"scaleByExposureTime\"]\n sfx = params[\"suffix\"]\n time = params[\"time\"]\n\n # First check if any scaling is actually required\n exptimes = [ad.exposure_time() for ad in adinputs]\n if len(set(exptimes)) == 1 and (time is None or time == exptimes[0]):\n if time is None:\n log.stdinfo(\"Exposure times are the same therefore no scaling\"\n \" is required.\")\n else:\n log.stdinfo(\"Exposure times are all equal to the requested \"\n \"time of {}\".format(time))\n else:\n for ad, exptime in zip(adinputs, exptimes):\n kw_exptime = ad._keyword_for('exposure_time')\n if time is None:\n time = exptime\n log.stdinfo(\"Scaling to {}'s exposure time of {}\".\n format(ad.filename, time))\n else:\n scale = time / exptime\n if abs(scale - 1.0) > 0.001:\n log.stdinfo(\"Scaling {} by factor {:.3f}\".\n format(ad.filename, scale))\n ad.phu.set(kw_exptime, time,\n comment=self.keyword_comments[kw_exptime])\n # ORIGTEXP should always be the *original* exposure\n # time, so if it already exists, leave it alone!\n if \"ORIGTEXP\" not in ad.phu:\n ad.phu.set(\"ORIGTEXP\", exptime, \"Original exposure time\")\n\n ad.multiply(scale)\n else:\n log.stdinfo(\"{} does not require scaling\".format(ad.filename))\n\n # Timestamp and update the filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=sfx, strip=True)\n return adinputs\n\n def separateSky(self, adinputs=None, **params):\n \"\"\"\n Given a set of input exposures, sort them into separate but\n possibly-overlapping streams of on-target and sky frames. This is\n achieved by dividing the data into distinct pointing/dither groups,\n applying a set of rules to classify each group as target(s) or sky\n and optionally overriding those classifications with user guidance\n (up to and including full manual specification of both lists).\n\n If all exposures are found to be on source then both output streams\n will replicate the input. Where a dataset appears in both lists, a\n separate copy (TBC: copy-on-write?) is made in the sky list to avoid\n subsequent operations on one of the output lists affecting the other.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n frac_FOV: float\n Proportion by which to scale the instrumental field of view when\n determining whether points are considered to be within the same\n field, for tweaking borderline cases (eg. 
to avoid co-adding\n target positions right at the edge of the field)\n ref_obj: str\n comma-separated list of filenames (as read from disk, without any\n additional suffixes appended) to be considered object/on-target\n exposures, as overriding guidance for any automatic classification.\n ref_sky: str\n comma-separated list of filenames to be considered as sky exposures\n\n Any existing OBJFRAME or SKYFRAME flags in the input meta-data will\n also be respected as input (unless overridden by ref_obj/ref_sky) and\n these same keywords are set in the output, along with a group number\n with which each exposure is associated (EXPGROUP).\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n sfx = params[\"suffix\"]\n\n # Allow tweaking what size of offset, as a fraction of the field\n # dimensions, is considered to move a target out of the field in\n # gt.group_exposures(). If we want to check this parameter value up\n # front I'm assuming the infrastructure will do that at some point.\n frac_FOV = params[\"frac_FOV\"]\n\n # Primitive will construct sets of object and sky frames. First look\n # for pre-assigned header keywords (user can set them as a guide)\n objects = set(filter(lambda ad: 'OBJFRAME' in ad.phu, adinputs))\n skies = set(filter(lambda ad: 'SKYFRAME' in ad.phu, adinputs))\n\n # Next use optional parameters. These are likely to be passed as\n # comma-separated lists, but should also cope with NoneTypes\n ref_obj = (params[\"ref_obj\"] or '').split(',')\n ref_sky = (params[\"ref_sky\"] or '').split(',')\n if ref_obj == ['']: ref_obj = []\n if ref_sky == ['']: ref_sky = []\n\n # Add these to the object/sky sets, warning of conflicts\n # use \"in\" for filename comparison so user can specify rootname only\n def strip_fits(s):\n return s[:-5] if s.endswith('.fits') else s\n\n missing = []\n for ad in adinputs:\n for obj_filename in ref_obj:\n if strip_fits(obj_filename) in ad.filename:\n objects.add(ad)\n if 'SKYFRAME' in ad.phu and 'OBJFRAME' not in ad.phu:\n log.warning(\"{} previously classified as SKY; added \"\n \"OBJECT as requested\".format(ad.filename))\n break\n missing.append(obj_filename)\n\n for sky_filename in ref_sky:\n if strip_fits(sky_filename) in ad.filename:\n objects.add(ad)\n if 'OBJFRAME' in ad.phu and 'SKYFRAME' not in ad.phu:\n log.warning(\"{} previously classified as OBJECT; \"\n \"added SKY as requested\".format(ad.filename))\n break\n missing.append(sky_filename)\n\n # Mark unguided exposures as skies\n if ad.wavefront_sensor() is None:\n # Old Gemini data are missing the guiding keywords and the\n # descriptor returns None. 
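(A hedged,\n                # standalone sketch of this presence test, with a plain\n                # dict standing in for the PHU:\n                #   phu = {'PWFS1_ST': 'parked', 'PWFS2_ST': 'parked',\n                #          'OIWFS_ST': 'parked'}\n                #   all(k in phu\n                #       for k in ('PWFS1_ST', 'PWFS2_ST', 'OIWFS_ST'))  # True\n                # )\n                # 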
So look to see if the keywords\n # exist; if so, it really is unguided.\n if ('PWFS1_ST' in ad.phu and 'PWFS2_ST' in ad.phu and\n 'OIWFS_ST' in ad.phu):\n if ad in objects:\n # Warn user but keep manual assignment\n log.warning(\"{} manually flagged as OBJECT but it's \"\n \"unguided!\".format(ad.filename))\n elif ad not in skies:\n log.fullinfo(\"Treating {} as SKY since it's unguided\".\n format(ad.filename))\n skies.add(ad)\n # (else can't determine guiding state reliably so ignore it)\n\n # Warn the user if they referred to non-existent input file(s):\n if missing:\n log.warning(\"Failed to find the following file(s), specified \"\n \"via ref_obj/ref_sky parameters, in the input:\")\n for name in missing:\n log.warning(\" {}\".format(name))\n\n # Analyze the spatial clustering of exposures and attempt to sort them\n # into dither groups around common nod positions.\n groups = gt.group_exposures(adinputs, self.inst_lookups, frac_FOV=frac_FOV)\n ngroups = len(groups)\n log.fullinfo(\"Identified {} group(s) of exposures\".format(ngroups))\n\n # Loop over the nod groups identified above, record which group each\n # exposure belongs to, propagate any already-known classification(s)\n # to other members of the same group and determine whether everything\n # is finally on source and/or sky:\n for num, group in enumerate(groups):\n adlist = group.list()\n for ad in adlist:\n ad.phu['EXPGROUP'] = num\n\n # If any of these is already an OBJECT, then they all are:\n if objects.intersection(adlist):\n objects.update(adlist)\n\n # And ditto for SKY:\n if skies.intersection(adlist):\n skies.update(adlist)\n\n # If one set is empty, try to fill it. Put unassigned inputs in the\n # empty set. If all inputs are assigned, put them all in the empty set.\n if objects and not skies:\n skies = (set(adinputs) - objects) or objects.copy()\n elif skies and not objects:\n objects = (set(adinputs) - skies) or skies.copy()\n\n # If all the exposures are still unclassified at this point, we\n # couldn't decide which groups are which based on user input or guiding\n # so try to use the distance from the target\n if not objects and not skies:\n if ngroups < 2: # Includes zero if adinputs=[]\n log.fullinfo(\"Treating a single group as both object and sky\")\n objects = set(adinputs)\n skies = set(adinputs)\n else:\n distsq = [sum([x * x for x in g.group_cen]) for g in groups]\n if ngroups == 2:\n log.fullinfo(\"Treating 1 group as object and 1 as sky, \"\n \"based on target proximity\")\n closest = np.argmin(distsq)\n objects = set(groups[closest].list())\n skies = set(adinputs) - objects\n else: # More than 2 groups\n # Add groups by proximity until at least half the inputs\n # are classified as objects\n log.fullinfo(\"Classifying groups based on target \"\n \"proximity and observation efficiency\")\n for group in [groups[i] for i in np.argsort(distsq)]:\n objects.update(group.list())\n if len(objects) >= len(adinputs) // 2:\n break\n # We might have everything become an object here, in\n # which case, make them all skies too (better ideas?)\n skies = (set(adinputs) - objects) or objects\n\n # It's still possible for some exposures to be unclassified at this\n # point if the user has identified some but not all of several groups\n # manually (or that's what's in the headers). 
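(Aside: a hedged,\n        # standalone sketch of the proximity ordering used above:\n        #   import numpy as np\n        #   distsq = [25.0, 1.0, 9.0]\n        #   order = np.argsort(distsq)  # array([1, 2, 0]): nearest first\n        # )\n        # 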
We can't do anything\n # sensible to rectify that, so just discard the unclassified ones and\n # complain about it.\n missing = [ad for ad in adinputs if ad not in objects | skies]\n if missing:\n log.warning(\"Ignoring the following input file(s), which could \"\n \"not be classified as object or sky after applying incomplete \"\n \"prior classifications from the input:\")\n for ad in missing:\n log.warning(\" {}\".format(ad.filename))\n\n # Construct object & sky lists (preserving order in adinputs) from\n # the classifications, making a complete copy of the input for any\n # duplicate entries:\n ad_objects = [ad for ad in adinputs if ad in objects]\n ad_skies = [ad for ad in adinputs if ad in skies]\n #ad_skies = [deepcopy(ad) if ad in objects else ad for ad in ad_skies]\n\n log.stdinfo(\"Science frames:\")\n for ad in ad_objects:\n log.stdinfo(\" {}\".format(ad.filename))\n ad.phu['OBJFRAME'] = 'TRUE'\n\n log.stdinfo(\"Sky frames:\")\n for ad in ad_skies:\n log.stdinfo(\" {}\".format(ad.filename))\n ad.phu['SKYFRAME'] = 'TRUE'\n\n # Timestamp and update filename for all object/sky frames\n for ad in ad_objects + ad_skies:\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=sfx, strip=True)\n\n # Put skies in sky stream and return the objects\n self.streams['sky'] = ad_skies\n return ad_objects\n\n def skyCorrect(self, adinputs=None, **params):\n \"\"\"\n This primitive subtracts a sky frame from each of the science inputs.\n Each science input should have a list of skies in a SKYTABLE extension\n and these are stacked and subtracted, using the appropriate primitives.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n apply_dq: bool\n apply DQ mask to data before combining?\n statsec: str/None\n region of image to use for statistics\n operation: str\n type of combining operation for stacking sky frames\n reject_method: str\n type of rejection method for stacking sky frames\n mask_objects: bool\n mask objects using OBJMASK?\n dilation: float\n dilation radius if objects are being masked\n hsigma: float\n high rejection threshold (standard deviations)\n lsigma: float\n low rejection threshold (standard deviations)\n mclip: bool\n use median (rather than mean) for sigma-clipping?\n nlow: int\n number of low pixels to reject (for \"minmax\")\n nhigh: int\n number of high pixels to reject (for \"minmax\")\n memory: float/None\n available memory (in GB) for stacking calculations\n reset_sky: bool\n maintain the sky level by adding a constant to the science\n frame after subtracting the sky?\n scale_sky: bool\n scale each extension of each sky frame to match the science frame?\n offset_sky: bool\n apply offset to each extension of each sky frame to match science?\n sky: str/AD/list\n sky frame(s) to subtract\n \"\"\"\n #tpid = os.getpid()\n #proc = psutil.Process(tpid)\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n\n #print \"STARTING\", memusage(proc)\n\n save_sky = params[\"save_sky\"]\n reset_sky = params[\"reset_sky\"]\n scale_sky = params[\"scale_sky\"]\n offset_sky = params[\"offset_sky\"]\n if params[\"scale\"] and params[\"zero\"]:\n log.warning(\"Both the scale and zero parameters are set. 
\"\n \"Setting zero=False.\")\n params[\"zero\"] = False\n\n # Parameters to be passed to stackSkyFrames\n stack_params = self._inherit_params(params, 'stackSkyFrames',\n pass_suffix=True)\n #stack_params['mask_objects'] = False # We're doing this en masse\n\n # To avoid a crash in certain methods of operation\n if \"sky\" not in self.streams:\n log.warning(\"Sky stream is empty. Will search for sky frames in\"\n \" main stream.\")\n self.streams[\"sky\"] = adinputs\n\n # We'll need to process the sky frames so collect them all up and do\n # this first, to avoid repeating it every time one is reused\n skies = set()\n skytables = []\n for ad in adinputs:\n try:\n # Sort to ease equality comparisons\n sky_list = sorted(list(ad.SKYTABLE[\"SKYNAME\"]))\n del ad.SKYTABLE # Not needed any more\n except AttributeError:\n log.warning(\"{} has no SKYTABLE so cannot subtract a sky \"\n \"frame\".format(ad.filename))\n sky_list = None\n except KeyError:\n log.warning(\"Cannot read SKYTABLE associated with {} so \"\n \"continuing\".format(ad.filename))\n sky_list = None\n skytables.append(sky_list)\n if sky_list: # Not if None\n skies.update(sky_list)\n\n # Now make a list of AD instances of the skies, and delete any\n # filenames that could not be converted to ADs\n skies = sorted(list(skies))\n ad_skies = []\n for filename in skies:\n for sky in self.streams[\"sky\"]:\n if sky.filename in [filename,\n filename.replace(self.params[\"associateSky\"].suffix,\n self.params[\"separateSky\"].suffix)]:\n break\n else:\n try:\n sky = astrodata.open(filename)\n except astrodata.AstroDataError:\n log.warning(\"Cannot find a sky file named {}. \"\n \"Ignoring it.\".format(filename))\n skies.remove(filename)\n continue\n else:\n log.stdinfo(\"Found {} on disk\".format(filename))\n ad_skies.append(sky)\n\n # We've got all the sky frames in sky_dict, so delete the sky stream\n # to eliminate references to the original frames before we modify them\n # Note that we can edit the OBJMASK even if the sky is also a science\n # frame because we expect detectSources() to be run again on the\n # sky-subtracted image.\n #del self.streams[\"sky\"]\n if params[\"mask_objects\"]:\n #ad_skies = [ad if any(hasattr(ext, 'OBJMASK') for ext in ad)\n # else self.detectSources([ad])[0] for ad in ad_skies]\n dilate_params = self._inherit_params(params, \"dilateObjectMask\")\n ad_skies = self.dilateObjectMask(ad_skies, **dilate_params)\n sky_dict = dict(zip(skies, ad_skies))\n stack_params[\"dilation\"] = 0 # We've already dilated\n\n # Make a list of stacked sky frames, but use references if the same\n # frames are used for more than one adinput. 
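(Aside: a hedged sketch\n        # of this reuse pattern, with a hypothetical stack_fn and sky_lists\n        # standing in for the real stacking call and the per-input sky lists:\n        #   cache = {}\n        #   for names in sky_lists:\n        #       key = tuple(names)\n        #       if key not in cache:\n        #           cache[key] = stack_fn(names)  # stacked once, reused later\n        # )\n        # 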
Use a value \"0\" to\n # indicate we have not tried to make a sky for this adinput (\"None\"\n # means we've tried but failed and this can be passed to subtractSky)\n # Fill initial list with None where the SKYTABLE produced None\n stacked_skies = [None if tbl is None else 0 for tbl in skytables]\n for i, (ad, skytable) in enumerate(zip(adinputs, skytables)):\n if skytable is None:\n log.stdinfo(\"Cannot subtract sky from {}\".format(ad.filename))\n continue\n if stacked_skies[i] == 0:\n log.stdinfo(\"Creating sky frame for {}\".format(ad.filename))\n stacked_sky = self.stackSkyFrames([sky_dict[sky] for sky in\n skytable], **stack_params)\n #print ad.filename, memusage(proc)\n if len(stacked_sky) == 1:\n stacked_sky = stacked_sky[0]\n # Provide a more intelligent filename\n stacked_sky.filename = ad.filename\n stacked_sky.update_filename(suffix=\"_sky\", strip=True)\n else:\n log.warning(\"Problem with stacking the following sky \"\n \"frames for {}\".format(adinputs[i].filename))\n for filename in skytable:\n log.warning(\" {}\".format(filename))\n stacked_sky = None\n # Assign this stacked sky frame to all adinputs that want it\n for j in range(i, len(skytables)):\n if skytables[j] == skytable:\n stacked_skies[j] = stacked_sky\n if j > i:\n log.stdinfo(\"This sky will also be used for {}\".format(adinputs[j].filename))\n skytables[j] = [None]\n\n # Go through all the science frames and sky-subtract any that\n # aren't needed for future sky-frame creation\n for j, ad2 in enumerate(adinputs):\n # If already been sky-subtracted or not yet processed\n if not skytables[j] or stacked_skies[j] == 0:\n continue\n\n # We're iterating over *all* skytables so replace \"None\"s\n # with iterable empty lists\n if ad2 not in [sky_dict.get(sky) for skytable in skytables\n for sky in (skytable or [])]:\n # Sky-subtraction is in place, so we can discard the output\n self.subtractSky([ad2], sky=stacked_skies[j], scale_sky=scale_sky,\n offset_sky=offset_sky, reset_sky=reset_sky,\n save_sky=save_sky)\n skytables[j] = []\n # This deletes a reference to the AD sky object\n stacked_skies[j] = None\n\n # Now we have a list of skies to subtract, one per adinput, so send\n # this to subtractSky as the \"sky\" parameter\n #print(\"ABOUT TO SUBTRACT\", memusage(proc))\n #adinputs = self.subtractSky(adinputs, sky=stacked_skies, scale_sky=scale_sky,\n # offset_sky=offset_sky, reset_sky=reset_sky)\n #print(\"SUBTRACTED\", memusage(proc))\n return adinputs\n\n def subtractSky(self, adinputs=None, **params):\n \"\"\"\n This function will subtract the science extension of the input sky\n (or other) frames from the science extension of the input science\n frames. The variance and data quality extension will be updated, if\n they exist.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n reset_sky: bool\n maintain the sky level by adding a constant to the science\n frame after subtracting the sky?\n scale_sky: bool\n scale each extension of each sky frame to match the science frame?\n offset_sky: bool\n apply offset to each extension of each sky frame to match science?\n sky: str/AD/list\n sky frame(s) to subtract\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n save_sky = params[\"save_sky\"]\n reset_sky = params[\"reset_sky\"]\n scale = params[\"scale_sky\"]\n zero = params[\"offset_sky\"]\n if scale and zero:\n log.warning(\"Both the scale_sky and offset_sky parameters are set. 
\"\n \"Setting offset_sky=False.\")\n zero = False\n\n for ad, ad_sky in zip(*gt.make_lists(adinputs, params[\"sky\"],\n force_ad=True)):\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by subtractSky\".\n format(ad.filename))\n continue\n\n if ad_sky is not None:\n # Only call measure_bg_from_image if we need it\n if reset_sky or scale or zero:\n old_bg = gt.measure_bg_from_image(ad, value_only=True)\n log.stdinfo(\"Subtracting the image ({}) from the science \"\n \"AstroData object {}\".\n format(ad_sky.filename, ad.filename))\n if scale or zero:\n sky_bg = gt.measure_bg_from_image(ad_sky, value_only=True)\n for ext_sky, final_bg, init_bg in zip(ad_sky, old_bg, sky_bg):\n if scale:\n ext_sky *= final_bg / init_bg\n else:\n ext_sky += final_bg - init_bg\n log.fullinfo(\"Applying {} to EXTVER {} from {} to {}\".\n format((\"scaling\" if scale else \"zeropoint\"),\n ext_sky.hdr['EXTVER'], init_bg, final_bg))\n if save_sky:\n #ad_sky.update_filename(suffix='_skyimage', strip=True)\n self.writeOutputs([ad_sky])\n ad.subtract(ad_sky)\n if reset_sky:\n new_bg = gt.measure_bg_from_image(ad, value_only=True)\n for ext, new_level, old_level in zip(ad, new_bg, old_bg):\n sky_offset = old_level - new_level\n log.stdinfo(\" Adding {} to {}:{}\".format(sky_offset,\n ad.filename, ext.hdr['EXTVER']))\n ext.add(sky_offset)\n else:\n log.warning(\"No changes will be made to {}, since no \"\n \"sky was specified\".format(ad.filename))\n\n # Timestamp and update filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=params[\"suffix\"], strip=True)\n return adinputs\n\n def subtractSkyBackground(self, adinputs=None, suffix=None):\n \"\"\"\n This primitive is used to subtract the sky background specified by\n the keyword SKYLEVEL.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), \"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n\n for ad in adinputs:\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by subtractSkyBackground\".\n format(ad.filename))\n continue\n\n bg_list = ad.hdr.get('SKYLEVEL')\n for ext, bg in zip(ad, bg_list):\n extver = ext.hdr['EXTVER']\n if bg is None:\n log.warning(\"No changes will be made to {}:{}, since there \"\n \"is no sky background measured\".\n format(ad.filename, extver))\n else:\n log.fullinfo(\"Subtracting {:.0f} to remove sky level from \"\n \"image {}:{}\".format(bg, ad.filename, extver))\n ext.subtract(bg)\n\n # Timestamp and update filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=suffix, strip=True)\n return adinputs\n\n def thresholdFlatfield(self, adinputs=None, **params):\n \"\"\"\n This primitive sets the DQ '64' bit (unilluminated) for any pixels\n which have a value upper in the SCI plane.\n it also sets the science plane pixel value to 1.0 for pixels which are bad\n and very close to zero, to avoid divide by zero issues and inf values\n in the flat-fielded science data.\n\n Parameters\n ----------\n suffix: str\n suffix to be added to output files\n lower: float\n value below which DQ pixels should be set to unilluminated\n upper: float\n value above which DQ pixels should be set to unilluminated\n \"\"\"\n log = self.log\n log.debug(gt.log_message(\"primitive\", self.myself(), 
\"starting\"))\n timestamp_key = self.timestamp_keys[self.myself()]\n sfx = params[\"suffix\"]\n lower = params[\"lower\"]\n upper = params[\"upper\"]\n\n for ad in adinputs:\n if ad.phu.get(timestamp_key):\n log.warning(\"No changes will be made to {}, since it has \"\n \"already been processed by thresholdFlatfield\".\n format(ad.filename))\n continue\n\n for ext in ad:\n if ext.mask is None:\n ext.mask = np.zeros_like(ext.data, dtype=DQ.datatype)\n # Mark the unilumminated pixels with a bit '64' in the DQ plane.\n # make sure the 64 is an int16(64) else it will promote the DQ\n # plane to int64\n unillum = np.where(((ext.data > upper) | (ext.data < lower)) &\n ((ext.mask & DQ.bad_pixel) == 0),\n np.int16(DQ.unilluminated), np.int16(0))\n ext.mask = unillum if ext.mask is None else ext.mask | unillum\n log.fullinfo(\"ThresholdFlatfield set bit '64' for values \"\n \"outside the range [{:.2f},{:.2f}]\".\n format(lower, upper))\n\n # Bad pixels might have low values and don't get flagged as\n # unilluminated, so we need to flag them to avoid infinite\n # values in the flat-fielded image\n ext.data[(ext.mask & DQ.unilluminated) > 0] = 1.0\n ext.data[ext.data < lower] = 1.0\n log.fullinfo(\"ThresholdFlatfield set flat-field pixels to 1.0 \"\n \"for non-illuminated pixels.\")\n\n # Timestamp and update the filename\n gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)\n ad.update_filename(suffix=sfx, strip=True)\n return adinputs\n","sub_path":"geminidr/core/primitives_preprocess.py","file_name":"primitives_preprocess.py","file_ext":"py","file_size_in_byte":64607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"132436485","text":"#Danilo Silva Babilius\t\tRA1800140\r\n#Diego de Almeida Saraiva\tRA1800054\r\n#Rodrigo Rodrigues da Silva\tRA1800409\r\n#Rogerio Alves Santos\t\tRA1800386\r\n#William Bruce Ogura\t\tRA1800114\r\n\r\nimport csv\r\n\r\n#Mensagem de Bem Vindo e Opcoes ao Usuario\r\ndef bemvindo():\r\n\tprint(\"Bem Vindo a Agenda\")\r\n\tprint(\"Selecione uma Opcao\")\r\n\tprint(\"1 Adicionar um novo contato\")\r\n\tprint(\"2 Listar os contatos da agenda\")\r\n\tprint(\"4 Apagar um contato\")\r\n\tprint(\"5 Buscar um contato\")\r\n\r\n \r\n#Funcoes do processo\r\ndef adicionar():\r\n\tprint(\"Adicionar um registro\")\r\n\tagenda = open(\"agendatelefonica.csv\",'a')\r\n\tnome = input(\"Nome do Contato:\")\r\n\ttelefone = input(\"Digite o telefone:\")\r\n\tprint(\"Contato salvo com nome:\",nome,\" e numero\",telefone)\r\n\tagenda.write(nome)\r\n\tagenda.write(\",\")\r\n\tagenda.write(telefone)\r\n\tagenda.write(\",\")\r\n\tagenda.write(\"\\n\")\r\n\tagenda.close()\r\n\t\r\n# Listar linhas da agenda\r\ndef numlinhas():\r\n\tarquivo = open(\"agendatelefonica.csv\", \"r\")\r\n\tn_linhas = sum(1 for linha in arquivo)\r\n\tarquivo.close()\r\n\treturn n_linhas\r\n\r\n\r\ndef listar():\r\n\tqtdlinhas = numlinhas()\r\n\tprint(\"Lista de Contatos\")\r\n\tagenda = open(\"agendatelefonica.csv\")\r\n\tnumero = 0\r\n\twhile numero < qtdlinhas:\r\n\t\tprint (agenda.readline())\r\n\t\tnumero = numero + 1\r\n\tprint(\"Listado correctamente\")\t\r\n\tagenda.close()\r\n\r\ndef deletar():\r\n with open(\"agendatelefonica.csv\",\"r\") as agenda:\r\n reader = csv.reader(agenda)\r\n data = list(reader)\r\n nome=str(\"\")\r\n while nome not in ([row[0] for row in data]):\r\n nome = input(\"Nome do contato a ser deletado: \")\r\n data.pop([row[0] for row in data].index(nome))\r\n with open(\"agendatelefonica.csv\",\"w\",newline=\"\") as 
file:\r\n writer = csv.writer(file)\r\n for row in data :\r\n writer.writerow(row)\r\n print(\"Contato removido com sucesso!!\")\r\n bemvindo()\r\ndef falha():\r\n\tprint(\"Opcao Incorreta\")\r\n\r\ndef encontrar(busca):\r\n agenda = open(\"agendatelefonica.csv\")\r\n lista = (agenda.readlines())\r\n nome = False\r\n for i in range (0,len(lista)):\r\n if busca in lista[i]:\r\n print(lista[i])\r\n nome = True\r\n if nome == False:\r\n print(\"Nome não encontrado\")\r\n","sub_path":"funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"266097516","text":"from django.contrib import admin\nfrom .models import Game\n# Register your models here.\n\n\nclass GameAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'creator',)\n list_display_links = ('id', 'name', )\n list_filter = ('creator', )\n search_fields = ('name', )\n\n\nadmin.site.register(Game, GameAdmin)\n","sub_path":"games/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"223173512","text":"import argparse\nimport os\nimport shutil\nimport torch\nimport time\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom models.loss import LabelSmoothingCrossEntropy\nfrom thop import profile\nfrom torchstat import stat\nfrom ptflops import get_model_complexity_info\nimport torch.optim as optim\nfrom utils import WarmUpLR\n\nmy_model = resnext50_32x4d(num_classes=1000)\npre_model = torchvision.models.resnext50_32x4d(pretrained=True)\npre_dict = pre_model.state_dict()\nmy_model_dict = my_model.state_dict()\n\nneed_pre_dict = {k: v for k, v in pre_dict.items() if k in my_model_dict}\nmy_model_dict.update(need_pre_dict)\nmy_model.load_state_dict(my_model_dict)\nmodel = my_model\n\n\nprint(model)\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch scen classification training')\n\nparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=128, metavar='N',\n help='input batch size for testing (default: 256)')\nparser.add_argument('--epochs', type=int, default=600, metavar='N',\n help='number of epochs to train (default: 160)')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.1)')\nparser.add_argument('-warm', type=int, default=1, help='warm up training phase')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=100, metavar='N',\n 
help='how many batches to wait before logging training status')\nparser.add_argument('--save', default='./log_aid20_r8', type=str, metavar='PATH',\n help='path to save prune model (default: current directory)')\nparser.add_argument('--arch', default='vgg', type=str,\n help='architecture to use')\nparser.add_argument('--depth', default=16, type=int,\n help='depth of the neural network')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nif not os.path.exists(args.save):\n os.makedirs(args.save)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\npretrained_size = 288\n#UCM20\ntrain_means = [0.4858, 0.4915, 0.4518]\ntrain_stds= [0.1739, 0.1640, 0.1564]\n\ntest_means = [0.4827, 0.4886, 0.4492]\ntest_stds= [0.1731, 0.1630, 0.1545]\n\n\n#AID50\n# train_means = [0.3989, 0.4096, 0.3691]\n# train_stds= [0.1586, 0.1460, 0.1408]\n#\n# test_means = [0.3967, 0.4089, 0.3679]\n# test_stds= [0.1573, 0.1442, 0.1397]\n\n# #NWPU45_10\n# train_means = [0.3655, 0.3785, 0.3413]\n# train_stds= [0.1452, 0.1355, 0.1320]\n#\n# test_means = [0.3683, 0.3813, 0.3438]\n# test_stds= [0.1454, 0.1356, 0.1320]\n\n#NWPU45_20\n# train_means = [0.3655, 0.3785, 0.3413]\n# train_stds= [0.1452, 0.1355, 0.1320]\n#\n# test_means = [0.3684, 0.3812, 0.3438]\n# test_stds= [0.1454, 0.1356, 0.1320]\n\ntrain_transforms = transforms.Compose([\n transforms.Resize(pretrained_size),\n transforms.RandomRotation(270),\n transforms.RandomHorizontalFlip(0.5),\n transforms.RandomCrop(pretrained_size, padding = 10),\n transforms.ToTensor(),\n transforms.Normalize(train_means, train_stds)\n ])\n\ntest_transforms = transforms.Compose([\n transforms.Resize(pretrained_size),\n transforms.CenterCrop(pretrained_size),\n transforms.ToTensor(),\n transforms.Normalize(test_means, test_stds)\n ])\n\ntrain_data = datasets.ImageFolder(root = \"data/AID50/train/\",\n transform = train_transforms)\n\ntest_data = datasets.ImageFolder(root = \"data/AID50/test/\",\n transform = test_transforms)\n\ntrain_iterator = data.DataLoader(train_data,\n shuffle = True,\n batch_size = args.batch_size)\n\ntest_iterator = data.DataLoader(test_data,\n batch_size = args.batch_size)\n\n\nfrom torch.optim import lr_scheduler\ndevice = torch.device('cuda' if torch.cuda.is_available else 'cpu')\n\ncriterion = LabelSmoothingCrossEntropy()\n# criterion = torch.nn.CrossEntropyLoss()\n\nmodel = model.to(device)\ncriterion = criterion.to(device)\n# optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=0.0001)\noptimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n# exp_lr_scheduler = lr_scheduler.StepLR(optimizer,step_size=100,gamma=0.1)\n\n\nn_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\nprint('number of params:', n_parameters)\n\n\nif args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}\"\n .format(args.resume, checkpoint['epoch'], best_prec1))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\nfrom torchsummaryX import summary\n\ndef train(epoch):\n model.train()\n avg_loss = 
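# The hard-coded per-dataset means/stds above (UCM20, AID50, NWPU45) are
# precomputed constants. A sketch of how such channel statistics can be
# derived from a DataLoader of [0,1] image tensors (exact when all images
# share the same size):
import torch

def channel_stats(loader):
    n, mean, sq = 0, torch.zeros(3), torch.zeros(3)
    for images, _ in loader:                    # images: (B, 3, H, W)
        flat = images.reshape(images.size(0), 3, -1)
        mean += flat.mean(dim=2).sum(dim=0)
        sq += (flat ** 2).mean(dim=2).sum(dim=0)
        n += images.size(0)
    mean /= n
    return mean, (sq / n - mean ** 2).sqrt()    # std = sqrt(E[x^2]-E[x]^2)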
0.\n train_acc = 0.\n for batch_idx, (data, target) in enumerate(train_iterator):\n if args.cuda:\n data, target = data.to(device), target.to(device)\n\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data)\n # print(summary(model, data))\n # flops, params = profile(model, inputs=(data.to(device),))\n macs, params = get_model_complexity_info(model, (3,288,288), as_strings=True,\n print_per_layer_stat=True, verbose=True)\n # print(\"flops:.6f\", macs, \"params:.6f\", params, \"\\n\")\n print('{:<30} {:<8}'.format('Computational complexity: ', macs))\n print('{:<30} {:<8}'.format('Number of parameters: ', params))\n loss = criterion(output, target)\n avg_loss += loss.item()\n pred = output.data.max(1, keepdim=True)[1]\n train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n\n print('Train Epoch: {}/{}\\tTrain lr: {} [{}/{} ({:.1f}%)]\\tLoss: {:.6f}'.format(\n epoch,args.epochs,optimizer.param_groups[0]['lr'], batch_idx * len(data), len(train_iterator.dataset),\n 100. * batch_idx / len(train_iterator), loss.item()))\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n # import time\n # start = time.time()\n time_z = 0\n for data, target in test_iterator:\n\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n\n with torch.no_grad():\n import time\n start = time.time()\n output = model(data)\n end = time.time()\n time_z = time_z + (end - start)\n # print(\"time: {}\".format((end - start)/len(data)))\n test_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n # end = time.time()\n # time = end - start\n test_loss /= len(test_iterator.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Time: {}\\n'.format(\n test_loss, correct, len(test_iterator.dataset),\n 100.00 * correct / len(test_iterator.dataset),\n time_z/len(test_iterator.dataset)\n\n ))\n return correct / float(len(test_iterator.dataset))\n\n\n\ndef save_checkpoint(state, is_best, filepath):\n torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))\n if is_best:\n shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'), os.path.join(filepath, 'model_best.pth.tar'))\n\nbest_prec1 = 0.\nfor epoch in range(args.start_epoch, args.epochs):\n\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.5 ** (epoch // 10))\n if epoch == 0:\n lr = args.lr * 0.1\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n train(epoch)\n prec1 = test()\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer': optimizer.state_dict(),\n 'cfg': model\n }, is_best, filepath=args.save)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n# import torch.nn.functional as F\n# def get_predictions(model, iterator):\n#\n# model.eval()\n#\n# images = []\n# labels = []\n# probs = []\n#\n# with torch.no_grad():\n#\n# for (x, y) in iterator:\n#\n# x = x.to(device)\n#\n# y_pred = model(x)\n#\n# y_prob = F.softmax(y_pred, dim = -1)\n# top_pred = y_prob.argmax(1, keepdim = True)\n#\n# images.append(x.cpu())\n# labels.append(y.cpu())\n# 
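# The epoch loop above sets the learning rate by hand: an attenuated first
# epoch, then halving every 10 epochs. The same rule as a pure function
# (defaults mirror the script's --lr of 0.01):
def step_lr(epoch, base_lr=0.01, gamma=0.5, step=10, warmup_factor=0.1):
    if epoch == 0:
        return base_lr * warmup_factor       # gentle first epoch
    return base_lr * gamma ** (epoch // step)
# step_lr(0) == 0.001, step_lr(9) == 0.01, step_lr(10) == 0.005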
probs.append(y_prob.cpu())\n#\n# images = torch.cat(images, dim = 0)\n# labels = torch.cat(labels, dim = 0)\n# probs = torch.cat(probs, dim = 0)\n#\n# return images, labels, probs\n# images, labels, probs = get_predictions(model, test_iterator)\n#\n# pred_labels = torch.argmax(probs, 1)\n#\n#\n# def plot_confusion_matrix(labels, pred_labels, classes):\n# fig = plt.figure(figsize=(50, 50))\n# ax = fig.add_subplot(1, 1, 1)\n# cm = confusion_matrix(labels, pred_labels)\n# bm = ConfusionMatrixDisplay(cm, classes)\n# bm.plot(values_format='d', cmap='Blues', ax=ax)\n# fig.delaxes(fig.axes[1]) # delete colorbar\n# plt.xticks(rotation=90)\n# plt.xlabel('Predicted Label', fontsize=80)\n# plt.ylabel('True Label', fontsize=80)\n#\n#\n# from sklearn.metrics import confusion_matrix\n# from sklearn.metrics import ConfusionMatrixDisplay\n# import matplotlib.pyplot as plt\n# import os\n# classes = os.listdir('data/train/')\n#\n# plot_confusion_matrix(labels, pred_labels, classes)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"79321154","text":"from threading import Thread\n\ndef threadmain():\n global num\n num=20\n # print()\n print(\"child thread\", num, id(num))\n\n\n\n\nif __name__ == \"__main__\":\n num = 10\n thread = Thread(target=threadmain)\n thread.start()\n thread.join()\n print(\"finish\")\n print(\"main thread\", num, id(num))\n\n\n# A local variable assigned inside a function cannot be accessed with global\n# Multiple threads can share the main thread's data\n# ","sub_path":"PythonAdvance/day0219/demo28.py","file_name":"demo28.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"562327231","text":"from django.conf import settings\nfrom django.utils import translation, encoding\n\nimport pytest\nfrom mock import Mock, patch\nfrom nose.tools import eq_\nfrom pyquery import PyQuery as pq\nfrom django.utils.translation import activate\n\nfrom olympia import amo, sharing\nfrom olympia.sharing.views import share as share_view\nfrom olympia.amo.tests import BaseTestCase\nfrom olympia.sharing.forms import ShareForm\nfrom olympia.sharing.helpers import sharing_box\nfrom olympia.users.models import UserProfile\n\n\npytestmark = pytest.mark.django_db\n\n\nclass SharingHelpersTestCase(BaseTestCase):\n fixtures = ['base/addon_3615']\n\n def test_sharing_box(self):\n request = Mock()\n request.user = UserProfile()\n request.APP = amo.FIREFOX\n ctx = {'request': request,\n 'APP': request.APP,\n 'LANG': translation.get_language()}\n\n doc = pq(sharing_box(ctx))\n assert doc.html()\n assert doc('li').length == len(sharing.SERVICES_LIST)\n\n # Make sure services are in the right order.\n for i in range(len(sharing.SERVICES_LIST)):\n expected = sharing.SERVICES_LIST[i].shortname\n assert doc('li').eq(i).attr('class') == expected\n assert doc('li a').eq(i).attr('target') in ('_blank', '_self'), (\n 'Sharing link target must either be blank or self.')\n\n\ndef test_services_unicode():\n u = u'\\u05d0\\u05d5\\u05e1\\u05e3'\n d = dict(title=u, url=u, description=u)\n for service in sharing.SERVICES_LIST:\n if service.url:\n service.url.format(**d)\n # This does not work since Python tries to use ascii to decode the string.\n # d = dict((k, encoding.smart_str(v)) for k, v in d.items())\n # for service in sharing.SERVICES_LIST:\n # if service.url:\n # service.url.format(**d)\n\n\ndef test_share_view():\n u = u'\\u05d0\\u05d5\\u05e1\\u05e3'\n s = encoding.smart_str(u)\n request, 
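# demo28.py above demonstrates that a `global` assignment in a worker thread
# rebinds the module-level name the main thread reads. Once several threads
# mutate shared state, the usual companion is a Lock; a minimal sketch:
from threading import Thread, Lock

counter, lock = 0, Lock()

def bump():
    global counter
    with lock:                    # serialise the read-modify-write
        counter += 1

workers = [Thread(target=bump) for _ in range(8)]
for t in workers: t.start()
for t in workers: t.join()
assert counter == 8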
obj = Mock(), Mock()\n request.GET = {'service': 'twitter'}\n obj.get_url_path.return_value = u\n share_view(request, obj, u, u)\n obj.get_url_path.return_value = s\n share_view(request, obj, s, s)\n\n\n@patch.object(settings, 'SITE_URL', 'http://test')\ndef test_share_form():\n form = ShareForm({\n 'title': 'title',\n 'url': '/path/to/nowhere/',\n 'description': 'x' * 250 + 'abcdef',\n })\n form.full_clean()\n eq_(form.cleaned_data['description'], 'x' * 247 + '...')\n assert form.cleaned_data['url'].startswith('http'), (\n \"Unexpected: URL not absolute\")\n\n\ndef test_get_services_in_en_locale():\n # The order is the same as the order of sharing.SERVICES_LIST\n l = ['facebook', 'twitter', 'gplus', 'Reddit', 'Tumblr']\n assert l == [s.shortname for s in sharing.get_services()]\n\n\ndef test_get_services_in_ja_locale():\n\n testo = sharing.LOCALSERVICE1\n testo.shortname = 'translated-localservice1'\n\n expected = [\n 'facebook',\n 'twitter',\n 'gplus',\n 'Reddit',\n 'Tumblr',\n 'translated-localservice1']\n\n with patch.object(sharing, 'LOCALSERVICE1', testo):\n old_locale = translation.get_language()\n try:\n activate('ja')\n assert expected == [s.shortname for s in sharing.get_services()]\n finally:\n activate(old_locale)\n","sub_path":"src/olympia/sharing/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"505670484","text":"import numpy as np\nimport datetime\nimport shutil\nimport urllib.request as request\nfrom contextlib import closing\nfrom netCDF4 import Dataset\nfrom scipy.interpolate import griddata\nfrom scipy.optimize import minimize\nfrom scipy.stats import pearsonr\nfrom scipy.linalg import expm\nimport glob\nimport struct\nimport pandas as pd\nfrom scipy.stats import linregress\nimport os\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef make_npstere_grid(boundinglat,lon_0,grid_res=25e3):\n import pyproj as proj\n p = proj.Proj('+proj=stere +R=6370997.0 +units=m +lon_0='+str(float(lon_0))+' +lat_ts=90.0 +lat_0=90.0',\\\n preserve_units=True)\n llcrnrlon = lon_0 - 45\n urcrnrlon = lon_0 + 135\n y_ = p(lon_0,boundinglat)[1]\n llcrnrlat = p(np.sqrt(2.)*y_,0.,inverse=True)[1]\n urcrnrlat = llcrnrlat\n llcrnrx,llcrnry = p(llcrnrlon,llcrnrlat)\n p = proj.Proj('+proj=stere +R=6370997.0 +units=m +lon_0='+str(float(lon_0))+' +lat_ts=90.0 +lat_0=90.0 +x_0='\\\n +str(-llcrnrx)+' +y_0='+str(-llcrnry), preserve_units=True)\n urcrnrx,urcrnry = p(urcrnrlon,urcrnrlat)\n\n nx = int(urcrnrx/grid_res)+1\n ny = int(urcrnry/grid_res)+1\n dx = urcrnrx/(nx-1)\n dy = urcrnry/(ny-1)\n\n x = dx*np.indices((ny,nx),np.float32)[1,:,:]\n y = dy*np.indices((ny,nx),np.float32)[0,:,:]\n lon,lat = p(x,y,inverse=True)\n return lon,lat,x,y,p\n\ndef read_SIE(fmin,fmax): \n SIEs = {}\n SIEs_dt = {}\n SIEs_trend = {}\n with closing(request.urlopen(sie_ftp+'/north/monthly/data/N_09_extent_v3.0.csv')) as r:\n with open(home+'/DATA/N_09_extent_v3.0.csv', 'wb') as f:\n shutil.copyfileobj(r, f)\n with closing(request.urlopen(sie_ftp+'/seaice_analysis/N_Sea_Ice_Index_Regional_Monthly_Data_G02135_v3.0.xlsx')) as r:\n with open(home+'/DATA/N_Sea_Ice_Index_Regional_Monthly_Data_G02135_v3.0.xlsx', 'wb') as f:\n shutil.copyfileobj(r, f)\n xls = pd.ExcelFile(home+'/DATA/N_Sea_Ice_Index_Regional_Monthly_Data_G02135_v3.0.xlsx',engine='openpyxl')\n SIEs['Pan-Arctic'] = (np.genfromtxt(home+'/DATA/N_09_extent_v3.0.csv',delimiter=',').T[4][1:])[:fmax-1979+1]\n SIEs['Beaufort'] = 
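# read_SIE() above streams each FTP file straight to disk with closing() and
# shutil.copyfileobj, so nothing large is buffered in memory. The download
# pattern in isolation (URL and destination are placeholders):
import shutil
from contextlib import closing
from urllib import request

def fetch(url, dest):
    with closing(request.urlopen(url)) as r, open(dest, "wb") as f:
        shutil.copyfileobj(r, f)   # chunked copy, constant memory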
(np.array(np.array(pd.read_excel(xls, 'Beaufort-Extent-km^2')['September'])[3:-1]/1e6,dtype='float64')[:fmax-1979+1]).round(3)\n SIEs['Chukchi'] = (np.array(np.array(pd.read_excel(xls, 'Chukchi-Extent-km^2')['September'])[3:-1]/1e6,dtype='float64')[:fmax-1979+1]).round(3)\n \n for tag in SIEs:\n trend = np.zeros((fmax-(fmin-1)+1,2))\n dt = np.zeros((fmax-(fmin-1)+1,fmax-1979+1))\n for year in range(fmin-1,fmax+1):\n n = year-1979+1\n reg = linregress(np.arange(n),SIEs[tag][range(n)])\n lineT = (reg[0]*np.arange(n)) + reg[1]\n trend[year-(fmin-1),0] = reg[0]\n trend[year-(fmin-1),1] = reg[1]\n dt[year-(fmin-1),range(n)] = SIEs[tag][range(n)]-lineT\n SIEs_trend[tag] = trend\n SIEs_dt[tag] = dt.round(3)\n return SIEs,SIEs_dt,SIEs_trend\n\ndef readNSIDC(fmin,fmax):\n dimX = 448\n dimY = 304\n SIC = {}\n SIC['lat'] = (np.fromfile(home+\"/misc/psn25lats_v3.dat\",dtype=' 1987) & (year < 1992):\n sat = 'f08'\n elif (year > 1991) & (year < 1996):\n sat = 'f11'\n elif (year > 1995) & (year < 2008):\n sat = 'f13'\n elif year > 2007:\n sat = 'f17'\n files = glob.glob(home+'/DATA/nt_'+str(year)+'06*.1_n.bin')\n if len(files) == 0:\n with closing(request.urlopen(sic_ftp2+'/nt_'+str(year)+'06_'+sat+'_v1.1_n.bin')) as r:\n with open(home+'/DATA/nt_'+str(year)+'06_'+sat+'_v1.1_n.bin', 'wb') as f:\n shutil.copyfileobj(r, f)\n icefile = open(glob.glob(home+'/DATA/nt_'+str(year)+'06*.1_n.bin')[0], 'rb')\n contents = icefile.read()\n icefile.close()\n s=\"%dB\" % (int(dimX*dimY),)\n z=struct.unpack_from(s, contents, offset = 300)\n monthly = (np.array(z).reshape((dimX,dimY)))/250\n monthly[monthly>1] = np.nan\n if year <= 1987:\n hole=84.5\n elif (year > 1987) & (year < 2008):\n hole=87.2\n else:\n hole=89.2\n phole = np.nanmean(monthly[(SIC['lat'] > hole-0.5) & (SIC['lat'] < hole)]) #calculate the mean 0.5 degrees around polar hole\n filled = np.ma.where((SIC['lat'] >= hole-0.5), phole, monthly)\n data_regrid[:,:,k] = griddata((SIC['x'].ravel(),SIC['y'].ravel()),filled.ravel(),\\\n (SIC['xr'],SIC['yr']),'linear')\n k += 1\n SIC['data'] = data_regrid\n return SIC\n\ndef detrend(dataset,fmin,fmax):\n import itertools\n for year in range(fmin,fmax+1):\n n = year-1979+1\n data = dataset['data'][:,:,range(n)]\n X = data.shape[0] ; Y = data.shape[1] ; T = data.shape[2]\n detrended = np.zeros(data.shape)*np.nan\n trend = np.zeros((X,Y,2))*np.nan\n for i,j in itertools.product(range(X),range(Y)):\n if ~np.isnan(data[i,j,range(T)]).all():\n reg = linregress(np.arange(T),data[i,j,range(T)])\n lineT = (reg[0]*np.arange(T)) + reg[1]\n trend[i,j,0] = reg[0]\n trend[i,j,1] = reg[1]\n detrended[i,j,range(T)]=data[i,j,range(T)]-lineT\n\n dataset['dt_'+str(year)] = detrended\n dataset['trend_'+str(year)] = trend\n\ndef networks(dataset,fmin,fmax):\n from CNs_backup.backups import CN_forecast as CN\n for year in range(fmin,fmax+1):\n network = CN.Network(data=dataset['dt_'+str(year)])\n CN.Network.tau(network, 0.01)\n CN.Network.area_level(network,latlon_grid=False)\n CN.Network.intra_links(network, area=dataset['psar'])\n dataset['nodes_'+str(year)] = network.V\n dataset['anoms_'+str(year)] = network.anomaly\n\ndef forecast(fmin,fmax):\n regions = ['Pan-Arctic','Beaufort','Chukchi']\n GPR = {}\n l_init = [np.logspace(-7,2,20)[11],np.logspace(-7,2,20)[0],3.125433e+10]#np.logspace(-7,2,20)[9]]\n sigma_init = [np.logspace(-3,9,20)[4],np.logspace(-3,9,20)[15],40221.26298973]#np.logspace(-3,9,20)[3]]\n for k in range(3):\n fmean = np.zeros(fmax-fmin+1)\n fvar = np.zeros(fmax-fmin+1)\n fmean_rt = np.zeros(fmax-fmin+1)\n for year in 
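# readNSIDC() above patches the satellite's polar observation hole with the
# mean concentration in the 0.5-degree annulus just below the hole latitude.
# The same masking idea in isolation (np.where instead of np.ma.where):
import numpy as np

def fill_polar_hole(sic, lat, hole_lat):
    annulus = (lat > hole_lat - 0.5) & (lat < hole_lat)
    fill = np.nanmean(sic[annulus])             # ring average round the hole
    return np.where(lat >= hole_lat - 0.5, fill, sic)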
range(fmin,fmax+1):\n y = np.asarray([SIEs_dt[regions[k]][year-(fmin-1)-1,range(year-1979)]]).T #n x 1\n n = len(y)\n X = []\n for area in SIC['anoms_'+str(year)]:\n r,p = pearsonr(y[:,0],SIC['anoms_'+str(year)][area][:-1])\n if r>0:\n X.append(SIC['anoms_'+str(year)][area]) \n X = np.asarray(X).T #n x N\n Xs = np.asarray([X[-1,:]])\n X = X[:-1,:]\n\n M = np.abs(np.cov(X, rowvar=False, bias=True))\n np.fill_diagonal(M,0)\n np.fill_diagonal(M,-np.sum(M,axis=0))\n\n def MLII(hyperparameters): #Empirical Bayesian technique for optimisation of hyperparameters\n ℓ = np.exp(hyperparameters[0]) ; σn_tilde = np.exp(hyperparameters[1])\n try:\n Σ_tilde = expm(ℓ*M)\n L_tilde = np.linalg.cholesky(np.linalg.multi_dot([X,Σ_tilde,X.T]) + np.eye(n)*σn_tilde)\n A_tilde = np.linalg.solve(L_tilde.T,np.linalg.solve(L_tilde,y))\n σf = (np.dot(y.T,A_tilde)/n)[0][0]\n σn = σf*σn_tilde\n Σ = σf * expm(ℓ*M)\n L = np.linalg.cholesky(np.linalg.multi_dot([X,Σ,X.T]) + np.eye(n)*σn)\n α = np.linalg.solve(L.T,np.linalg.solve(L,y))\n nlML = np.dot(y.T,α)/2 + np.log(L.diagonal()).sum() + n*np.log(2*np.pi)/2\n\n dKdℓ = np.linalg.multi_dot([X,np.dot(M,Σ),X.T]) + np.eye(n)*σn\n dKdσ_tilde = np.linalg.multi_dot([X,Σ,X.T]) + np.eye(n)*σf\n\n dKdθ1 = ((np.trace(np.linalg.solve(L.T,np.linalg.solve(L,dKdℓ)))/2 - np.linalg.multi_dot([α.T,dKdℓ,α])/2))[0][0]\n dKdθ2 = ((np.trace(np.linalg.solve(L.T,np.linalg.solve(L,dKdσ_tilde)))/2 - np.linalg.multi_dot([α.T,dKdσ_tilde,α])/2))[0][0]\n\n except (np.linalg.LinAlgError,ValueError,OverflowError) as e:\n nlML = np.inf\n dKdθ1 = np.inf ; dKdθ2 = np.inf\n return np.squeeze(nlML), np.asarray([dKdθ1,dKdθ2])\n\n #θ = minimize(MLII,x0=[np.log(l_init[k]),np.log(sigma_init[k])],\\\n # method='CG',jac=True,options={'disp':False}).x\n\n #ℓ = np.exp(θ[0]) ; σn_tilde = np.exp(θ[1])\n ℓ = l_init[k] ; σn_tilde = sigma_init[k]\n Σ_tilde = expm(ℓ*M)\n L_tilde = np.linalg.cholesky(np.linalg.multi_dot([X,Σ_tilde,X.T]) + np.eye(n)*σn_tilde)\n A_tilde = np.linalg.solve(L_tilde.T,np.linalg.solve(L_tilde,y))\n σf = (np.dot(y.T,A_tilde)/n)[0][0]\n σn = σf*σn_tilde\n Σ = σf * expm(ℓ*M)\n L = np.linalg.cholesky(np.linalg.multi_dot([X,Σ,X.T]) + np.eye(n)*σn)\n α = np.linalg.solve(L.T,np.linalg.solve(L,y))\n KXXs = np.linalg.multi_dot([X,Σ,Xs.T])\n KXsXs = np.linalg.multi_dot([Xs,Σ,Xs.T]) + σn\n v = np.linalg.solve(L,KXXs)\n\n fmean[year-fmin] = (np.dot(KXXs.T,α)[0][0]).round(3)\n fvar[year-fmin] = ((KXsXs - np.dot(v.T,v))[0][0]).round(3)\n lineT = (np.arange(year-1979+1)*SIEs_trend[regions[k]][year-(fmin-1)-1,0]) + SIEs_trend[regions[k]][year-(fmin-1)-1,1]\n fmean_rt[year-fmin] = (fmean[year-fmin] + lineT[-1]).round(3)\n\n GPR[regions[k]+'_fmean'] = fmean\n GPR[regions[k]+'_fvar'] = fvar\n GPR[regions[k]+'_fmean_rt'] = fmean_rt\n \n return GPR\n\ndef skill(fmin,fmax):\n regions = ['Pan-Arctic','Beaufort','Chukchi']\n skill_rt = []\n skill_dt = []\n dt_obs = []\n for k in range(3):\n dt = []\n for t in range(fmin,fmax+1):\n n = t - 1979\n dt.append(SIEs_dt[regions[k]][t-(fmin-1),n])\n dt_obs.append(dt)\n forecast_rt = GPR[regions[k]+'_fmean_rt']\n obs_rt = SIEs[regions[k]][fmin-1979:]\n a = np.mean((obs_rt-forecast_rt)**2)\n b = np.mean((obs_rt-np.nanmean(obs_rt))**2)\n skill_rt.append((1 - (a/b)).round(3))\n\n forecast_dt = GPR[regions[k]+'_fmean']\n c = np.mean((dt-forecast_dt)**2)\n d = np.mean((dt-np.nanmean(dt))**2)\n skill_dt.append((1 - (c/d)).round(3))\n return skill_rt,skill_dt,dt_obs\n \n\nhome = os.getcwd()\nif os.path.exists(home+'/DATA')==False:\n os.mkdir(home+'/DATA')\n 
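# forecast() above is Gaussian-process regression whose covariance comes from
# expm(l*M); the prediction itself is the standard Cholesky recipe
# (Rasmussen & Williams, eqs. 2.25 and 2.26). A stripped-down sketch taking a
# precomputed kernel:
import numpy as np

def gp_posterior(K, k_star, k_ss, y, noise):
    L = np.linalg.cholesky(K + noise * np.eye(len(y)))
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))
    v = np.linalg.solve(L, k_star)
    return k_star @ alpha, k_ss - v @ v   # predictive mean and variance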
os.chmod(home+'/DATA',0o0777)\nsie_ftp = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135'\nsic_ftp1 = 'ftp://sidads.colorado.edu/DATASETS/nsidc0081_nrt_nasateam_seaice/north'\nsic_ftp2 = 'ftp://sidads.colorado.edu/DATASETS/nsidc0051_gsfc_nasateam_seaice/final-gsfc/north/monthly'\n\nymax = int(datetime.date.today().year)\nfmin = int(input('Please specify first year you would like to forecast (must be > 1980):\\n'))\nfmax = int(input('Please specify last year you would like to forecast (must be < '+str(ymax)+'):\\n'))\nif fmin < 1981:\n fmin = 1981\nif fmax > ymax-1:\n fmax = ymax-1\n\nprint('Downloading and reading data...')\nSIEs,SIEs_dt,SIEs_trend = read_SIE(fmin,fmax)\nSIC = readNSIDC(fmin,fmax)\nprint('Processing data...')\ndetrend(SIC,fmin,fmax)\nnetworks(SIC,fmin,fmax)\nprint('Running forecast...')\nGPR = forecast(fmin,fmax)\nskill_rt,skill_dt,dt_obs = skill(fmin,fmax)\n\nyears = np.arange(fmin,fmax+1).tolist()\nyears.append('Skill')\n\ndef prep(data,skill=None):\n if type(data)!=list:\n data = data.tolist()\n if skill is not None:\n data.append(skill)\n else:\n data.append('')\n return data\n\ncolumns1 = ['Pan-Arctic$_o$','Pan-Arctic$_f$','Pan-Arctic$_f$ unc','Beaufort$_o$','Beaufort$_f$','Beaufort$_f$ unc','Chukchi$_o$','Chukchi$_f$','Chukchi$_f$ unc']\ncolumns2 = ['Pan-Arctic$_o$','Pan-Arctic$_f$','Beaufort$_o$','Beaufort$_f$','Chukchi$_o$','Chukchi$_f$']\ndata_dt = list(zip(prep(dt_obs[0]),prep(GPR['Pan-Arctic_fmean'],skill_dt[0]),prep(np.sqrt(GPR['Pan-Arctic_fvar']).round(3)),prep(dt_obs[1]),prep(GPR['Beaufort_fmean'],\\\n skill_dt[1]),prep(np.sqrt(GPR['Beaufort_fvar']).round(3)),prep(dt_obs[2]),prep(GPR['Chukchi_fmean'],skill_dt[2]),prep(np.sqrt(GPR['Chukchi_fvar']).round(3))))\ndf_dt = pd.DataFrame(data_dt, index=years, columns=columns1)\n\ndata_rt = list(zip(prep(SIEs['Pan-Arctic'][fmin-1979:]),prep(GPR['Pan-Arctic_fmean_rt'],skill_rt[0]),prep(SIEs['Beaufort'][fmin-1979:]),\\\n prep(GPR['Beaufort_fmean_rt'],skill_rt[1]),prep(SIEs['Chukchi'][fmin-1979:]),prep(GPR['Chukchi_fmean_rt'],skill_rt[2])))\ndf_rt = pd.DataFrame(data_rt, index=years, columns=columns2)\n\ndf_dt.to_csv(home+'/July1st_detrended_forecasts_'+str(fmin)+'-'+str(fmax)+'.csv')\ndf_rt.to_csv(home+'/July1st_forecasts_with_trend_'+str(fmin)+'-'+str(fmax)+'.csv')\n\ncleanup = input('Would you like to remove all the downloaded data files to save disk space? 
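# The report section here zips per-region observation/forecast columns into
# DataFrames whose final row holds the skill score. A compact sketch of that
# layout with toy values:
import pandas as pd

index = [2019, 2020, "Skill"]
obs = [4.2, 3.9, ""]                      # observations carry no skill entry
fc = [4.0, 4.1, 0.62]                     # forecasts end with their skill
df = pd.DataFrame(list(zip(obs, fc)), index=index,
                  columns=["obs", "forecast"])
# df.to_csv("forecasts.csv")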
y n:\\n')\nif cleanup == 'y':\n shutil.rmtree(home+'/DATA',ignore_errors=True)\n\n\n\n\n\n\n\n\n","sub_path":"north/retrospective_forecasts/July1st_retro.py","file_name":"July1st_retro.py","file_ext":"py","file_size_in_byte":15013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"156630572","text":"import boto3\nimport pandas as pd\nimport json\n\n\ndef csv2s3(csv_path, bucket_name):\n file = pd.read_csv(csv_path, sep=';')\n records_list = {}\n records_list['demo_metadata'] = json.loads(file.to_json(orient='records'))\n metadata = json.dumps(records_list)\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(bucket_name)\n bucket.put_object(Key='metadata/metadata.json', Body=metadata)\n\n\nif __name__ == '__main__':\n csv2s3(r\"C:\\Users\\RSTAUNTO\\Desktop\\Python\\projects\\rightcall_robin\\data\\csvs\\odigo4isRecorder_20181121-123619.csv\",\n \"demo.rightcall\")\n","sub_path":"other_scripts/csv2s3.py","file_name":"csv2s3.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"108178926","text":"# utility script to print the Bazel version number to the command line\n# bazel.version_number is only available via repository_rule (why?)\n# so we create a dummy repository.\n\ndef _impl(repository_ctx):\n repository_ctx.file('foo.sh','')\n repository_ctx.file('BUILD','sh_binary(name=\"foo\",srcs=[\"foo.sh\"],visibility=[\"//visibility:public\"])')\n # the actual work happens here\n print(\"Bazel version: \" + native.bazel_version) \n\nbazel_version = repository_rule(\n\timplementation=_impl,\n\tlocal=True,\n\t)","sub_path":"bazel_version.bzl","file_name":"bazel_version.bzl","file_ext":"bzl","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"504381514","text":"# Class for reading and storing rate data from different locations\n# Created based on read_EIRENE.py by holm10 on Jan 27 2020\n# Changelog:\n# 200127 - Rewrote read_EIRENE into a class\n\n\n\nclass RATE_DATA:\n def __init__(self,amjuel='amjuel.tex',hydhel='hydhel.tex',h2vibr='h2vibr.tex',ADAS='ich0-1.dat',UE='ehr1.dat',path='.'):\n ''' Sets up an atomic and molecular reaction rate database\n __init__(*keys)\n \n Optional parameters\n amjuel ('amjuel.tex') - Path to AMJUEL reaction rates relative to the CWD, EIRENE manual tex format\n hydhel ('hydhel.tex') - Path to HYDHEL reaction rates relative to the CWD, EIRENE manual tex format\n h2vibr ('h2vibr.tex') - Path to H2VIBR reaction rates relative to the CWD, EIRENE manual tex format\n ADAS ('ich0-1.dat') - Path to ADAS reaction rates data relative to the CWD, ADAS-04 format\n UE ('ehr1.dat') - Path to UEDGE rates relative to CWD, UEDGE loglog Te,ne fits\n path ('.') - Path if different from CWD\n \n '''\n\n\n # Create a loop that reads the EIRENE tex files, compatible with Jan 2020 versions\n self.reactions= { 'AMJUEL': {'settings' : [500, ['b0','0','a0','h0','p0','k0'], ['a0','h0','p0','k0'], 45], 'path' : amjuel },\n 'HYDHEL': {'settings' : [150, ['b0','0','a0','h0'], ['a0','h0'], 80], 'path' : hydhel },\n 'H2VIBR': {'settings' : [0, ['b0','0','a0','h0'], ['a0','h0'], 20], 'path' : h2vibr },\n 'UE': {},\n 'ADAS': {},\n }\n # For each data point, add the reactions to the appropriate dictionary\n for rate in ['AMJUEL','H2VIBR','HYDHEL']:\n self.read_EIRENE(self.reactions[rate]['path'],self.reactions[rate],self.reactions[rate]['settings'],path=path)\n 
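# csv2s3() above serialises a CSV into JSON records and uploads them through
# a Bucket.put_object call. The same round trip without the intermediate
# file (bucket and key are placeholders; AWS credentials are assumed):
import json
import boto3
import pandas as pd

def frame_to_s3(df, bucket_name, key):
    body = json.dumps({"records": json.loads(df.to_json(orient="records"))})
    boto3.resource("s3").Bucket(bucket_name).put_object(Key=key, Body=body)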
self.read_ADAS(ADAS,self.reactions['ADAS'])\n self.read_UE(UE,self.reactions['UE'])\n\n\n\n\n def read_EIRENE(self,fname,reactions,settings,path='.'):\n ''' Reads the LaTeX version of the EIRENE input data file and stores cofficients to reactions\n read_EIRENE(fname,reactions,settings,*keys)\n \n fname - File name to open from path \n reactions - Dictionary where to store the read reaction rate coefficients\n settings - List of settings for reading the rate data\n settings[0] - Lines to omit from start of file\n settings[1] - Non-coefficient rate entries \n settings[2] - Data rate entry\n settings[3] - Lines to omit from end of file\n \n Optional parameters\n path ('.') - Path if different from CWD\n '''\n from numpy import zeros\n from csv import reader\n # Script parsing Hydhel.tex\n lst,book=[],[]\n # Open reader object\n rd=reader(open('{}/{}'.format(path,fname),'rt'),delimiter=' ')\n # Store book in list\n for row in rd:\n lst.append(row)\n # Strip all empty arrays\n lst=list(filter(None,lst))\n\n for row in lst:\n book.append(list(filter(None,row)))\n\n i=settings[0] # Read from top\n while True:\n # Loop through rows looking for reactions\n if book[i][0]==\"Reaction\":\n reaction=book[i][1] # Store reaction name\n i+=1\n # Loop through reaction looking for coeffs\n while True:\n if book[i][0] not in settings[1]:\n i+=1\n # break\n # Break if wrong fits\n elif book[i][0] in settings[2]:\n break\n # We are in a T-fit\n elif book[i][0]=='b0':\n coeff=[]\n # Parse the next three lines\n for j in range(3):\n for k in range(3):\n coeff.append(float(book[i+j][1+k*2].replace(',','').replace('D','E')))\n i+=1\n reactions[reaction]=coeff\n break\n # Wea re in a (T,E)-fit\n elif book[i][0]=='0':\n coeff=zeros((9,9))\n # Parse the whole data block in one\n for j in range(3):\n for k in range(9):\n for l in range(3):\n coeff[k,j*3+l]=float(book[i+k+j*9+j*2][l+1].replace('D','E'))\n # Store the coefficients\n # TODO: figure out better way to kill off ne,T fits??\n if reaction not in ['2.2.14','2.0l2']:\n reactions[reaction]=coeff\n \n i+=9+3*3+2*2 # Set counter after block\n break\n if i>=len(book)-settings[3]: # Omit last lines \n break\n\n i+=1\n if i>=len(book)-settings[3]: # Omit last lines\n break\n\n\n def read_ADAS(self,fname,reactions,path='.'):\n ''' Reads the ADAS file fname stores cofficients to reactions\n read_ADAS(fname,reactions,*keys)\n \n fname - File name to open from path \n reactions - Dictionary where to store the read reaction rate coefficients\n \n Optional parameters\n path ('.') - Path if different from CWD\n '''\n # Constants turning the values into \n cm1=1/8065.6\n kB=8.621738e-5\n # The ADAS reading routine follows the ADAS documentation: only parameters relevant to the CRM are extracted\n with open('{}/{}'.format(path,fname)) as f: # Open the file\n l=f.readline() # Discard data\n l=f.readline().split() # Read first data line\n # Loop through energy level data\n while l[0]!='-1':\n reactions['E'+l[0]]=float(l[4])*cm1 # Store as eV in reactions['E']\n l=f.readline().split()\n # Read and store temperature point data\n l=f.readline().split()\n reactions['T']=[float(x.replace('+','e+').replace('-','e-'))*kB for x in l[2:]]\n # Read first rate line\n l=f.readline()\n # Read all unspecified data\n while l[0]==' ':\n if l.strip()=='-1': break\n [ul,ll]=l[:8].split()\n l=l[8:]\n l=[l[i:i+8].strip() for i in range(0,len(l),8)][:-1]\n # Store excitation and relaxation data\n reactions[ul+'-'+ll]=float(l[0][:-3]+'e'+l[0][-3:])\n reactions[ll+'-'+ul]=[float(x[:-3]+'e'+x[-3:]) 
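# read_EIRENE() above harvests either 9 coefficients (single-parameter fits)
# or a 9x9 block (two-parameter fits). In the EIRENE manuals such tables are
# polynomials in log space; the sketch below shows the usual evaluation, but
# the exact convention (argument order, units) depends on the reaction, so
# treat it as an assumption rather than this class's API:
import numpy as np

def eval_fit_1d(coeff, T):
    x = np.log(T)
    return np.exp(sum(c * x**i for i, c in enumerate(coeff)))

def eval_fit_2d(coeff, T, E):
    x, y = np.log(T), np.log(E)
    return np.exp(sum(coeff[i, j] * x**i * y**j
                      for i in range(coeff.shape[0])
                      for j in range(coeff.shape[1])))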
for x in l[1:len(reactions['T'])+1]]\n l=f.readline()\n\n\n def read_UE(self,fname,reactions,path='.',datalist=['IONIZ','REC','IONIZRAD','RECRAD']):\n ''' Reads the UEDGE rate file fname stores cofficients to reactions\n read_UE(fname,reactions,*keys)\n \n fname - File name to open from path \n reactions - Dictionary where to store the read reaction rate coefficients\n \n Optional parameters\n path ('.') - Path if different from CWD\n datalist (['IONIZ'],['REC']) - List of names to give to blocks in rate data file.\n All blocks are read in consecutive order and stores\n with the corresponding datalist entry as their database.\n '''\n\n from numpy import array,transpose\n with open('{}/{}'.format(path,fname)) as f: # Open the file\n # Read the ionizationa nd recombination data (1st and 2nd blocks in ehr1.dat)\n for k in datalist:\n reactions[k]=[]\n l=f.readline() # Discard header\n # Loop through data blocks\n for i in range(15): # Density points\n buff=[] # Buffer\n for j in range(12): # Temperature points\n l=f.readline().split() \n # Arrange temperature points into 1D array\n if j not in [0,11]:\n for x in l:\n if 'E' not in x: x=x.replace('-','E-').replace('+','E+')\n buff.append(float(x))\n # Append temperature points to reactions['IONIZ/REC'] for each density point\n reactions[k].append(buff)\n reactions[k]=transpose(array(reactions[k])) # Convert 2D list into array(T,n)\n\n def get_coeff(self,database,reaction):\n ''' Returns the cofficients of reaction in database\n get_coeff(database,reaction)\n \n database - Database in which to look for reaction (string)\n reaction - Reaction ID to retrieve from database (string)\n '''\n\n return(self.reactions[database][reaction])\n\n\n\n \n\n\n\n","sub_path":"CRUM/ratedata.py","file_name":"ratedata.py","file_ext":"py","file_size_in_byte":9338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"201916297","text":"\ndef getExecFunction(rg_intentName, rg_filterData):\n import boto3\n import json\n from boto3.dynamodb.conditions import Key, Attr\n # Get the service resource.\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('intentSlotTable')\n response = table.query(\n KeyConditionExpression=Key('Intent').eq(rg_intentName) & Key('Slot').eq(rg_filterData)\n #FilterExpression=Attr('Slot').eq(rg_filterData)\n )\n items = response['Items'] #['Slot']\n if len(items) == 1:\n rtngetExecFunction = items[0]['execFunction']\n else:\n rtngetExecFunction = \"Sorry, I did not get that. 
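# getValueFromDynamoDB here queries a table keyed by 'Intent' (partition)
# and, apparently, 'Slot' (sort key). The same boto3 Table.query call as a
# parameterised sketch, returning None instead of an apology string:
import boto3
from boto3.dynamodb.conditions import Key

def lookup(intent, slot, table_name="intentSlotTable"):
    table = boto3.resource("dynamodb").Table(table_name)
    resp = table.query(
        KeyConditionExpression=Key("Intent").eq(intent) & Key("Slot").eq(slot))
    items = resp["Items"]
    return items[0]["execFunction"] if len(items) == 1 else None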
Please try later.\"\n return rtngetExecFunction\n\nprint(getExecFunction('backupIntent','stage'))\n","sub_path":"NewScripts/Working/getValueFromDynamoDB.py","file_name":"getValueFromDynamoDB.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"371005565","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport time\r\nfrom selenium import webdriver\r\nimport logging\r\nimport numpy as np\r\n\r\n\r\ndef LOG(msg):\r\n logging.basicConfig(filename='html_file.txt',filemode='w+',level=logging.INFO,format='%(asctime)s %(message)s')\r\n logging.info(msg)\r\n\r\n\r\n\r\nurl='https://www.naukri.com/data-scientist-python-jobs-in-bengaluru?k=data%20scientist%20python&l=bengaluru'\r\n\r\n\r\npage=requests.get(url)\r\n#print(page.text)\r\n\r\n\r\ndriver=webdriver.Chrome(r'D:\\ML\\Project_1\\chromedriver.exe')\r\ndriver.get(url)\r\n\r\nsoup = BeautifulSoup(driver.page_source,'html5lib')\r\n#LOG(soup.prettify())\r\n\r\ndriver.close()\r\n\r\n\r\ndf=pd.DataFrame(columns=['Title','Company','Ratings','Reviews'])\r\n\r\nresults = soup.find(class_='list')\r\n\r\n#LOG(results)\r\n\r\njob_elems = results.find_all('article',class_='jobTuple bgWhite br4 mb-8')\r\n#LOG(job_elems)\r\n\r\n#skill_elems=results.find_all('ul',class_='tags has-description')\r\n\r\n#LOG(skill_elems)\r\n\r\njob_title=[]\r\ncompany=[]\r\nrequirements=[]\r\nreviews=[]\r\n\r\nfor job_elem in job_elems:\r\n job_title.append(job_elem.find('a',class_='title fw500 ellipsis').get('title'))\r\n company.append(job_elem.find('a', class_='subTitle ellipsis fleft').get('title'))\r\n requirement=job_elem.find(class_='job-description fs12 grey-text')\r\n requirements.append(requirement.text)\r\n review=job_elem.find(class_='starRating fleft dot')\r\n if review is None:\r\n reviews.append('NA')\r\n else:\r\n reviews.append(review.text)\r\n #df.append({})\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#LOG(job_title)\r\n#LOG(company)\r\n#LOG(requirements)\r\n#LOG(reviews)\r\n\r\ndata=dict()\r\n\r\ndata={'Title':job_title,'Company':company,'Ratings':reviews}\r\n\r\ndf=pd.DataFrame(data=data)\r\n\r\ndf.drop\r\n\r\n\r\nLOG(df.head())\r\n\r\ndf.to_csv('data.csv')\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"beautiful_soap_demo/bs_demo.py","file_name":"bs_demo.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"50883241","text":"#------------------------------------------------------------------------------\n#\n# SWE v1.0 namespace\n#\n# Project: XML Metadata Handling\n# Authors: Martin Paces \n#\n#-------------------------------------------------------------------------------\n# Copyright (C) 2013 EOX IT Services GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#-------------------------------------------------------------------------------\n\nfrom lxml.builder import ElementMaker\nfrom xml_utils import nn\n\n#------------------------------------------------------------------------------\n# namespace\n\nNS = \"http://www.opengis.net/swe/1.0\"\nNS_MAP = {\"swe\": NS}\n\n#------------------------------------------------------------------------------\n# element maker\n\nE = ElementMaker(namespace=NS, nsmap=NS_MAP)\n\n#------------------------------------------------------------------------------\n# predefined fully qualified names\n\n# attributes\n\n# elements\n\nDataRecord = nn(NS, 'DataRecord')\nAllowedValues = nn(NS, 'AllowedValues')\nfield = nn(NS, 'field')\nQuantity = nn(NS, 'Quantity')\ndescription = nn(NS, 'description')\nnilValues = nn(NS, 'nilValues')\nuom = nn(NS, 'uom')\nconstraint = nn(NS, 'constraint')\ninterval = nn(NS, 'interval')\nsignificantFigures = nn(NS, 'significantFigures')\n\n","sub_path":"metadata/ns_swe10.py","file_name":"ns_swe10.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"317116275","text":"import win32api\nimport win32print\nimport traceback\nimport tkinter as gui\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom PIL import ImageTk, Image\nimport mysql.connector # pip install mysql-connector\nimport pymysql # pip install pymysq\nfrom datetime import date\n\n\n\n \n \ndef MainMEntradaDeNotas():\n \n Pedidos_window = Toplevel()\n Pedidos_window.title(\"Lanchonete | Entrada de Notas\")\n Pedidos_window.resizable(False, False) \n Pedidos_window.geometry(\"950x500\") \n Pedidos_window.iconbitmap(\"imagens/ico.lanchonete.ico\")\n Pedidos_window.configure(bg=\"#DCDCDC\")\n \n def Ultimocodigo(): # Pega o maior valor da coluna cod_pedido para colocar na entry Cod \n\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n sqlid = \"SELECT MAX(cod_entrada) FROM entradanotas\"\n mycursor.execute(sqlid)\n for i in mycursor:\n print(i)\n if i[0] == None :\n i = 0 \n ultimocod = i\n CodEntrada_entry[\"state\"] = \"normal\"\n CodEntrada_entry.delete(0,END)\n CodEntrada_entry.insert(0,ultimocod[0]+1)\n CodEntrada_entry[\"state\"] = \"disabled\"\n \n def tecla(e): # Função para quando apertar enter no teclado fazer o calculo\n \n VlTotal_entry.delete(0,END) # Apagando o valor ja existente\n qtd = Qtd_label_entry.get() # pegando a quantidade\n \n vlUnit = VlUnit_entry.get() # pegando o valor unitario\n \n if qtd == \"\" or vlUnit == \"\":\n messagebox.showwarning(\"Atenção\",\"Digite os valores\")\n \n Qtd_label_entry.focus()\n qtd = 1\n\n total = int(qtd) * float(vlUnit) # multiplicando a quantidade com o valor unitário\n VlTotal_entry.insert(0,total) # inserindo no campo\n def excluir_item_lista(): # função para excluir item selecionado\n try:\n itemSelecionado = ShowItens_tv.selection()[0] # pegando o item selecionado\n ShowItens_tv.delete(itemSelecionado) # apagando o item\n SomandoItens() # recalculando o valor totas do pedido\n except:\n 
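# Ultimocodigo() above fetches MAX(cod_entrada) and special-cases an empty
# table by substituting 0 before adding 1. MySQL can fold that branch into
# the statement itself; a sketch of the equivalent query:
sql_next_id = "SELECT COALESCE(MAX(cod_entrada), 0) + 1 FROM entradanotas"
# mycursor.execute(sql_next_id); next_id = mycursor.fetchone()[0]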
messagebox.showinfo(title=\"ERRO\",message=\"Selecione um item\") # mensagem caso não exista item selecionado\n def moeda (qtd = 0, vlunit = 0 , moeda ='R$'): # Função para converte em moeda\n int(qtd)\n total = int(qtd) * float(vlunit)\n return f'{total:.2f}'#.replace('.',',') \n def inserir_lista():\n if CodProd_entry.get() == \"\":\n messagebox.showinfo(title=\"ERRO\",message=\"Digite o codigo do produto\")\n CodProd_entry.focus()\n return\n if DescProd_entry.get() == \"\":\n messagebox.showinfo(title=\"ERRO\",message=\"Digite a Descrição\")\n DescProd_entry.focus()\n return \n if Un_entry.get() == \"\":\n messagebox.showinfo(title=\"ERRO\",message=\"Digitea Unidade\")\n Un_entry.focus()\n return\n if Qtd_label_entry.get() == \"\":\n messagebox.showinfo(title=\"ERRO\",message=\"Digite a Quantidade\")\n Qtd_label_entry.focus()\n return \n if VlUnit_entry.get() == \"\":\n messagebox.showinfo(title=\"ERRO\",message=\"Digite o Valor Unitarios\")\n VlUnit_entry.focus()\n return \n if VlTotal_entry.get() == \"\":\n messagebox.showinfo(title=\"ERRO\",message=\"Valor total não calculado\")\n VlTotal_entry.focus()\n return \n\n ShowItens_tv.insert(\"\",\"end\",values=(CodProd_entry.get(), DescProd_entry.get(), Un_entry.get(), Qtd_label_entry.get(),VlUnit_entry.get(),VlTotal_entry.get())) \n CodProd_entry.delete(0,END)\n DescProd_entry.delete(0,END)\n Un_entry.delete(0,END)\n Qtd_label_entry.delete(0,END)\n VlUnit_entry.delete(0,END)\n VlTotal_entry.delete(0,END)\n Qtd_label_entry.insert(0,1)\n CodProd_entry.focus()\n SomandoItens()\n def pesquisarProdutos(): # Função para pesquisar o produto.\n if CodProd_entry.get() == \"\":\n messagebox.showwarning(\"Warning\",\"Informe um codigo para pesquisar\") # Mensagem caso não encontre produto.\n CodProd_entry.focus()\n else:\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n \n\n sqlPesquisar = \"SELECT * FROM produtos where cod_produto= {}\".format(CodProd_entry.get())\n mycursor.execute(sqlPesquisar)\n valido = mycursor.fetchall()\n\n\n if len(valido) > 0:\n\n\n for produto in valido: # percorrendo o produto \n #print(produto)\n\n # Apagando os campos \n DescProd_entry.delete(0,END)\n VlUnit_entry.delete(0,END)\n VlTotal_entry.delete(0,END)\n\n # Colocando os valos\n DescProd_entry.insert(0,produto[2])\n Un_entry.insert(0,produto[4]) \n VlUnit_entry.insert(0,produto[6])\n\n\n qtd = Qtd_label_entry.get() \n vluni = VlUnit_entry.get() \n total = int(qtd) * float(vluni) # calculando o total\n\n VlTotal_entry.insert(0,total) # Inserindo o total\n \n else:\n messagebox.showwarning(\"Warning\",\"Produto não localizado\") # Mensagem caso não encontre produto.\n \n def SomandoItens(): # Função soma todos os itens da lista\n total = 0\n float(total)\n # Pegando todos os itens da tree\n children = ShowItens_tv.get_children()\n #percorrendo tods os itens e pegado so o valor total do item\n for i in children:\n info = ShowItens_tv.item(i,\"values\")\n item = info[5]\n \n total = float(total) + float(item) \n \n \n total_pedido_entry.delete(0,END)\n total_pedido_entry.insert(0,moeda(1,total)) \n \n def SalvarNota(): # Função Para Salva o pedido, colocando no campo pedido_fechado= \"N\"\n\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n CodEntrada_entry[\"state\"] = \"normal\"\n sqlselect = \"SELECT cod_entrada FROM entradanotas where cod_entrada = '{}' 
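# pesquisarProdutos() and SalvarNota() interpolate user input into SQL with
# str.format, which breaks on quotes and invites injection. pymysql accepts
# %s placeholders that escape values safely; the product lookup as a hedged
# sketch:
import pymysql

def buscar_produto(cod):
    conn = pymysql.connect(host="localhost", user="root",
                           password="", database="bdlanchonete")
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT * FROM produtos WHERE cod_produto = %s", (cod,))
            return cur.fetchall()
    finally:
        conn.close()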
\".format(CodEntrada_entry.get()) # like (parecido com)\n \n mycursor.execute(sqlselect)\n valido = mycursor.fetchall()\n\n if len(valido) > 0:\n print(\"tem Entrada\")\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor= connection.cursor()\n \n CodEntrada_entry[\"state\"] = \"normal\"\n sqldeleteItens = \"DELETE FROM itens_entrada where id_entrada = {};\".format(CodEntrada_entry.get())\n mycursor.execute(sqldeleteItens)\n connection.commit() \n\n CodEntrada_entry[\"state\"] = \"normal\"\n sqldeleteVenda = \"DELETE FROM entradanotas where cod_entrada = {};\".format(CodEntrada_entry.get())\n mycursor.execute(sqldeleteVenda)\n \n mycursor.close()\n connection.commit()\n connection.close()\n\n\n\n\n # salvando os dados do pedido\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n CodEntrada_entry[\"state\"] = \"normal\" \n CodOperador_entry[\"state\"] = \"normal\" \n sqlVenda = \"INSERT INTO entradanotas(cod_entrada,nome_operador,id_fornecedor,vl_total,nota_fechado) VALUES('{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),CodOperador_entry.get(),CodFornecedor_entry.get(),total_pedido_entry.get(),\"N\")\n mycursor.execute(sqlVenda) \n\n mycursor.close()\n connection.commit()\n connection.close()\n \n children = ShowItens_tv.get_children()\n #percorrendo tods os itens e pegado so o valor total do item\n for i in children:\n info = ShowItens_tv.item(i,\"values\")\n cod = info[0]\n desc = info[1]\n un = info[2]\n qtd = info[3]\n vlunit = info[4]\n total_item = info[5]\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n sqlItens = \"INSERT INTO itens_entrada(id_entrada, cod_prod_entrada, prod_des_entrada,un_entrada, qtd_entrada, vl_init_entrada, vl_total_entrada) VALUES('{}','{}','{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),cod,desc,un,qtd,vlunit,total_item)\n mycursor.execute(sqlItens)\n mycursor.close()\n connection.commit()\n connection.close()\n\n\n ShowItens_tv.delete(*ShowItens_tv.get_children()) #limpa a lista\n total_pedido_entry.delete(0,END)\n total_pedido_entry.insert(0,\"0,00\")\n CodEntrada_entry[\"state\"] = \"disabled\"\n CodOperador_entry[\"state\"] = \"disabled\" \n Ultimocodigo()\n else:\n print(\"Não tem pedido\")\n # salvando os dados do pedido\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n CodEntrada_entry[\"state\"] = \"normal\"\n CodOperador_entry[\"state\"] = \"normal\" \n CodFornecedor = Cod_Pesquisa_entry.get() \n sqlEntrada = \"INSERT INTO entradanotas(cod_entrada,nome_operador,id_fornecedor,vl_total,nota_fechado) VALUES('{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),CodOperador_entry.get(),CodFornecedor,total_pedido_entry.get(),\"N\")\n mycursor.execute(sqlEntrada) \n\n mycursor.close()\n connection.commit()\n connection.close()\n \n children = ShowItens_tv.get_children()\n #percorrendo tods os itens e pegado so o valor total do item\n for i in children:\n info = ShowItens_tv.item(i,\"values\")\n cod = info[0]\n desc = info[1]\n un = info[2]\n qtd = info[3]\n vlunit = info[4]\n total_item = info[5]\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n sqlItens = \"INSERT INTO itens_entrada(id_entrada, cod_prod_entrada, 
prod_des_entrada,un_entrada, qtd_entrada, vl_init_entrada, vl_total_entrada) VALUES('{}','{}','{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),cod,desc,un,qtd,vlunit,total_item)\n mycursor.execute(sqlItens)\n mycursor.close()\n connection.commit()\n connection.close()\n\n\n ShowItens_tv.delete(*ShowItens_tv.get_children()) #limpa a lista\n total_pedido_entry.delete(0,END)\n total_pedido_entry.insert(0,\"0,00\")\n CodEntrada_entry[\"state\"] = \"disabled\"\n CodOperador_entry[\"state\"] = \"disabled\"\n Ultimocodigo()\n def PesquisarNota(): # Função para pesquisar pedido\n\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n \n sqlselect = \"SELECT cod_entrada, nota_fechado FROM entradanotas where cod_entrada = '{}' \".format(Cod_Pesquisa_entry.get()) # like (parecido com)\n \n mycursor.execute(sqlselect)\n valido = mycursor.fetchall()\n \n if len(valido) > 0: \n for venda in valido: \n \n if venda[1] == \"S\":\n messagebox.showinfo(title=\"ERRO\",message=\"Nota fechada não pode ser alterada\")\n Cod_Pesquisa_entry.delete(0,END)\n Cod_Pesquisa_entry.focus()\n\n else:\n \n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n \n sqlselect = \"SELECT * FROM entradanotas where cod_entrada = '{}' \".format(Cod_Pesquisa_entry.get()) \n mycursor.execute(sqlselect) \n\n for pedido in mycursor:\n\n codigoPedido = pedido[0]\n operador = pedido[1]\n fornecedor = pedido[2]\n valorTotal = pedido[3]\n\n CodEntrada_entry[\"state\"] = \"normal\"\n CodEntrada_entry.delete(0,END)\n CodEntrada_entry.insert(0,codigoPedido)\n CodEntrada_entry[\"state\"] = \"disabled\"\n\n CodOperador_entry.delete(0,END)\n CodOperador_entry.insert(0,operador)\n\n CodFornecedor_entry.delete(0,END)\n CodFornecedor_entry.insert(0,fornecedor)\n\n NomeFornecedor_entry.insert(0,BuscarFornecedor())\n\n total_pedido_entry.delete(0,END)\n total_pedido_entry.insert(0,valorTotal)\n\n\n ShowItens_tv.delete(*ShowItens_tv.get_children()) #limpa a lista\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n sqlselect = \"SELECT cod_prod_entrada,prod_des_entrada,un_entrada,qtd_entrada,vl_init_entrada,vl_total_entrada FROM itens_entrada where id_entrada = '{}' \".format(Cod_Pesquisa_entry.get()) \n mycursor.execute(sqlselect) \n\n for itens in mycursor:\n ShowItens_tv.insert(\"\",\"end\",values=(itens))\n Cod_Pesquisa_entry.delete(0,END)\n \n\n\n \n else:\n messagebox.showinfo(title=\"ERRO\",message=\"Nota não encontrada\")\n Cod_Pesquisa_entry.delete(0,END)\n Cod_Pesquisa_entry.focus()\n\n\n def BuscarFornecedor():\n\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n sqlBuscarFornecedor = \"SELECT * FROM fornecedor WHERE cod_fornecedor = {}\".format(CodFornecedor_entry.get())\n mycursor.execute(sqlBuscarFornecedor)\n\n for data in mycursor:\n cod = data[0]\n razao_social = data[1]\n\n NomeFornecedor_entry[\"state\"] = \"normal\"\n NomeFornecedor_entry.delete(0,END)\n NomeFornecedor_entry.insert(0,razao_social)\n NomeFornecedor_entry[\"state\"] = \"disabled\"\n\n return razao_social\n\n def EntradaEstoque(): # Função para da Baixa no estoque dos produtos vendidos\n\n children = ShowItens_tv.get_children()\n #percorrendo tods os itens e pegado so a quantidade e 
o codigo do item\n for i in children: \n\n info = ShowItens_tv.item(i,\"values\")\n cod = info[0]\n qtd = info[3]\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n # Buscando a quantidade do estoque do produto.\n sqlEstoqueAtual = \"SELECT estoque FROM produtos WHERE cod_produto = {}\".format(cod)\n mycursor.execute(sqlEstoqueAtual)\n\n for estoqueAtual in mycursor:\n\n print(estoqueAtual)\n UpdateEstoque = int(estoqueAtual[0]) + int(qtd) # fazendo a operação para diminuir a quantidade vendida\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n # Baixando o estoque\n sqlUpdateEstoque = \"UPDATE produtos SET estoque = {} WHERE cod_produto = {}\".format(UpdateEstoque,cod)\n mycursor.execute(sqlUpdateEstoque)\n mycursor.close()\n connection.commit()\n connection.close() \n def FecharNota(): # Função Para fecjar a nota, colocando no campo nota_fechado= \"S\"\n\n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n CodEntrada_entry[\"state\"] = \"normal\"\n sqlselect = \"SELECT cod_entrada FROM entradanotas where cod_entrada = '{}' \".format(CodEntrada_entry.get()) # like (parecido com)\n \n mycursor.execute(sqlselect)\n valido = mycursor.fetchall()\n\n if len(valido) > 0:\n print(\"tem Entrada\")\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor= connection.cursor()\n \n CodEntrada_entry[\"state\"] = \"normal\"\n sqldeleteItens = \"DELETE FROM itens_entrada where id_entrada = {};\".format(CodEntrada_entry.get())\n mycursor.execute(sqldeleteItens)\n connection.commit() \n\n CodEntrada_entry[\"state\"] = \"normal\"\n sqldeleteVenda = \"DELETE FROM entradanotas where cod_entrada = {};\".format(CodEntrada_entry.get())\n mycursor.execute(sqldeleteVenda)\n \n mycursor.close()\n connection.commit()\n connection.close()\n\n\n\n\n # salvando os dados do pedido\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n CodEntrada_entry[\"state\"] = \"normal\" \n CodOperador_entry[\"state\"] = \"normal\" \n sqlVenda = \"INSERT INTO entradanotas(cod_entrada,nome_operador,id_fornecedor,vl_total,nota_fechado) VALUES('{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),CodOperador_entry.get(),CodFornecedor_entry.get(),total_pedido_entry.get(),\"S\")\n mycursor.execute(sqlVenda) \n\n mycursor.close()\n connection.commit()\n connection.close()\n \n children = ShowItens_tv.get_children()\n #percorrendo tods os itens e pegado so o valor total do item\n for i in children:\n info = ShowItens_tv.item(i,\"values\")\n cod = info[0]\n desc = info[1]\n un = info[2]\n qtd = info[3]\n vlunit = info[4]\n total_item = info[5]\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n sqlItens = \"INSERT INTO itens_entrada(id_entrada, cod_prod_entrada, prod_des_entrada,un_entrada, qtd_entrada, vl_init_entrada, vl_total_entrada) VALUES('{}','{}','{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),cod,desc,un,qtd,vlunit,total_item)\n mycursor.execute(sqlItens)\n mycursor.close()\n connection.commit()\n connection.close()\n EntradaEstoque() \n\n\n ShowItens_tv.delete(*ShowItens_tv.get_children()) #limpa a lista\n 
total_pedido_entry.delete(0,END)\n total_pedido_entry.insert(0,\"0,00\")\n CodEntrada_entry[\"state\"] = \"disabled\"\n CodOperador_entry[\"state\"] = \"disabled\"\n Ultimocodigo()\n \n else:\n print(\"Não tem pedido\")\n # salvando os dados do pedido\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n CodEntrada_entry[\"state\"] = \"normal\"\n CodOperador_entry[\"state\"] = \"normal\" \n CodFornecedor = CodFornecedor_entry.get() \n sqlEntrada = \"INSERT INTO entradanotas(cod_entrada,nome_operador,id_fornecedor,vl_total,nota_fechado) VALUES('{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),CodOperador_entry.get(),CodFornecedor,total_pedido_entry.get(),\"S\")\n mycursor.execute(sqlEntrada) \n\n mycursor.close()\n connection.commit()\n connection.close()\n \n children = ShowItens_tv.get_children()\n #percorrendo tods os itens e pegado so o valor total do item\n for i in children:\n info = ShowItens_tv.item(i,\"values\")\n cod = info[0]\n desc = info[1]\n un = info[2]\n qtd = info[3]\n vlunit = info[4]\n total_item = info[5]\n\n connection = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n sqlItens = \"INSERT INTO itens_entrada(id_entrada, cod_prod_entrada, prod_des_entrada,un_entrada, qtd_entrada, vl_init_entrada, vl_total_entrada) VALUES('{}','{}','{}','{}','{}','{}','{}')\".format(CodEntrada_entry.get(),cod,desc,un,qtd,vlunit,total_item)\n mycursor.execute(sqlItens)\n mycursor.close()\n connection.commit()\n connection.close()\n EntradaEstoque()\n\n\n ShowItens_tv.delete(*ShowItens_tv.get_children()) #limpa a lista\n total_pedido_entry.delete(0,END)\n total_pedido_entry.insert(0,\"0,00\")\n CodEntrada_entry[\"state\"] = \"disabled\"\n CodOperador_entry[\"state\"] = \"disabled\"\n Ultimocodigo() \n \n \n \n # Label(Pedidos_window,text=\"Pedidos\").grid(row=0,column=0,sticky=W,pady=10)\n #codigo\n CodEntrada_label = Label(Pedidos_window,text=\"Cód. 
Entrada:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n CodEntrada_label.grid(row=0,column=0,sticky=W)\n\n CodEntrada_entry = Entry(Pedidos_window,width=8, bd=4)\n CodEntrada_entry.grid(row=0,column=1,sticky=W)\n Ultimocodigo()\n \n #operador\n CodOperador_label = Label(Pedidos_window,text=\"Operador:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n CodOperador_label.grid(row=0,column=2,sticky=W)\n\n \n \n \n connection = mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdlanchonete\")\n mycursor = connection.cursor()\n\n sqlselectUsuario = \"select * from log_usuario \" # like (parecido com) \n mycursor.execute(sqlselectUsuario)\n\n for user in mycursor:\n usuario = user[0]\n\n CodOperador_entry = Entry(Pedidos_window,width=15, bd=4)\n CodOperador_entry.grid(row=0,column=3,sticky=W)\n\n CodOperador_entry.insert(0,usuario)\n CodOperador_entry[\"state\"] = \"disabled\"\n\n #Fornecedor\n CodFornecedor_label = Label(Pedidos_window,text=\"Fornecedor:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n CodFornecedor_label.grid(row=0,column=12,sticky=W)\n\n CodFornecedor_entry = Entry(Pedidos_window,width=15, bd=4)\n CodFornecedor_entry.grid(row=0,column=13,sticky=W)\n\n\n\n #Pesquisar Pedido\n Cod_Pesquisa_label = Label(Pedidos_window,text=\"Pesquisar Entrada:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n Cod_Pesquisa_label.grid(row=0,column=4,sticky=W)\n \n Cod_Pesquisa_entry = Entry(Pedidos_window,width=8, bd=4)\n Cod_Pesquisa_entry.grid(row=0,column=8,sticky=W)\n \n # Colocando Imagem no botao\n width = 20\n height = 20\n img = Image.open(\"imagens/ico.pesquisar.png\")\n img = img.resize((width,height), Image.ANTIALIAS)\n photoImg2 = ImageTk.PhotoImage(img)\n\n # imgpesq = PhotoImage(file=\"Imagens/ico.pesquisar.png\")\n # imgpesq.configure(width=5,height=5)\n btPesquisar = Button(Pedidos_window,text=\"Pesquisar\",image=photoImg2, bg=\"#DCDCDC\",command=BuscarFornecedor)\n btPesquisar.place(x=790,y=0)\n\n NomeFornecedor_entry = Entry(Pedidos_window,width=15, bd=4)\n NomeFornecedor_entry.place(x=820,y=0)\n NomeFornecedor_entry[\"state\"] = \"disabled\"\n\n\n\n #produto\n CodProd_label = Label(Pedidos_window,text=\"Cód. 
Prod:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n CodProd_label.grid(row=2,column=0,sticky=W)\n\n CodProd_entry = Entry(Pedidos_window,width=8, bd=4)\n CodProd_entry.place(x=89,y=25)\n \n # Colocando Imagem no botao\n width = 20\n height = 20\n img = Image.open(\"imagens/ico.pesquisar.png\")\n img = img.resize((width,height), Image.ANTIALIAS)\n photoImg = ImageTk.PhotoImage(img)\n\n # imgpesq = PhotoImage(file=\"Imagens/ico.pesquisar.png\")\n # imgpesq.configure(width=5,height=5)\n btvendas = Button(Pedidos_window,text=\"Pesquisar\",image=photoImg, bg=\"#DCDCDC\",command=pesquisarProdutos)\n btvendas.place(x=150,y=25)\n\n DescProd_entry = Entry(Pedidos_window,width=19, bd=4)\n DescProd_entry.place(x=195,y=25)\n #UN\n Un_label = Label(Pedidos_window,text=\"UN:\", bg=\"#C0C0C0\",font=\"Britannic 10 bold\")\n Un_label.place(x=325,y=25)\n\n Un_entry = Entry(Pedidos_window,width=7, bd=4)\n Un_entry.place(x=350,y=25)\n \n #quantidade\n Qtd_label = Label(Pedidos_window,text=\"Qtd.:\",width=7, bg=\"#C0C0C0\",font=\"Britannic 10 bold\")\n Qtd_label.place(x=410,y=25)\n \n\n Qtd_label_entry = Entry(Pedidos_window,width=16, bd=4)\n Qtd_label_entry.place(x=480,y=25)\n Qtd_label_entry.insert(0,1)\n Qtd_label_entry.bind(\"\", tecla)\n #valor\n VlUnit_label = Label(Pedidos_window,text=\"Valor Unit:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n VlUnit_label.grid(row=3,column=0,sticky=W)\n\n VlUnit_entry = Entry(Pedidos_window,width=10, bd=4)\n VlUnit_entry.grid(row=3,column=1,sticky=W)\n VlUnit_entry.bind(\"\", tecla)\n #valor total\n VlTotal_label = Label(Pedidos_window,text=\"Total:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n VlTotal_label.place(x=160,y=50)\n\n VlTotal_entry = Entry(Pedidos_window,width=12, bd=4)\n VlTotal_entry.place(x=200,y=50)\n\n #botoes\n\n PesquisarPedido= Button(Pedidos_window,text=\"Pesquisar\", bg=\"#C0C0C0\", width= 5, padx=20, pady=2,command=PesquisarNota)\n PesquisarPedido.grid(row=0,column=11,sticky=W)\n\n view_exist = Button(Pedidos_window,text=\"Salva Nota\", bg=\"#C0C0C0\", width= 10, padx=20, pady=2, borderwidth=5,command=SalvarNota)\n view_exist.place(x=20,y=350)\n\n BtnFecharEntrada = Button(Pedidos_window,text=\"Fechar Nota\", bg=\"#C0C0C0\", width= 10, padx=20, pady=2, borderwidth=5,command=FecharNota)\n BtnFecharEntrada.place(x=150,y=350)\n\n Item_add = Button(Pedidos_window,text=\"Adicionar\", bg=\"#C0C0C0\", width= 5, height=1, padx=20, pady=2, command=inserir_lista)\n Item_add.place(x=300,y=50)\n\n Item_ex = Button(Pedidos_window,text=\"Excluir\", bg=\"#C0C0C0\", width= 5, padx=20, pady=2, command=excluir_item_lista)\n Item_ex.place(x=390,y=50)\n\n\n\n Total_label = Label(Pedidos_window,text=\"Valor Total:\",bg=\"#C0C0C0\", font=\"Britannic 10 bold\")\n Total_label.place(x=460,y=350)\n\n total_pedido_entry = Entry(Pedidos_window,width=9, bd=4)\n total_pedido_entry.place(x=540,y=350)\n total_pedido_entry.insert(0,\"0,00\")\n\n\n\n\n #Treeview\n ShowItens_tv = ttk.Treeview(Pedidos_window,columns=('id','descricao','un','qtd','valorunit','valortotal'),show='headings')\n ShowItens_tv.column('id',minwidth=0,width=60)\n ShowItens_tv.column('descricao',minwidth=0,width=250)\n ShowItens_tv.column('un',minwidth=0,width=55)\n ShowItens_tv.column('qtd',minwidth=0,width=55)\n ShowItens_tv.column('valorunit',minwidth=0,width=55)\n ShowItens_tv.column('valortotal',minwidth=0,width=55)\n\n ShowItens_tv.heading('id',text=\"Cod. 
Pro\")\n ShowItens_tv.heading('descricao',text=\"Descrição\")\n ShowItens_tv.heading('un',text=\"Unid.\")\n ShowItens_tv.heading('qtd',text=\"Qtde.\")\n ShowItens_tv.heading('valorunit',text=\"Vr Unitário\")\n ShowItens_tv.heading('valortotal',text=\"Vr Total\")\n ShowItens_tv.place(x=90,y=100)\n\n \n\n\n Pedidos_window.mainloop()\n\n","sub_path":"entradaNotas.py","file_name":"entradaNotas.py","file_ext":"py","file_size_in_byte":31090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"113757262","text":"#encoding=utf-8\nimport io\n\nimport scrapy\n\nclass BaseSpider(scrapy.Spider):\n name = 'base'\n allowed_domains = ['scrapyd.cn']\n start_urls = [\n 'http://lab.scrapyd.cn/page/1/',\n ]\n\n def parse(self, response):\n content = response.xpath(\"//div[@class='quote post']//span[@class='text']/text()\").extract()\n with io.open(\"lab.html\", 'w', encoding='utf-8') as f:\n for i in content:\n f.write(i+\"\\n\")\n print ('保存文件: lab.html')\n f.close()","sub_path":"spider/hello/hello/spiders/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"118219227","text":"\"\"\"Testing for dynamodb backend\"\"\"\n\nimport gludb.config\n\nfrom simple_data_tests import SimpleStorage, DefaultStorageTesting\nfrom index_tests import IndexReadWriteTesting, IndexedData\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n def setUp(self):\n gludb.config.default_database(None) # no default database\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'dynamodb'\n ))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.backends.dynamodb.delete_table(\n SimpleStorage.get_table_name()\n )\n gludb.config.clear_database_config()\n\n\nclass DynamoDBIndexReadWriteTesting(IndexReadWriteTesting):\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('dynamodb'))\n IndexedData.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.backends.dynamodb.delete_table(\n IndexedData.get_table_name()\n )\n gludb.config.clear_database_config()\n","sub_path":"tests/dynamodb_tests.py","file_name":"dynamodb_tests.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"39425625","text":"\nimport os\nimport numpy as np\nimport cv2\nimport glob\nimport io\n\nimport tensorflow as tf\nfrom PIL import Image\nfrom object_detection.utils import dataset_util\n\nSAMPLES_PER_TAG = 200\nmetadata = []\n\nscenery = './scenery/*'\nannotated_images = './annotated_images/'\n\ntags = {'red', 'green', 'yellow', 'red-green'}\nlabelmap = dict(zip(tags, range(1, len(tags)+1)))\n\ndef create_yaml_entry(tag, x_width, xmin, y_height, ymin, filename):\n anno = '''- annotations:\n - {class: %s, x_width: %d, xmin: %d, y_height: %d, ymin: %d}\n class: image\n filename: %s\n''' % (tag, x_width, xmin, y_height, ymin, filename)\n return anno\n\ndef create_csv_entry(tag, labelmap, x_width, xmin, y_height, ymin, filename):\n anno = '''%s, %d, %d, %d, %d, %d, %s\n''' % (tag, labelmap[tag], x_width, xmin, y_height, ymin, filename)\n return anno\n\ndef create_label_map(labelmap):\n with open(\"labelmap.pbtxt\", \"w\") as lmpbtxt:\n for k,v in labelmap.items():\n t = '''item {\n id: %d\n name: \\'%s\\'\n}\n''' % (v, k)\n\n lmpbtxt.write(t)\n\ndef create_tf_record(tag, labelmap, x_width, xmin, y_height, 
ymin, fname):\n global annotated_images\n path = annotated_images\n\n with tf.gfile.GFile(os.path.join(path, '{}'.format(fname)), 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n\n filename = fname.encode()\n image_format = b'jpg'\n xmins = []\n xmaxs = []\n ymins = []\n ymaxs = []\n classes_text = []\n classes = []\n\n xmins.append(xmin / width)\n xmaxs.append((xmin+x_width) / width)\n ymins.append(ymin / height)\n ymaxs.append((ymin+y_height) / height)\n classes_text.append(tag.encode('utf8'))\n classes.append(labelmap[tag])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(filename),\n 'image/source_id': dataset_util.bytes_feature(filename),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return tf_example\n\ndef simple_augment(tags, labelmap, scenery, metadata, train_or_test, amount):\n directory = \"./positives/*/{}/*\".format(\"red\")\n\n writer = tf.python_io.TFRecordWriter(\"{}.record\".format(train_or_test))\n\n S = []\n scenery_files = glob.glob(scenery)\n print(\"found %d files in scenery directory %s\" % (len(scenery_files), scenery))\n\n for s in scenery_files:\n img = cv2.imread(s)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (800,600), interpolation=cv2.INTER_CUBIC)\n S.append(img)\n \n S = np.array(S)\n\n # Read X Vector\n\n for tag in tags:\n directory = \"./positives/*/{}/*\".format(tag)\n\n rois_images = glob.glob(directory)\n \n R = []\n \n for r in rois_images:\n img = cv2.imread(r)\n R.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n \n R = np.array(R)\n \n for i in range(SAMPLES_PER_TAG):\n r_rand = np.random.randint(0, len(R))\n s_rand = np.random.randint(0, len(S))\n\n roi = R[r_rand]\n scenery = S[s_rand]\n \n scale_down = np.random.randint(10, 100) / 100.\n roi = cv2.resize(roi, (0,0), fx=scale_down, fy=scale_down)\n \n annotated_image = np.copy(scenery)\n \n x_width = roi.shape[1]\n xmin = np.random.randint(10, scenery.shape[1] - x_width - 10)\n y_height = roi.shape[0]\n ymin = np.random.randint(10, scenery.shape[0] - y_height - 10)\n \n filename = \"%s_%d.jpg\" % (tag, i)\n \n annotated_image[ymin:ymin+y_height,xmin:xmin+x_width ] = roi\n \n annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(annotated_images + filename, annotated_image)\n \n #yaml = create_yaml_entry(tag, x_width, xmin, y_height, ymin, filename)\n csv = create_csv_entry(tag, labelmap, x_width, xmin, y_height, ymin, filename)\n tfrec = create_tf_record(tag, labelmap, x_width, xmin, y_height, ymin, filename)\n\n writer.write(tfrec.SerializeToString())\n metadata.append(csv)\n\n\n writer.close()\n\nsimple_augment(tags, labelmap, scenery, metadata, \"train\", 200)\n\ncreate_label_map(labelmap)\n\nwith open(\"./annotation.csv\", \"w\") as metadata_file:\n for line in metadata:\n 
metadata_file.write(line)\n\n\n","sub_path":"workbench/Augmentation/SimpleAugment.py","file_name":"SimpleAugment.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"353163976","text":"#Generator functions create iterators\n\ndef isprimenumber(num):\n\tif num == 1: \n\t\t\treturn False\n\tfor x in range(2, num):\n\t\tif num % x == 0: \n\t\t\t\treturn False\n\telse: \n\t\treturn True\n\t\t\ndef primes(num = 1):\n\twhile(True):\n\t\tif isprimenumber(num): yield num\n\t\tnum += 1\n\n#main\nfor num in primes():\n\tif num > 100: break\n\tprint(num)","sub_path":"generation.py","file_name":"generation.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"539787855","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 4 08:05:16 2018\r\n\r\n@author: rundo\r\n\"\"\"\r\n\r\nfrom astropy.table import Table\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.lines as mlines\r\nimport matplotlib.transforms as mtransforms\r\n\r\n\r\ndata = Table.read('asu.fit')\r\nprint(data.colnames)\r\nprint (len(data))\r\n\r\n\r\nband4_5 = data['__4_5_']\r\nband5_8 = data['__5_8_']\r\nband3_6 = data['__3_6_']\r\nband8_0 = data['__8_0_']\r\nband24 = data['__24_']\r\n\r\na = band4_5-band8_0\r\nb = band3_6-band5_8\r\nPAH_index_1 = []\r\nfor i in range(0,332442):\r\n if b[i] < 1.5:\r\n if a[i]>1:\r\n if b[i]<(1.5/2)*(a[i]-1):\r\n if band4_5[i] > 11.5:\r\n PAH_index_1.append(i)\r\nPAH_1 = data[PAH_index_1]\r\ndata.remove_rows(PAH_index_1) \r\n\r\nband4_5 = data['__4_5_']\r\nband5_8 = data['__5_8_']\r\nband3_6 = data['__3_6_']\r\nband8_0 = data['__8_0_']\r\nband24 = data['__24_']\r\n \r\nc = band5_8-band8_0\r\nd = band4_5-band5_8\r\nPAH_index_2 = []\r\nfor i in range(0,len(data)):\r\n if d[i] < 1.05:\r\n if c[i]>1:\r\n if d[i]<(1.05/1.2)*(c[i]-1):\r\n if band4_5[i] > 11.5:\r\n PAH_index_2.append(i)\r\n\r\nPAH_2 = data[PAH_index_2]\r\ndata.remove_rows(PAH_index_2)\r\n \r\n\r\n\r\n###############################################\r\nband4_5 = data['__4_5_']\r\nband5_8 = data['__5_8_']\r\nband3_6 = data['__3_6_']\r\nband8_0 = data['__8_0_']\r\nband24 = data['__24_']\r\n\r\na = band4_5-band8_0\r\nb = band4_5\r\nAGN_index = []\r\n\r\n\r\n\r\nfor i in range(0,len(data)):\r\n if band4_5[i] > 13.5:\r\n if a[i] >0.5:\r\n if b[i]>13.5+(a[i]-2.3)/0.4:\r\n AGN_index.append(i)\r\n elif band4_5[i] > 14.5:\r\n if b[i]> 14+(a-0.5):\r\n if b[i] > 14.5-(a-1.2)/0.3:\r\n AGN_index.append(i)\r\n\r\nAGN = data[AGN_index]\r\ndata.remove_rows(AGN_index)\r\nprint ('#PAH_1=',len(PAH_1))\r\nprint ('#PAH_2=',len(PAH_2))\r\nprint ('#AGN=',len(AGN))\r\nprint (len(data))\r\n\r\n\r\na_1 = PAH_1['__4_5_']-PAH_1['__8_0_']\r\nb_1 = PAH_1['__4_5_']\r\na_2 = PAH_2['__4_5_']-PAH_2['__8_0_']\r\nb_2 = PAH_2['__4_5_']\r\nplt.figure(figsize=(10,10))\r\n\r\nplt.scatter(a,b)\r\nplt.scatter(a[AGN_index],b[AGN_index], color = 'b')\r\nplt.scatter(a_1,b_1,color = 'black', marker = '.')\r\nplt.scatter(a_2,b_2, color = 'black', marker = '.')\r\nplt.xlabel('[4.5]-[8.0]')\r\nplt.ylabel('[4.5]')\r\nplt.title('AGN star')\r\n\r\n\r\n \r\n","sub_path":"Murray/rework/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"418281799","text":"from datetime import date\r\nimport os\r\n\r\nclass Empresa:\r\n id=0\r\n def __init__(self, 
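A note on the SimpleAugment.py record above: a mistyped feature key in create_tf_record would only surface at training time, so a quick read-back pass on the written file is worth a few lines. The sketch below uses the same TF 1.x API family as the writer (tf.python_io); the feature keys match the ones serialized above, and train.record is the file produced by the "train" run.

import tensorflow as tf

# iterate the serialized examples and decode a couple of features per record
for raw in tf.python_io.tf_record_iterator("train.record"):
    example = tf.train.Example.FromString(raw)
    feats = example.features.feature
    label = feats["image/object/class/text"].bytes_list.value[0].decode("utf8")
    xmin = feats["image/object/bbox/xmin"].float_list.value[0]
    print(label, round(xmin, 3))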
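A note on the astropy test.py record above: each colour cut walks 332,442 rows in a Python for loop and collects indices by hand; the same selection can be expressed as one boolean mask over the table columns, which is faster and harder to get wrong. A sketch of the first PAH cut under that rewrite, with the column names used in the record:

from astropy.table import Table
import numpy as np

data = Table.read('asu.fit')
a = data['__4_5_'] - data['__8_0_']
b = data['__3_6_'] - data['__5_8_']
# same four conditions as the loop, evaluated element-wise in one pass
mask = (b < 1.5) & (a > 1) & (b < (1.5 / 2) * (a - 1)) & (data['__4_5_'] > 11.5)
PAH_1 = data[mask]                   # boolean indexing returns a new Table
data.remove_rows(np.where(mask)[0])  # mirrors the in-place removal above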
ruc='',direccion='' , tlfn='', rS='', empleado='', descrip=''):\r\n Empresa.id+=1\r\n self.id= Empresa.id\r\n self.ruc= ruc\r\n self.direccion=direccion\r\n self.telefono= tlfn\r\n self.razonSocial=rS\r\n self.empleados= empleado\r\n self.departamento= Departamento(descrip, empleado)\r\n\r\n def mostrarEmpresa(self):\r\n print(\"Empresa: {}\\nId: {}\\n Ruc: {}\\nTelefono: {}\\nDirección: {}\".format(self.razonSocial, self.id, self.ruc, self.telefono, self.direccion))\r\n\r\nclass Departamento:\r\n id=0\r\n def __init__(self, descripcion,empleado):\r\n Departamento.id += 1\r\n self.id=Departamento.id\r\n self.descripcion= descripcion\r\n self.empleado=empleado\r\n\r\n def mostrarDepartamento(self):\r\n print(\"Id: {}\\n Descripcion: {}\" .format(self.id, self.descripcion))\r\n\r\nclass Empleado:\r\n id = 0 \r\n def __init__(self, nom='', sueldo='', tel='', fechaIng='', valorH=0):\r\n Empleado.id += 1\r\n self.id= Empleado.id\r\n self.nombre= nom\r\n self.sueldo= sueldo\r\n self.telefono= tel\r\n self.fechaIngreso= fechaIng\r\n self.valorHora= valorH\r\n \r\n def valorHor(self):\r\n self.valorHora= self.sueldo/240\r\n return self.valorHora\r\n\r\n def mostrarEmpleado(self):\r\n print(\"Empleado: {}\\nID: {}\\nTelefono: {}\\nSueldo: {}\\nFecha de Ingreso: {}\".format(self.nombre, self.id, self.telefono,\r\n self.sueldo, self.fechaIngreso))\r\n\r\nclass EmpleadoObrero(Empleado):\r\n id=0\r\n def __init__(self,nom='', sueldo=0, tel='', fechaIng='', valorH=0,sindicato= True, contratoColectivo= True):\r\n super().__init__(nom,sueldo,tel,fechaIng, valorH)\r\n EmpleadoObrero.id += 1\r\n self.id=EmpleadoObrero.id\r\n self.sindicato= sindicato\r\n self.__contratoColectivo= contratoColectivo\r\n \r\n @property\r\n def contraCole(self):\r\n return self.__contratoColectivo\r\n \r\n def valorHor(self):\r\n super().valorHora()\r\n\r\n def mostrarEmpleadoObrero(self):\r\n print(\"Empleado Obrero: {}\\n Id: {}\\n Sindicato: {}\\n ContratoColectivo: {}\".format(self.nombre, self.id, self.sindicato, self.__contratoColectivo))\r\n \r\nclass EmpleadoAdministrativo(Empleado):\r\n id=0\r\n def __init__(self, nom='', sueldo='', tel='', fechaIng='',valorH=0, comision=True):\r\n super().__init__(nom, sueldo, tel, fechaIng, valorH)\r\n EmpleadoAdministrativo.id += 1\r\n self.id=EmpleadoAdministrativo.id\r\n self.comision=comision\r\n \r\n def valorHor(self):\r\n super().valorHora()\r\n \r\n def mostrarEmpleadoAdministrativo(self):\r\n print(\"Empleado Oficina: {}\\n Id: {}\\n Comisión: {}%\".format(self.nombre, self.id, self.comision))\r\n\r\nclass Prestamos:\r\n id=0\r\n def __init__(self, fecha='', valor=0, numPagos=0, empleado='', saldo=0, cuota=0, estado=True):\r\n Prestamos.id +=1\r\n self.id= Prestamos.id\r\n self.fecha= fecha\r\n self.valor= valor\r\n self.numPagos=numPagos\r\n self.cuota= cuota\r\n self.saldo= saldo\r\n self.estado= estado\r\n self.empleado= empleado\r\n \r\n def prestamo(self):\r\n self.cuota= self.valor/self.numPagos\r\n self.saldo= self.valor-self.cuota\r\n return self.cuota, self.saldo\r\n \r\n def mostrarPrestamos(self):\r\n print(\"Fecha de prestamos: {} \\nValor prestado: {}\\nNumero de Pagos: {}\\nCuota: {}\\nEstado:{}\\nSaldo: {}\" .format(self.fecha, self.valor, self.numPagos, \r\n self.cuota, self.estado, self.saldo))\r\n\r\nclass Sobretiempo:\r\n id=0\r\n def __init__(self, horRe=0, horExt=0, fecha='', empleado='', estado=False, totSobret=0):\r\n Sobretiempo.id += 1\r\n self.id= Sobretiempo.id\r\n self.horasRecargos= horRe\r\n self.horasExtraordinarias= horExt\r\n self.fecha= 
fecha\r\n self.estado= estado\r\n self.totalSobretiempo=totSobret\r\n self.empleado= empleado\r\n \r\n def sobretiempo(self):\r\n self.totalSobretiempo = round((self.empleado.valorHora + (horasRecargados*0.50+horasExtraordinaria*2)), 2)\r\n return self.totalSobretiempo\r\n\r\n def mostrarSobretiempo(self):\r\n print(\"Id: {}\\n Horas Recargadas: {}\\n Horas Extraordinarias: {}\\n Estado: {}\" .format(self.id, self.horasRecargos,\r\n self.horasExtraordinarias, self.estado))\r\n\r\nclass Deducciones:\r\n id= 0\r\n def __init__(self, iess, comision=0, antiguedad=0):\r\n Deducciones.id += 1\r\n self.id= Deducciones.id\r\n self.iess= iess \r\n self.__comision= comision\r\n self.__antiguedad= antiguedad\r\n \r\n @property\r\n def comision(self):\r\n return self.__comision\r\n \r\n @property\r\n def antiguedad(self):\r\n return self.__antiguedad\r\n\r\n def mostrarDeducciones(self):\r\n print(\"Id: {}\\n Iees: {}\\n Comisión: {}\\n Antiguedad: {}\" .format(self.id, self.iess, self.comision, self.antiguedad))\r\n\r\nclass Nomina:\r\n id= 0\r\n def __init__(self, fecha='', sueldo=0, comision=0, antiguedad=0, iess=0, empleado='', sobret=0, prest=0):\r\n Nomina.id += 1\r\n self.id= Nomina.id\r\n self.fecha= fecha\r\n self.sueldo= sueldo\r\n self.comision= round((comision * sueldo), 2)\r\n self.antiguedad= self.calculoAnti(antiguedad, self.fecha, empleado.fechaIngreso, self.sueldo)\r\n self.iess= round((iess*(self.sueldo+sobret.totalSobretiempo)), 2)\r\n self.totIngreso= self.sueldo + sobret.totalSobretiempo + self.comision + self.antiguedad\r\n self.totDes= round((self.iess + prest.cuota), 2)\r\n self.liquidoRecibir= self.totIngreso - self.totDes\r\n self.empleado= empleado\r\n \r\n def calculoAnti(self, anti=0, fechaNomina=0, fechaIngreso=0, sueldo=0):\r\n fechas = str(fechaNomina - fechaIngreso)\r\n numeroDiasStr = []\r\n dias = ''\r\n # OBTENGO EL NUMERO DE DIAS DE DIFERENCIA EN STR.\r\n for num in fechas:\r\n try:\r\n int(num)\r\n numeroDiasStr.append(num)\r\n except ValueError:\r\n break\r\n # OBTENGO EL NUMERO DE DIAS DE DIFERENCIA EN INT.\r\n for numeroDia in numeroDiasStr:\r\n dias += numeroDia\r\n dias = int(dias)\r\n return round(((anti*dias)/(365*sueldo)), 2)\r\n\r\n def mostrarNomina(self):\r\n print(\"Id: {}\\nFecha Nomina: {}\\nSueldo: {}\\nTotal Ingreso: {}\\nComision: {}\\nAntiguedad: {}\\nIess: {}\\nTotal Descuento: {}\\nLiquido a Recibir: {}\" .format(\r\n self.id, self.fecha, self.sueldo, self.totIngreso, self.comision, self.antiguedad, self.iess, self.totDes, self.liquidoRecibir))\r\n\r\n\r\nos.system(\"cls\")\r\nprint(\"---DATOS DE LA EMPRESA---\")\r\nrazonsocial= input(\"Ingrese la razón social de la empresa: \")\r\nruc= int(input(\"Ingrese el R.U.C de la empresa: \"))\r\ndireccion= input(\"Ingrese la dirección donde se encuentra la empresa: \")\r\ntlfn= int(input(\"Ingrese el número telefonico de la empresa: \"))\r\nprint(\"\")\r\nprint(\"---DATOS EMPLEADO---\")\r\nnombre= input(\"Ingrese el nombre del empleado: \")\r\ntelefono= int(input(\"Ingrese el número celular del empleado {}: \".format(nombre)))\r\nsueldo= float(input(\"Ingrese el sueldo del empleado {}: \".format(nombre)))\r\naño= int(input(\"Ingrese el año que ingreso el empleado {}: \".format(nombre)))\r\nmes= int(input(\"Ingrese el mes que ingreso el empleado {}: \".format(nombre)))\r\ndia= int(input(\"Ingrese el dia que ingreso el empleado {}: \".format(nombre)))\r\nfechaIngreso= date(año,mes,dia)\r\ndescripcion= input(\"El empleado a que departamento pertenece [Administrativo, Obrero]: 
\").capitalize()\r\ndescripcionDepa = input(\"Descripción del departamento: \")\r\nif descripcion==\"Administrativo\":\r\n comision=float(input(\"Ingrese la comision que posee el empleado: \"))\r\n pres= input(\"El empleado {} va a realizar prestamos [Si, No]: \".format(nombre)).capitalize()\r\n if pres==\"Si\":\r\n añoP= int(input(\"Ingrese el año que hizo el prestamo el empleado {}: \".format(nombre)))\r\n mesP= int(input(\"Ingrese el mes que hizo el prestamo el empleado {}: \".format(nombre)))\r\n diaP= int(input(\"Ingrese el dia que hizo el prestamo el empleado {}: \".format(nombre)))\r\n fechaP= date(añoP,mesP,diaP)\r\n valor= float(input(\"Ingrese el valor del prestamo: \"))\r\n numPagos= int(input(\"Ingrese los numeros de pago que va realizar: \"))\r\n sobret= input(\"El empleado {} realizo sobretiempo [Si, No]: \".format(nombre)).capitalize()\r\n if sobret==\"Si\":\r\n añoS= int(input(\"Ingrese el año que hizo el sobretiempo el empleado {}: \".format(nombre)))\r\n mesS= int(input(\"Ingrese el mes que hizo el sobretiempo el empleado {}: \".format(nombre)))\r\n diaS= int(input(\"Ingrese el dia que hizo el sobretiempo el empleado {}: \".format(nombre)))\r\n fechaS= date(añoS,mesS,diaS)\r\n horasRecargados= float(input(\"Ingrese las horas recargadas que hizo el empleado {}: \".format(nombre)))\r\n horasExtraordinaria= float(input(\"Ingrese las horas extraordinarias que hizo el empleado {}: \".format(nombre)))\r\n iess= float(input(\"Ingrese el porcentaje del Iess: \"))\r\n añoN= int(input(\"Ingrese el año que se realizo el pago al empleado {}: \".format(nombre)))\r\n mesN= int(input(\"Ingrese el mes que se realizo el pago al empleado {}: \".format(nombre)))\r\n diaN= int(input(\"Ingrese el dia que se realizo el pago al empleado {}: \".format(nombre)))\r\n fechaN= date(añoN,mesN,diaN)\r\nelse:\r\n sindicato= input(\"Ingrese el sindicato que pertenece el empleado Obrero: \")\r\n pres= input(\"El empleado {} va a realizar prestamos [Si, No]: \".format(nombre)).capitalize()\r\n if pres==\"Si\":\r\n añoP= int(input(\"Ingrese el año que hizo el prestamo el empleado {}: \".format(nombre)))\r\n mesP= int(input(\"Ingrese el mes que hizo el prestamo el empleado {}: \".format(nombre)))\r\n diaP= int(input(\"Ingrese el dia que hizo el prestamo el empleado {}: \".format(nombre)))\r\n fechaP= date(añoP,mesP,diaP)\r\n valor= float(input(\"Ingrese el valor del prestamo: \"))\r\n numPagos= int(input(\"Ingrese los numeros de pago que va realizar: \"))\r\n sobret= input(\"El empleado {} realizo sobretiempo [Si, No]: \".format(nombre)).capitalize()\r\n if sobret==\"Si\":\r\n añoS= int(input(\"Ingrese el año que hizo el sobretiempo el empleado {}: \".format(nombre)))\r\n mesS= int(input(\"Ingrese el mes que hizo el sobretiempo el empleado {}: \".format(nombre)))\r\n diaS= int(input(\"Ingrese el dia que hizo el sobretiempo el empleado {}: \".format(nombre)))\r\n fechaS= date(añoS,mesS,diaS)\r\n horasRecargados= float(input(\"Ingrese las horas recargadas que hizo el empleado {}: \".format(nombre)))\r\n horasExtraordinaria= float(input(\"Ingrese las horas extraordinarias que hizo el empleado {}: \".format(nombre)))\r\n iess= float(input(\"Ingrese el porcentaje del Iess: \"))\r\n antiguedad = float(input(\"Por antiguedad, cuanto es el recargo: $\"))\r\n añoN= int(input(\"Ingrese el año que se realizo el pago al empleado {}: \".format(nombre)))\r\n mesN= int(input(\"Ingrese el mes que se realizo el pago al empleado {}: \".format(nombre)))\r\n diaN= int(input(\"Ingrese el dia que se realizo el pago al empleado 
{}: \".format(nombre)))\r\n fechaN= date(añoN,mesN,diaN)\r\n\r\n\r\nos.system(\"cls\")\r\nprint(\"\")\r\nprint(\"---EMPRESA---\")\r\nif descripcion == 'Administrativo': emp = EmpleadoAdministrativo(nombre,sueldo,telefono,fechaIngreso,0,comision)\r\nelse: emp= EmpleadoObrero(nombre,sueldo,telefono,fechaIngreso, 0,sindicato, True)\r\nempresa= Empresa(ruc, direccion, tlfn, razonsocial, emp, descripcionDepa)\r\nif pres==\"Si\":\r\n prest= Prestamos(fechaP, valor, numPagos, emp, 0,0, True)\r\nelse: prest=Prestamos()\r\nif sobret==\"Si\":\r\n sobret= Sobretiempo(horasRecargados, horasExtraordinaria, fechaS, emp, True, 0)\r\nelse: sobret = Sobretiempo()\r\nif descripcion == 'Administrativo':\r\n deduc= Deducciones(iess, comision)\r\n nomin= Nomina(fechaN, sueldo, deduc.comision, 0, iess, emp, sobret, prest)\r\nelse:\r\n deduc= Deducciones(iess=iess, antiguedad=antiguedad)\r\n nomin= Nomina(fechaN, sueldo, deduc.comision, deduc.antiguedad, iess, emp, sobret,prest)\r\n\r\nprint(\"\")\r\nempresa.mostrarEmpresa()\r\nprint(\"\")\r\nprint(\"-----DEPARTAMENTO-----\")\r\nempresa.departamento.mostrarDepartamento()\r\nprint(\"\")\r\nprint(\"-----EMPLEADO-----\")\r\nemp.mostrarEmpleado()\r\nprint(\"\")\r\nif isinstance(emp, EmpleadoAdministrativo): emp.mostrarEmpleadoAdministrativo()\r\nelse: emp.mostrarEmpleadoObrero()\r\nprint(\"\")\r\nprint(\"-----PRESTAMO-----\")\r\nprest.mostrarPrestamos()\r\nprint(\"\")\r\nprint(\"-----SOBRETIEMPO-----\")\r\nsobret.mostrarSobretiempo()\r\nprint(\"\")\r\nprint(\"-----DEDUCCIONES-----\")\r\ndeduc.mostrarDeducciones()\r\nprint(\"\")\r\nprint(\"-----PAGO DE NOMINA-----\")\r\nnomin.mostrarNomina()\r\nprint(\"\")\r\n","sub_path":"Tarea_Nomina.py","file_name":"Tarea_Nomina.py","file_ext":"py","file_size_in_byte":13056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"432284725","text":"peso = 0\r\nmaior = 0\r\nmenor = 0\r\nqnt = int(input('Digite quantas pessoas você quer analisar: '))\r\nfor c in range(1, qnt + 1):\r\n peso = int(input('Digite o peso da pessoa {}: '.format(c)))\r\n if c == 1:\r\n maior = peso\r\n menor = peso\r\n else:\r\n if peso > maior:\r\n maior = peso\r\n elif peso < menor:\r\n menor = peso\r\n\r\nprint('O maior peso é {}'.format(maior))\r\nprint('O menor peso é {}'.format(menor))","sub_path":"aula 13/desafio 055.py","file_name":"desafio 055.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"398323903","text":"#!/usr/bin/env python3\n# camel\n#jacob poncher\n# 11/8/17\n\n\"\"\" Camel Game \"\"\"\n\nimport random\nprint('Welcome to Camel!')\nprint('You have stolen a camel to make your way across the great mobi desert')\nprint('The natives want their camel back and are chasing you down!')\nprint('Survive your desert trek and out run the natives')\nmiles_traveled = 0\nthirst = 0\ncamel_tiredness =0 \nnatives_distance =-20\ndrinks = 5\ndone = False\nwhile not done:\n print()\n print('A.Drink from your canteen.')\n print('b.ahead moderate speed. 
')\n print('c.Ahead full speed.')\n print('d.stop for the night.')\n print('e.status check.')\n print('q .Quit')\n choice = input('choice: ')\n print()\n if choice == \"q\":\n done = True\n elif choice == \"e\":\n print()\n print(\"miles traveled: \",miles_traveled)\n print(\"drinks in canteen: \", drinks)\n print('the natives are',abs(natives_distance),\"miles behind you.\")\n print()\n elif choice == 'd':\n camel_tiredness = 0\n print('the camel is happy')\n natives_distance = natives_distance + random.randrange(7,14)\n elif choice == \"c\":\n miles_traveled = miles_traveled + random.randrange(10,21)\n print('miles traveled:',miles_traveled)\n thirst = thirst + 1\n camel_tiredness = camel_tiredness + random.randrange(1,4)\n natives_distance = natives_distance + random.randrange(7,14)\n elif choice == \"b\":\n miles_traveled = miles_traveled + random.randrange(5,12)\n print('miles traveled',miles_traveled)\n thirst = thirst + 1 \n camel_tiredness = camel_tiredness + 1\n natives_distance = natives_distance + random.randrange(7,14)\n elif choice == \"a\":\n if drinks > 0:\n drinks = drinks - 1\n else:\n print('error')\n else:\n print(\"That is not a valid choice! \")\n \n \n if thirst > 4 and thirst < 6:\n print('you are thirsty')\n if thirst > 6:\n print('you died of thirst')\n done = True \n \n if natives_distance >= 0:\n print('the natives have caught up')\n done = True \n elif natives_distance >= -15:\n print('The natives are getting close')\n \n if miles_traveled >= 200 and done == False:\n print('you win the game')\n miles_travled = 0\n thirst = 0 \n camel_tiredness = 0\n drinks = 5\n natives_distance = -20\n","sub_path":"Lab 04 - Camel/main_program.py","file_name":"main_program.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"359843278","text":"import logging \nimport sys \n\ndef get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n formatter=logging.Formatter('(%(asctime)s)-%(name)-5s-[%(levelname)-8s] %(message)s')\n stream_handler = logging.StreamHandler(sys.stdout) \n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.setLevel(logging.DEBUG)\n return logger","sub_path":"ljango/utils/loggingwrapper.py","file_name":"loggingwrapper.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"649250189","text":"import datetime\nfrom peewee import CharField, IntegerField, SmallIntegerField, DateTimeField\nfrom peewee import CompositeKey\nfrom BlockFileUpload import block_config\nfrom BlockFileUpload.models.base import BaseModel\n\n\nclass File(BaseModel):\n \"\"\"所有要进行分块上传的文件\n\n 存在的意义:\n 1. 为了防止文件重复上传.\n 2. 
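A note on the camel game in main_program.py above: choice "a" decrements drinks but never resets thirst, so drinking cannot actually save the player, and the win branch assigns to miles_travled (a typo), so the distance is never reset for the next trek. A self-contained sketch of both branches with the apparent intent restored:

# stand-in state so the snippet runs on its own
drinks, thirst, miles_traveled, done = 5, 5, 200, False
choice = "a"

if choice == "a":
    if drinks > 0:
        drinks -= 1
        thirst = 0  # drinking should quench thirst (missing in the original)
    else:
        print("error")

if miles_traveled >= 200 and not done:
    print("you win the game")
    miles_traveled = 0  # the original writes 'miles_travled', a dead variable

print(drinks, thirst, miles_traveled)  # 4 0 0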
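A note on the loggingwrapper.py record above: get_logger attaches a fresh StreamHandler on every call, and logging.getLogger returns the same object for a given name, so calling it twice makes every message print twice. A sketch of the usual guard:

import logging
import sys

def get_logger(logger_name):
    logger = logging.getLogger(logger_name)
    if not logger.handlers:  # wire the handler only once per named logger
        formatter = logging.Formatter(
            '(%(asctime)s)-%(name)-5s-[%(levelname)-8s] %(message)s')
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        logger.setLevel(logging.DEBUG)
    return logger

get_logger("demo").info("printed once")
get_logger("demo").info("still printed once per call")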
记录文件上传进度(也可以计算block得到)\n \"\"\"\n md5 = CharField() # 文件 md5\n location = CharField() # 全路径.\n size = IntegerField() # size in bytes\n status = SmallIntegerField(choices=block_config.FILE_STATUS, default=block_config.FILE_200_UPLOADING) # 文件状态\n allocate_offset = IntegerField(default=0) # 已分配的进度\n upload_offset = IntegerField(default=0) # 标识上传进度\n\n\nclass Block(BaseModel):\n \"\"\"已分配的块\n\n 分块上传的核心, 通过该表, 才能对分块进行控制, 才能进行断点续传.\n \"\"\"\n file_id = IntegerField()\n start = IntegerField()\n end = IntegerField()\n status = SmallIntegerField() # allocate, finish, cancel\n start_time = DateTimeField()\n end_time = DateTimeField(null=True)\n path = CharField(null=True, default=None)\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"52829690","text":"'''\nCreated on Dec 29, 2015\n\n@author: rasmus\n'''\nimport unittest\nimport numpy as np\n\nfrom entities.agent import Agent\nfrom entities.agent_action_manager import AgentActionManager\nfrom entities.simulation_element import Floor, Goal, Start, Wall, \\\n create_new_cell_from_existing_cell\nfrom entities.simulation_environment import SimulationEnvironment\nfrom graphics.rotation import Rotation, ROTATION\nfrom graphics.shape import RectangularShape, CircularShape, TriangularShape\nfrom utilities.action import Direction\nfrom utilities.position import Position2D\nfrom utilities.score import Score\n\n\nclass AgentTest(unittest.TestCase):\n def test_has_the_appropriate_attributes_and_functions(self):\n pos = Position2D(x=10, y=10)\n shape = RectangularShape(width=100, height=100)\n rotation = Rotation(ROTATION.UP)\n agent = Agent(shape=shape,\n position=pos,\n rotation=rotation)\n\n self.assertEqual(agent.get_x(), 10)\n self.assertEqual(agent.get_y(), 10)\n np.testing.assert_array_equal(agent.get_position(), Position2D(x=10, y=10))\n\n self.assertEqual(agent.get_shape().get_width(), 100)\n self.assertEqual(agent.get_shape().get_height(), 100)\n self.assertEqual(agent.get_rotation(), Rotation(ROTATION.UP))\n\n shape = CircularShape(radius=10)\n agent = Agent(shape=shape,\n position=pos,\n rotation=rotation)\n\n self.assertEqual(agent.get_shape().get_radius(), 10)\n\n agent1 = Agent(shape=shape,\n position=pos,\n rotation=rotation)\n agent2 = Agent(shape=shape,\n position=pos,\n rotation=rotation)\n\n self.assertNotEqual(agent1, agent2)\n\n\nclass SimulationElementTest(unittest.TestCase):\n def test_has_the_appropriate_attributes_and_functions(self):\n pos = Position2D(x=10, y=10)\n shape = RectangularShape(width=100, height=100)\n score = Score(value=10)\n rotation = Rotation(ROTATION.UP)\n floor = Floor(position=pos,\n shape=shape,\n rotation=rotation,\n score=score)\n\n self.assertEqual(floor.get_x(), 10)\n self.assertEqual(floor.get_y(), 10)\n self.assertEqual(floor.get_shape().get_width(), 100)\n self.assertEqual(floor.get_shape().get_height(), 100)\n self.assertEqual(floor.get_value(), 10)\n self.assertEqual(floor.get_rotation(), Rotation(ROTATION.UP))\n\n shape = TriangularShape(base=20, height=30)\n floor = Floor(position=pos,\n shape=shape,\n rotation=rotation,\n score=score)\n\n self.assertEqual(floor.get_shape().get_base(), 20)\n self.assertEqual(floor.get_shape().get_height(), 30)\n\n\nclass SimulationEnvironmentTest(unittest.TestCase):\n def test_has_the_appropriate_attributes_and_functions(self):\n sim_env = SimulationEnvironment(shape=RectangularShape(width=600, height=600),\n 
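A note on the block-upload models above (the Chinese docstrings, translated: File rows exist to prevent duplicate uploads and to record upload progress, which could also be computed from the blocks; Block rows are the core of chunked upload, recording each allocated byte range so interrupted uploads can resume). A sketch of how a Block might be allocated against a File with these models; BLOCK_SIZE and the status value are hypothetical stand-ins, since the block_config constants are not shown in this record, and BaseModel is assumed to bind the models to a database with a default id primary key.

import datetime

BLOCK_SIZE = 4 * 1024 * 1024  # hypothetical 4 MiB chunk size

def allocate_block(file, status_allocated=0):
    start = file.allocate_offset
    end = min(start + BLOCK_SIZE, file.size)
    block = Block.create(file_id=file.id, start=start, end=end,
                         status=status_allocated,
                         start_time=datetime.datetime.now())
    file.allocate_offset = end  # advance the allocation cursor
    file.save()
    return block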
cell_size=9,\n width_margin=1,\n height_margin=1)\n\n self.assertIsInstance(sim_env.get_shape(), RectangularShape)\n self.assertEqual(sim_env.get_shape().get_width(), 600)\n self.assertEqual(sim_env.get_shape().get_height(), 600)\n self.assertEqual(sim_env.get_cell_size(), 9)\n self.assertEqual(sim_env.get_width_margin(), 1)\n self.assertEqual(sim_env.get_height_margin(), 1)\n\n sim_env.set_cell_size(10)\n sim_env.set_width_margin(2)\n sim_env.set_height_margin(2)\n\n self.assertEqual(sim_env.get_cell_size(), 10)\n self.assertEqual(sim_env.get_width_margin(), 2)\n self.assertEqual(sim_env.get_height_margin(), 2)\n\n def test_valid_coords_when_creating_a_new_cell(self):\n sim_env = SimulationEnvironment(shape=RectangularShape(width=600, height=600),\n cell_size=9,\n width_margin=1,\n height_margin=1)\n\n self.assertEqual(sim_env.valid_cell_coordinates(300, 300), True)\n self.assertEqual(sim_env.valid_cell_coordinates(590, 590), True)\n self.assertEqual(sim_env.valid_cell_coordinates(591, 591), False)\n self.assertEqual(sim_env.valid_cell_coordinates(0, 0), True)\n self.assertEqual(sim_env.valid_cell_coordinates(-1, -1), False)\n self.assertEqual(sim_env.valid_cell_coordinates(601, 601), False)\n\n def test_successor_is_returning_the_valid_neighbors_of_a_cell(self):\n sim_env = SimulationEnvironment(shape=RectangularShape(30, 30),\n cell_size=10,\n width_margin=0,\n height_margin=0)\n\n successors = sim_env.get_successors(sim_env.cells[1][1])\n\n self.assertEqual(len(successors), 4)\n self.assertEqual(successors[0], sim_env.cells[2][1])\n self.assertEqual(successors[1], sim_env.cells[0][1])\n self.assertEqual(successors[2], sim_env.cells[1][0])\n self.assertEqual(successors[3], sim_env.cells[1][2])\n\n successors = sim_env.get_successors(sim_env.cells[0][0])\n\n self.assertEqual(len(successors), 2)\n self.assertEqual(successors[0], sim_env.cells[1][0])\n self.assertEqual(successors[1], sim_env.cells[0][1])\n\n def test_paint_function_is_changing_the_cells_to_right_type(self):\n sim_env = SimulationEnvironment(shape=RectangularShape(30, 30),\n cell_size=10,\n width_margin=0,\n height_margin=0)\n\n self.assertIsInstance(sim_env.cells[0][0], Floor)\n self.assertIsInstance(sim_env.cells[1][1], Floor)\n self.assertIsInstance(sim_env.cells[2][2], Floor)\n\n sim_env.paint(5, 5, Wall)\n sim_env.paint(15, 15, Goal)\n sim_env.paint(25, 25, Start)\n\n self.assertIsInstance(sim_env.cells[0][0], Wall)\n self.assertIsInstance(sim_env.cells[1][1], Goal)\n self.assertIsInstance(sim_env.cells[2][2], Start)\n\n def test_finding_goal_and_start_element(self):\n sim_env = SimulationEnvironment(shape=RectangularShape(30, 30),\n cell_size=10,\n width_margin=0,\n height_margin=0)\n\n sim_env.cells[0][0] = create_new_cell_from_existing_cell(sim_env.cells[0][0],\n Goal)\n sim_env.cells[2][2] = create_new_cell_from_existing_cell(sim_env.cells[2][2],\n Start)\n goal_cell = sim_env.get_goal_cell()\n start_cell = sim_env.get_start_cell()\n\n self.assertEqual(sim_env.cells[0][0], goal_cell)\n self.assertEqual(sim_env.cells[2][2], start_cell)\n\n\nclass AgentActionManagerTest(unittest.TestCase):\n def test_move_actions(self):\n sim_env = SimulationEnvironment(shape=RectangularShape(50, 50),\n cell_size=10,\n width_margin=0,\n height_margin=0)\n agent = Agent(position=Position2D(10, 10),\n shape=RectangularShape(10, 10),\n rotation=Rotation(ROTATION.UP))\n agent_action_manager = AgentActionManager(agent, sim_env)\n\n self.assertEqual(agent.get_position(), Position2D(10, 10))\n agent_action_manager.move(Direction.SOUTH)\n 
self.assertEqual(agent.get_position(), Position2D(10, 0))\n agent_action_manager.move(Direction.NORTH)\n self.assertEqual(agent.get_position(), Position2D(10, 10))\n agent_action_manager.move(Direction.EAST)\n self.assertEqual(agent.get_position(), Position2D(20, 10))\n agent_action_manager.move(Direction.WEST)\n self.assertEqual(agent.get_position(), Position2D(10, 10))\n\n def test_rotation_and_move_forward_actions(self):\n sim_env = SimulationEnvironment(shape=RectangularShape(50, 50),\n cell_size=10,\n width_margin=0,\n height_margin=0)\n agent = Agent(position=Position2D(10, 10),\n shape=RectangularShape(10, 10),\n rotation=Rotation(ROTATION.UP))\n agent_action_manager = AgentActionManager(agent, sim_env)\n\n self.assertEqual(agent.get_position(), Position2D(10, 10))\n\n agent_action_manager.move_forward()\n self.assertEqual(agent.get_position(), Position2D(10, 20))\n\n agent_action_manager.rotate(\"clockwise\")\n agent_action_manager.move_forward()\n self.assertEqual(agent.get_position(), Position2D(20, 20))\n\n agent_action_manager.rotate(\"counterclockwise\")\n agent_action_manager.move_forward()\n self.assertEqual(agent.get_position(), Position2D(20, 30))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"rl_simulator/src/entities/tests/unit_tests.py","file_name":"unit_tests.py","file_ext":"py","file_size_in_byte":9283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281923974","text":"import tensorflow as tf\n\n\npopulation = tf.feature_column.numeric_column(\"population\")\n\n# estimator = tf.estimator.DNNClassifier(hidden_units=[10, 10], feature_columns=[population], n_classes=2)\n\nkeras_model = tf.keras.applications.inception_v3.InceptionV3(weights=None)\nkeras_model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.001),\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\nprint(keras_model.input_names)\n","sub_path":"estimator_basic.py","file_name":"estimator_basic.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"165932461","text":"class Solution:\n def jump(self, nums: List[int]) -> int:\n if len(nums) < 2:\n return 0\n maxPos, maxSteps, jumps = nums[0], nums[0], 1\n for i in range(1, len(nums)):\n if maxSteps < i:\n jumps += 1\n maxSteps = maxPos\n maxPos = max(maxPos, i + nums[i])\n return jumps","sub_path":"LeetCode/Jump Game II.py","file_name":"Jump Game II.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"450979605","text":"# -*- coding: utf-8 -*-\nfrom app import constants\n\n__all__ = [\"Controller\"]\n\n\nclass Controller:\n def __init__(self):\n self.aim_method = constants.DEFAULT_AIM_METHOD\n self.action_state = True\n self.gimbal_action = ''\n self.hp = 0\n self.heat = 0\n self.bat = 0\n","sub_path":"app/core/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"70310158","text":"import adsk.core\nimport adsk.fusion\nimport adsk.cam\nimport traceback\nimport json\nimport re\nimport os\nimport os.path\nimport datetime\nimport sys\nfrom .Modules import xlsxwriter\n\n\n\n# Copy the folder to: %AppData%\\Autodesk\\Autodesk Fusion 360\\API\\AddIns\n# (This is usually: C:\\Users\\userName\\AppData\\Autodesk\\Autodesk Fusion 360\\API\\AddIns\n\n# Global list to 
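A note on the Jump Game II record above: the method is the greedy layer-by-layer solution, where maxSteps is the farthest index reachable with the jumps counted so far and maxPos the farthest reachable with one more; a jump is charged only when the loop index passes the current frontier. The snippet also annotates with List[int] without importing it, so here is a sketch with the import added and a worked call:

from typing import List

class Solution:
    def jump(self, nums: List[int]) -> int:
        if len(nums) < 2:
            return 0
        # maxSteps: frontier reachable with the jumps counted so far
        # maxPos: farthest index reachable with one extra jump
        maxPos, maxSteps, jumps = nums[0], nums[0], 1
        for i in range(1, len(nums)):
            if maxSteps < i:      # frontier exhausted: charge a jump
                jumps += 1
                maxSteps = maxPos
            maxPos = max(maxPos, i + nums[i])
        return jumps

print(Solution().jump([2, 3, 1, 1, 4]))  # 2, via index 0 -> 1 -> 4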
keep all event handlers in scope.\n# This is only needed with Python.\nhandlers = []\napp = adsk.core.Application.get()\nui = app.userInterface\ncmdId = \"BomAddInMenu\"\ncmdName = \"Bill Of Materials\"\ndialogTitle = \"Create BOM->Excel\"\ncmdDesc = \"Tworzy listę elementów (BOM) oraz zapisuje ją do pliku XLSX lub CSV.\"\ncmdRes = \".//resources//BOM-Excel\"\npanelID = \"SolidCreatePanel\"\nname_logo_file = \"logoBOM\"\n\n# Event handler for the commandCreated event.\nclass SampleCommandCreatedEventHandler(adsk.core.CommandCreatedEventHandler):\n global cmdId\n global ui\n\n def __init__(self):\n super().__init__()\n\n def openFileLogo(self, file):\n try:\n strlogo_name = open(file, 'r').read()\n return strlogo_name\n except: \n return \"Fusion 360\"\n\n def notify(self, args):\n product = app.activeProduct\n design = adsk.fusion.Design.cast(product)\n lastPrefs = design.attributes.itemByName(cmdId, \"lastUsedOptions\")\n _onlySelectedComps = False\n _ignoreCompsWithoutBodies = True\n _ignoreLinkedComps = False\n _ignoreVisibleState = True\n _ignoreUnderscorePrefixedComps = True\n _underscorePrefixStrip = False\n _fullList = True\n _sortDims = False\n _openFile = True\n _dataCSV = True\n _nameProj = True\n _includeArea = False\n _includeMass = False\n _includeDensity = False\n _includeMaterial = False\n _includeDesc = True\n _fileType = True\n _stringlogo = self.openFileLogo(name_logo_file)\n _decimalPlaces = True\n\n if lastPrefs:\n lastPrefs = json.loads(lastPrefs.value)\n _onlySelectedComps = lastPrefs.get(\"onlySelComp\", False)\n _ignoreCompsWithoutBodies = lastPrefs.get(\"ignoreCompWoBodies\", True)\n _ignoreLinkedComps = lastPrefs.get(\"ignoreLinkedComp\", True)\n _ignoreVisibleState = lastPrefs.get(\"ignoreVisibleState\", True)\n _ignoreUnderscorePrefixedComps = lastPrefs.get(\"ignoreUnderscorePrefComp\", True)\n _underscorePrefixStrip = lastPrefs.get(\"underscorePrefixStrip\", False) \n _sortDims = lastPrefs.get(\"sortDims\", False)\n _openFile = lastPrefs.get(\"openFile\", True)\n _dataCSV = lastPrefs.get(\"dataCSV\", False)\n _nameProj = lastPrefs.get(\"nameProj\", False)\n _fullList = lastPrefs.get(\"fullList\", False)\n _includeDesc = lastPrefs.get(\"includeDesc\", False)\n _includeArea = lastPrefs.get(\"includeArea\", False)\n _includeMass = lastPrefs.get(\"includeMass\", False)\n _includeDensity = lastPrefs.get(\"includeDensity\", False)\n _includeMaterial = lastPrefs.get(\"includeMaterial\", False)\n _fileType = lastPrefs.get(\"fileType\", False)\n _stringlogo = lastPrefs.get(\"stringlogo\", False)\n _decimalPlaces = lastPrefs.get(\"decimalPlaces\", True)\n\n try: \n eventArgs = adsk.core.CommandCreatedEventArgs.cast(args)\n # Get the command\n cmd = eventArgs.command\n # Get the CommandInputs collection to create new command inputs. 
\n inputs = cmd.commandInputs\n\n ipSelectComps = inputs.addBoolValueInput(cmdId + \"_onlySelectedComps\", \"Tylko wybrane\", True, \"\", _onlySelectedComps)\n ipSelectComps.tooltip = \"Zostaną użyte tylko wybrane komponenty\"\n\n ipWoBodies = inputs.addBoolValueInput(cmdId + \"_ignoreCompsWithoutBodies\", \"Wyklucz, jeśli nie ma ciał\", True, \"\", _ignoreCompsWithoutBodies)\n ipWoBodies.tooltip = \"Wyklucz wszystkie komponenty, jeśli mają co najmniej jedno ciało\"\n\n ipLinkedComps = inputs.addBoolValueInput(cmdId + \"_ignoreLinkedComps\", \"Wyklucz dołączone\", True, \"\", _ignoreLinkedComps)\n ipLinkedComps.tooltip = \"Wyklucz wszystkie komponenty, które są połączone z projektem\"\n\n ipVisibleState = inputs.addBoolValueInput(cmdId + \"_ignoreVisibleState\", \"Ignoruje stan widoczności\", True, \"\", _ignoreVisibleState)\n ipVisibleState.tooltip = \"Ignoruje widoczność elementu\"\n\n ipUnderscorePrefix = inputs.addBoolValueInput(cmdId + \"_ignoreUnderscorePrefixedComps\", 'Wyklucz \"_\"', True, \"\", _ignoreUnderscorePrefixedComps)\n ipUnderscorePrefix.tooltip = 'Wyklucz wszystkie komponenty, których nazwa zaczyna się od \"_\"'\n\n ipUnderscorePrefixStrip = inputs.addBoolValueInput(cmdId + \"_underscorePrefixStrip\", 'Usuń \"_\"', True, \"\", _underscorePrefixStrip)\n ipUnderscorePrefixStrip.tooltip = 'Jeśli zaznaczone, \"_\" jest usuwane z nazwy komponentów'\n ipUnderscorePrefixStrip.isVisible = not _ignoreUnderscorePrefixedComps \n\n infullList = inputs.addBoolValueInput(cmdId + '_fullList', 'Zwarta lista', True, '', _fullList)\n infullList.tooltip = \"Kasuje puste wpisy oraz jeśli elementy powtarzające sie mają takie same wymiary\\n to je sumuje i pokazuje jako jedna pozycja. \"\n #infullList.isVisible = True\n \n ipsortDims = inputs.addBoolValueInput(cmdId + '_sortDims', 'Sortowanie wymiarów', True, '', _sortDims)\n ipsortDims.tooltip = \"Sortuje wymiary tak aby najdłuższy wymiar był jako długość.\"\n #ipsortDims.isVisible = True\n\n ipdecimalPlaces = inputs.addIntegerSpinnerCommandInput(cmdId + '_decimalPlaces', 'Miejsca po przecinku', 0, 5, 1, 0)\n ipdecimalPlaces.tooltip = \"Dane w BOM będą podawane z taką ilością miejsc po przecinku.\"\n\n grpFile = inputs.addGroupCommandInput(cmdId + '_grpFile', 'PLIK')\n grpFileChildren = grpFile.children\n \n inOpenFile = grpFileChildren.addBoolValueInput(cmdId + '_openFile', 'Otwórz plik', True, '', _openFile)\n inOpenFile.tooltip = \"Otwiera automatycznie plik po utworzeniu.\"\n\n inFileType = grpFileChildren.addDropDownCommandInput(cmdId + '_fileType', 'Rodzaj pliku', adsk.core.DropDownStyles.TextListDropDownStyle);\n fileItems = inFileType.listItems\n fileItems.add('Excel', True, '')\n fileItems.add('CSV', False, '')\n\n grpPhysics = inputs.addGroupCommandInput(cmdId + '_grpPhysics', 'DOŁĄCZ')\n # if _dataCSV or _nameProj or _includeDesc or _includeArea or _includeMass or _includeDensity or _includeMaterial:\n # grpPhysics.isExpanded = True\n # else:\n # \tgrpPhysics.isExpanded = False\n grpPhysicsChildren = grpPhysics.children\n\n strInput = grpPhysicsChildren.addStringValueInput(cmdId + '_stringlogo', 'Logo', _stringlogo)\n\n inDataCSV = grpPhysicsChildren.addBoolValueInput(cmdId + '_dataCSV', 'Data utworzenia BOM', True, '', _dataCSV)\n inDataCSV.tooltip = \"Dołącza datę utworzenia BOM do pliku.\"\n #inDataCSV.isVisible = True\n\n inNameProj = grpPhysicsChildren.addBoolValueInput(cmdId + '_nameProj', 'Nazwa projektu', True, '', _nameProj)\n inNameProj.tooltip = \"Dołącza nazwę projektu do pliku.\"\n #inNameProj.isVisible = True\n\n 
ipIncludeArea = grpPhysicsChildren.addBoolValueInput(cmdId + \"_includeArea\", \"Powierzchnia\", True, \"\", _includeArea)\n ipIncludeArea.tooltip = \"Dołącza powierzchnię komponentu w cm^2\"\n\n ipIncludeMass = grpPhysicsChildren.addBoolValueInput(cmdId + \"_includeMass\", \"Waga\", True, \"\", _includeMass)\n ipIncludeMass.tooltip = \"Dołącza masę komponentu w kg\"\n\n ipIncludeDensity = grpPhysicsChildren.addBoolValueInput(cmdId + \"_includeDensity\", \"Gęstość\", True, \"\", _includeDensity)\n ipIncludeDensity.tooltip = \"Dołącza gęstość komponentu w kg/cm^3\"\n\n ipIncludeMaterial = grpPhysicsChildren.addBoolValueInput(cmdId + \"_includeMaterial\", \"Materiał\", True, \"\", _includeMaterial)\n ipIncludeMaterial.tooltip = \"Dołącza materiał\"\n\n ipCompDesc = grpPhysicsChildren.addBoolValueInput(cmdId + '_includeDesc', 'Opis', True, '', _includeDesc)\n ipCompDesc.tooltip = \"Zawiera opis komponentu. Możesz dodać opis, klikając komponent\\n prawym przyciskiem myszy i otwierając panel Właściwości.\"\n \n \n except:\n if ui:\n ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n # Connect to the execute event.\n onExecute = SampleCommandExecuteHandler()\n cmd.execute.add(onExecute)\n handlers.append(onExecute)\n\n # Connect to the inputChanged event.\n onInputChanged = SampleCommandInputChangedHandler()\n cmd.inputChanged.add(onInputChanged)\n handlers.append(onInputChanged)\n\n\n# Event handler for the inputChanged event.\nclass SampleCommandInputChangedHandler(adsk.core.InputChangedEventHandler):\n def __init__(self):\n super().__init__()\n def notify(self, args):\n global ui\n global cmdId\n\n # eventArgs = adsk.core.InputChangedEventArgs.cast(args)\n command = args.firingEvent.sender\n inputs = command.commandInputs\n\n if inputs.itemById(cmdId + \"_ignoreUnderscorePrefixedComps\").value is True:\n inputs.itemById(cmdId + \"_underscorePrefixStrip\").isVisible = False\n else:\n inputs.itemById(cmdId + \"_underscorePrefixStrip\").isVisible = True\n\n\n# Event handler for the execute event.\nclass SampleCommandExecuteHandler(adsk.core.CommandEventHandler):\n global cmdId\n def __init__(self):\n super().__init__()\n \n def replacePointDelimterOnPref(self, pref, value):\n if (pref):\n return str(value).replace(\".\", \",\")\n return str(value)\n\n def getDataTime(self):\n now = datetime.date.today()\n return now.strftime(\"%d-%m-%Y\")\n\n def openFile(self, file):\n try:\n strlogo_name = open(file, 'r').read()\n return strlogo_name\n except OSError as e: ## if failed, report it back to the user ##\n return (e.filename +'\\n ' + e.strerror)\n\n def formatDecimal(self, file, decimal):\n if decimal == 0:\n return \"{0:.0f}\".format(file)\n elif decimal == 1:\n return \"{0:.1f}\".format(file)\n elif decimal == 2:\n return \"{0:.2f}\".format(file)\n elif decimal == 3:\n return \"{0:.3f}\".format(file)\n elif decimal >= 4:\n return \"{0:.4f}\".format(file)\n\n def collectDataExcel(self, design, bom, prefs, filename):\n defaultUnit = design.fusionUnitsManager.defaultLengthUnits \n # Document name\n app = adsk.core.Application.get()\n docNameWithVersion = app.activeDocument.name\n docName = docNameWithVersion.rsplit(' ',1)[0]\n\n self.saveFile(name_logo_file, prefs[\"stringlogo\"]) \n logo_name = self.openFile(name_logo_file)\n\n workbook = xlsxwriter.Workbook(filename)\n worksheet = workbook.add_worksheet(logo_name)\n worksheet.set_tab_color('red')\n # worksheet2.set_tab_color('green')\n # worksheet3.set_tab_color('#FF9900') # Orange\n\n # Widen the first column to make the text 
clearer.\n worksheet.set_column('A:A', 20)\n # Set up some formats to use.\n\n max_row = len(bom)\n #######################################################################\n #\n # Example 1. Freeze pane on the top row.\n #\n \n worksheet.freeze_panes(4, 1)\n worksheet.autofilter(3, 0, max_row, 4)\n #######################################################################\n #\n # Set up some formatting and text to highlight the panes.\n #\n bold = workbook.add_format({'bold': True})\n italic = workbook.add_format({'italic': True})\n align_left = workbook.add_format({'align': 'left'})\n align_center = workbook.add_format({'align': 'center'})\n bold_center = workbook.add_format({'bold': True,\n 'align': 'center'})\n name_format = workbook.add_format({'bold': True,\n 'align': 'left',\n 'valign': 'vcenter',\n 'fg_color': '#d2f2c7'}) \n instances_format = workbook.add_format({'bold': True,\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': '#ded38e'}) \n header_format = workbook.add_format({'bold': True,\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': '#D7E4BC',\n 'border': 1}) \n merge_format = workbook.add_format({'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'font_size': 30,\n 'valign': 'vcenter',\n 'fg_color': '#D7E4BC'})\n\n # Other sheet formatting.\n worksheet.set_column('A:A', 50)\n worksheet.set_column('B:B', 10)\n worksheet.set_column('C:E', 18)\n worksheet.set_row(0, 20)\n worksheet.set_selection('A4')\n\n\n row = 0\n column = 1\n\n worksheet.merge_range('A1:A3', logo_name, merge_format)\n\n if prefs[\"dataCSV\"]:\n worksheet.write_rich_string(row, column, \"Data utworzenia dokumentu: \", bold, self.getDataTime())\n if prefs[\"nameProj\"]:\n row +=1\n if prefs[\"onlySelComp\"]:\n worksheet.write(row, column, \"UWAGA!\",bold)\n worksheet.write_rich_string(row + 1, column, \"Utworzono na podstawie wybranych elementów z projektu: \", bold, docName)\n else:\n worksheet.write_rich_string(row, column, \"Utworzono na podstawie projektu: \", bold, docName)\n column = 0 \n row = 3\n worksheet.write(row, column, \"NAZWA ELEMENTU\", header_format) \n column += 1\n worksheet.write(row, column, \"ILOŚĆ\", header_format)\n column += 1\n worksheet.write(row, column, \"SZEROKOŚĆ [\" + defaultUnit + \"]\", header_format)\n column += 1\n worksheet.write(row, column, \"DŁUGOŚĆ [\" + defaultUnit + \"]\", header_format)\n column += 1\n worksheet.write(row, column, \"WYSOKOŚĆ [\" + defaultUnit + \"]\", header_format)\n column += 1\n if prefs[\"includeArea\"]:\n worksheet.set_column(column, column, 20)\n worksheet.write(row, column, \"POWIERZCHNIA [cm^2]\", header_format)\n column += 1\n if prefs[\"includeMass\"]:\n worksheet.set_column(column, column, 20)\n worksheet.write(row, column, \"CIĘŻAR [kg]\", header_format)\n column += 1\n if prefs[\"includeDensity\"]:\n worksheet.set_column(column, column, 20)\n worksheet.write(row, column, \"GĘSTOŚĆ [kg/cm^2]\", header_format)\n column += 1\n if prefs[\"includeMaterial\"]:\n worksheet.set_column(column, column, 30)\n worksheet.write(row, column, \"MATERIAŁ\", header_format)\n column += 1\n if prefs[\"includeDesc\"]:\n worksheet.set_column(column, column, 70)\n worksheet.write(row, column, \"OPIS\", header_format)\n column += 1\n\n double_bom = []\n \n for item in bom:\n dimX = float(design.fusionUnitsManager.formatInternalValue(item[\"boundingBox\"][\"x\"], defaultUnit, False))\n dimY = float(design.fusionUnitsManager.formatInternalValue(item[\"boundingBox\"][\"y\"], defaultUnit, False))\n dimZ = 
float(design.fusionUnitsManager.formatInternalValue(item[\"boundingBox\"][\"z\"], defaultUnit, False))\n \n dim = 0\n for k in item[\"boundingBox\"]:\n dim += item[\"boundingBox\"][k]\n if dim > 0: \n dimSorted = sorted([dimX, dimY, dimZ])\n if prefs[\"sortDims\"]:\n dimSorted = sorted([dimX, dimY, dimZ])\n bbZ = self.formatDecimal(dimSorted[2], prefs[\"decimalPlaces\"])\n bbX = self.formatDecimal(dimSorted[0], prefs[\"decimalPlaces\"])\n bbY = self.formatDecimal(dimSorted[1], prefs[\"decimalPlaces\"])\n else:\n bbX = self.formatDecimal(dimX, prefs[\"decimalPlaces\"])\n bbY = self.formatDecimal(dimY, prefs[\"decimalPlaces\"])\n bbZ = self.formatDecimal(dimZ, prefs[\"decimalPlaces\"]) \n \n name = self.filterFusionCompNameInserts(item[\"name\"])\n append = True\n for it in double_bom: \n if prefs[\"fullList\"]:\n if name == it[\"double_name\"] and bbX == it[\"double_dimX\"] and bbY == it[\"double_dimY\"] and bbZ == it[\"double_dimZ\"]:\n it[\"double_instances\"] = it[\"double_instances\"] + item[\"instances\"]\n append = False\n if append: \n double_bom.append({\n # \"double_component\": comp,\n \"double_name\": name,\n \"double_instances\": item[\"instances\"],\n # \" double_volume\": self.getBodiesVolume(comp.bRepBodies),\n \"double_dimX\": bbX,\n \"double_dimY\": bbY,\n \"double_dimZ\": bbZ,\n \"double_area\": item[\"area\"],\n \"double_mass\": item[\"mass\"],\n \"double_density\": item[\"density\"],\n \"double_material\": item[\"material\"],\n \"double_desc\": item[\"desc\"]\n })\n\n column = 0\n for double_item in double_bom:\n row += 1\n name = self.filterFusionCompNameInserts(double_item[\"double_name\"])\n worksheet.write(row, column, name, name_format)\n column += 1\n worksheet.write(row, column, double_item[\"double_instances\"], instances_format) \n column += 1\n worksheet.write(row, column, double_item[\"double_dimX\"], align_center) \n column += 1\n worksheet.write(row, column, double_item[\"double_dimY\"], align_center) \n column += 1\n worksheet.write(row, column, double_item[\"double_dimZ\"], align_center) \n if prefs[\"includeArea\"]:\n column += 1 \n worksheet.write(row, column, self.formatDecimal(double_item[\"double_area\"], prefs[\"decimalPlaces\"]), align_left) \n if prefs[\"includeMass\"]:\n column += 1\n worksheet.write(row, column, self.formatDecimal(double_item[\"double_mass\"], prefs[\"decimalPlaces\"]), align_left) \n if prefs[\"includeDensity\"]:\n column += 1\n worksheet.write(row, column, self.formatDecimal(double_item[\"double_density\"], prefs[\"decimalPlaces\"]), align_left) \n if prefs[\"includeMaterial\"]:\n column += 1\n worksheet.write(row, column, double_item[\"double_material\"], align_left) \n if prefs[\"includeDesc\"]:\n column += 1\n worksheet.write(row, column, double_item[\"double_desc\"], align_left) \n column = 0\n workbook.close() \n\n\n def collectData(self, design, bom, prefs):\n defaultUnit = design.fusionUnitsManager.defaultLengthUnits \n # Document name\n app = adsk.core.Application.get()\n docNameWithVersion = app.activeDocument.name\n docName = docNameWithVersion.rsplit(' ',1)[0]\n\n csvStr = ''\n if prefs[\"dataCSV\"]:\n csvStr += '\"' + \"Data utworzenia dokumentu: \" + self.getDataTime() + '\",\\n\\n'\n if prefs[\"nameProj\"]:\n csvStr += '\"' + \"Utworzono na podstawie projektu: \" + docName + '\",\\n\\n\\n'\n\n csvHeader = [\"NAZWA ELEMENTU\", \"ILOŚĆ\"]\n\n if prefs[\"sortDims\"]:\n csvHeader.append(\"WYSOKOŚĆ [\" + defaultUnit + \"]\")\n csvHeader.append(\"SZEROKOŚĆ [\" + defaultUnit + \"]\")\n csvHeader.append(\"DŁUGOŚĆ [\" + 
defaultUnit + \"]\") \n else:\n csvHeader.append(\"SZEROKOŚĆ [\" + defaultUnit + \"]\")\n csvHeader.append(\"DŁUGOŚĆ [\" + defaultUnit + \"]\")\n csvHeader.append(\"WYSOKOŚĆ [\" + defaultUnit + \"]\")\n if prefs[\"includeArea\"]:\n csvHeader.append(\"POWIERZCHNIA [cm^2]\")\n if prefs[\"includeMass\"]:\n csvHeader.append(\"CIĘŻAR [kg]\")\n if prefs[\"includeDensity\"]:\n csvHeader.append(\"GĘSTOŚĆ [kg/cm^2]\")\n if prefs[\"includeMaterial\"]:\n csvHeader.append(\"MATERIAŁ\") \n if prefs[\"includeDesc\"]:\n csvHeader.append(\"OPIS\") \n \n for k in csvHeader:\n csvStr += '\"' + k + '\",'\n csvStr += '\\n'\n \n double_bom = []\n \n for item in bom:\n dimX = float(design.fusionUnitsManager.formatInternalValue(item[\"boundingBox\"][\"x\"], defaultUnit, False))\n dimY = float(design.fusionUnitsManager.formatInternalValue(item[\"boundingBox\"][\"y\"], defaultUnit, False))\n dimZ = float(design.fusionUnitsManager.formatInternalValue(item[\"boundingBox\"][\"z\"], defaultUnit, False))\n \n dim = 0\n for k in item[\"boundingBox\"]:\n dim += item[\"boundingBox\"][k]\n if dim > 0:\n if prefs[\"sortDims\"]:\n dimSorted = sorted([dimX, dimY, dimZ])\n bbZ = self.formatDecimal(dimSorted[2], prefs[\"decimalPlaces\"])\n bbX = self.formatDecimal(dimSorted[0], prefs[\"decimalPlaces\"])\n bbY = self.formatDecimal(dimSorted[1], prefs[\"decimalPlaces\"])\n else:\n bbX = self.formatDecimal(dimX, prefs[\"decimalPlaces\"])\n bbY = self.formatDecimal(dimX, prefs[\"decimalPlaces\"])\n bbZ = self.formatDecimal(dimX, prefs[\"decimalPlaces\"])\n \n name = self.filterFusionCompNameInserts(item[\"name\"])\n append = True\n for it in double_bom:\n if prefs[\"fullList\"]:\n if name == it[\"double_name\"] and bbX == it[\"double_dimX\"] and bbY == it[\"double_dimY\"] and bbZ == it[\"double_dimZ\"]:\n it[\"double_instances\"] = it[\"double_instances\"] + item[\"instances\"]\n append = False \n if append: \n double_bom.append({\n \"double_name\": name,\n \"double_instances\": item[\"instances\"],\n \"double_dimX\": bbX,\n \"double_dimY\": bbY,\n \"double_dimZ\": bbZ,\n \"double_area\": item[\"area\"],\n \"double_mass\": item[\"mass\"],\n \"double_density\": item[\"density\"],\n \"double_material\": item[\"material\"],\n \"double_desc\": item[\"desc\"]\n })\n\n interspace = '\",\"'\n interspace_start = '\"'\n interspace_end = '\",'\n \n for double_item in double_bom:\n csvStr += interspace_start + double_item[\"double_name\"] + interspace + self.replacePointDelimterOnPref(prefs[\"useComma\"], double_item[\"double_instances\"])\n csvStr += interspace + double_item[\"double_dimX\"] + interspace + double_item[\"double_dimY\"] + interspace + double_item[\"double_dimZ\"]\n if prefs[\"includeArea\"]:\n csvStr += interspace + self.replacePointDelimterOnPref(prefs[\"useComma\"], self.formatDecimal(double_item[\"double_area\"], prefs[\"decimalPlaces\"]))\n if prefs[\"includeMass\"]:\n csvStr += interspace + self.replacePointDelimterOnPref(prefs[\"useComma\"], self.formatDecimal(double_item[\"double_mass\"], prefs[\"decimalPlaces\"]))\n if prefs[\"includeDensity\"]:\n csvStr += interspace + self.replacePointDelimterOnPref(prefs[\"useComma\"], self.formatDecimal(double_item[\"double_density\"], prefs[\"decimalPlaces\"]))\n if prefs[\"includeMaterial\"]:\n csvStr += interspace + double_item[\"double_material\"]\n if prefs[\"includeDesc\"]:\n csvStr += interspace + double_item[\"double_desc\"]\n csvStr += interspace_end\n csvStr += '\\n'\n\n return csvStr\n\n def getPrefsObject(self, inputs):\n \n obj = {\n \"onlySelComp\": 
inputs.itemById(cmdId + \"_onlySelectedComps\").value,\n \"ignoreLinkedComp\": inputs.itemById(cmdId + \"_ignoreLinkedComps\").value,\n \"ignoreCompWoBodies\": inputs.itemById(cmdId + \"_ignoreCompsWithoutBodies\").value,\n \"ignoreVisibleState\": inputs.itemById(cmdId + \"_ignoreVisibleState\").value,\n \"ignoreUnderscorePrefComp\": inputs.itemById(cmdId + \"_ignoreUnderscorePrefixedComps\").value,\n \"underscorePrefixStrip\": inputs.itemById(cmdId + \"_underscorePrefixStrip\").value,\n \"sortDims\": inputs.itemById(cmdId + \"_sortDims\").value,\n \"openFile\": inputs.itemById(cmdId + \"_openFile\").value,\n \"dataCSV\": inputs.itemById(cmdId + \"_dataCSV\").value,\n \"nameProj\": inputs.itemById(cmdId + \"_nameProj\").value, \n \"fullList\": inputs.itemById(cmdId + \"_fullList\").value, \n \"includeDesc\": inputs.itemById(cmdId + \"_includeDesc\").value,\n \"includeArea\" : inputs.itemById(cmdId + \"_includeArea\").value,\n \"includeMass\" : inputs.itemById(cmdId + \"_includeMass\").value,\n \"includeDensity\" : inputs.itemById(cmdId + \"_includeDensity\").value,\n \"includeMaterial\" : inputs.itemById(cmdId + \"_includeMaterial\").value,\n \"fileType\": inputs.itemById(cmdId + \"_fileType\").selectedItem.name,\n \"stringlogo\": inputs.itemById(cmdId + \"_stringlogo\").value,\n \"decimalPlaces\": inputs.itemById(cmdId + \"_decimalPlaces\").value, \n \"generateCutlList\": True,\n \"useComma\": True\n }\n return obj \n\n def getBodiesVolume(self, bodies):\n volume = 0\n for bodyK in bodies:\n if bodyK.isSolid:\n volume += bodyK.volume\n return volume\n\n # Calculates a tight bounding box around the input body. An optional\n # tolerance argument is available. This specificies the tolerance in\n # centimeters. If not provided the best existing display mesh is used.\n def calculateTightBoundingBox(self, body, tolerance = 0):\n try:\n # If the tolerance is zero, use the best display mesh available.\n if tolerance <= 0:\n # Get the best display mesh available.\n triMesh = body.meshManager.displayMeshes.bestMesh\n else:\n # Calculate a new mesh based on the input tolerance.\n meshMgr = adsk.fusion.MeshManager.cast(body.meshManager)\n meshCalc = meshMgr.createMeshCalculator()\n meshCalc.surfaceTolerance = tolerance\n triMesh = meshCalc.calculate()\n \n # Calculate the range of the mesh.\n smallPnt = adsk.core.Point3D.cast(triMesh.nodeCoordinates[0])\n largePnt = adsk.core.Point3D.cast(triMesh.nodeCoordinates[0])\n vertex = adsk.core.Point3D.cast(None)\n for vertex in triMesh.nodeCoordinates:\n if vertex.x < smallPnt.x:\n smallPnt.x = vertex.x\n \n if vertex.y < smallPnt.y:\n smallPnt.y = vertex.y\n \n if vertex.z < smallPnt.z:\n smallPnt.z = vertex.z\n \n if vertex.x > largePnt.x:\n largePnt.x = vertex.x\n \n if vertex.y > largePnt.y:\n largePnt.y = vertex.y\n \n if vertex.z > largePnt.z:\n largePnt.z = vertex.z\n \n # Create and return a BoundingBox3D as the result.\n return(adsk.core.BoundingBox3D.create(smallPnt, largePnt))\n except:\n # An error occurred so return None.\n return(None)\n \n def getBodiesBoundingBox(self, bodies):\n minPointX = maxPointX = minPointY = maxPointY = minPointZ = maxPointZ = 0\n # Examining the maximum min point distance and the maximum max point distance. 
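The manual coordinate fold in the bounding-box merge below can also be expressed with the API's own combine call. A sketch of that variant, assuming each solid body yields a valid adsk.core.BoundingBox3D from calculateTightBoundingBox (the helper name is illustrative):

def combined_bounding_box(boxes):
    # Folds an iterable of BoundingBox3D objects into one enclosing box.
    combined = None
    for bb in boxes:
        if combined is None:
            # copy() keeps the first box's points from being mutated in place
            combined = adsk.core.BoundingBox3D.create(bb.minPoint.copy(), bb.maxPoint.copy())
        else:
            combined.combine(bb)  # expands 'combined' to contain bb
    return combined

This also avoids the zero-initialised min/max comparisons used below, which can misbehave when a legitimate coordinate is exactly 0.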
\n for body in bodies:\n if body.isSolid:\n bb = self.calculateTightBoundingBox(body, 500)\n if not bb:\n return None\n if not minPointX or bb.minPoint.x < minPointX:\n minPointX = bb.minPoint.x\n if not maxPointX or bb.maxPoint.x > maxPointX:\n maxPointX = bb.maxPoint.x\n if not minPointY or bb.minPoint.y < minPointY:\n minPointY = bb.minPoint.y\n if not maxPointY or bb.maxPoint.y > maxPointY:\n maxPointY = bb.maxPoint.y\n if not minPointZ or bb.minPoint.z < minPointZ:\n minPointZ = bb.minPoint.z\n if not maxPointZ or bb.maxPoint.z > maxPointZ:\n maxPointZ = bb.maxPoint.z\n \n return {\n \"x\": maxPointX - minPointX,\n \"y\": maxPointY - minPointY,\n \"z\": maxPointZ - minPointZ\n } \n\n def getPhysicsArea(self, bodies):\n area = 0\n for body in bodies:\n if body.isSolid:\n if body.physicalProperties:\n area += body.physicalProperties.area\n return area\n\n def getPhysicalMass(self, bodies):\n mass = 0\n for body in bodies:\n if body.isSolid:\n if body.physicalProperties:\n mass += body.physicalProperties.mass\n return mass\n\n def getPhysicalDensity(self, bodies):\n density = 0\n if bodies.count > 0:\n body = bodies.item(0)\n if body.isSolid:\n if body.physicalProperties:\n density = body.physicalProperties.density\n return density\n\n def getPhysicalMaterial(self, bodies):\n matList = []\n for body in bodies:\n if body.isSolid and body.material:\n mat = body.material.name\n if mat not in matList:\n matList.append(mat)\n return ', '.join(matList)\n \n def filterFusionCompNameInserts(self, name):\n name = re.sub(\"\\([0-9]+\\)$\", '', name)\n name = name.strip()\n name = re.sub(\"v[0-9]+$\", '', name)\n return name.strip()\n \n def saveFile(self, fnm, fbom):\n try:\n output = open(fnm, 'w')\n output.writelines(fbom)\n output.close()\n return 0\n except OSError as e: ## if failed, report it back to the user ##\n return (e.filename +'\\n ' + e.strerror)\n\n \n def notify(self, args):\n global dialogTitle\n ui = None\n try:\n app = adsk.core.Application.get()\n ui = app.userInterface\n\n product = app.activeProduct\n design = adsk.fusion.Design.cast(product)\n eventArgs = adsk.core.CommandEventArgs.cast(args) \n inputs = eventArgs.command.commandInputs\n \n if not design:\n ui.messageBox('Brak aktywnego projektu', dialogTitle)\n return\n prefs = self.getPrefsObject(inputs)\n \n \n # Get all occurrences in the root component of the active design\n root = design.rootComponent\n\n occs = []\n if prefs[\"onlySelComp\"]:\n if ui.activeSelections.count > 0:\n selections = ui.activeSelections\n for selection in selections:\n if (hasattr(selection.entity, \"objectType\") and selection.entity.objectType == adsk.fusion.Occurrence.classType()):\n occs.append(selection.entity)\n if selection.entity.component:\n for item in selection.entity.component.allOccurrences:\n occs.append(item)\n else:\n ui.messageBox('No components selected!\\nPlease select some components.')\n return\n else:\n ui.messageBox('No components selected!\\nPlease select some components.')\n return\n else:\n occs = root.allOccurrences\n\n\n \n if len(occs) == 0:\n ui.messageBox('W tym projekcie nie ma żadnych komponentów.')\n return\n \n # Set styles of progress dialog.\n progressDialog = ui.createProgressDialog()\n progressDialog.cancelButtonText = 'Zakończ'\n progressDialog.isBackgroundTranslucent = False\n progressDialog.isCancelButtonShown = True\n \n steps = 0\n for occ1 in occs:\n occ1.component\n steps += 1\n \n # Show dialog\n progressDialog.show('Postęp', 'Procent: %p%, Aktualny element: %v', 0, steps)\n \n # Gather 
information about each unique component\n bom = []\n \n for occ in occs:\n if progressDialog.wasCancelled:\n break\n progressDialog.progressValue += 1\n comp = occ.component\n if comp.name.startswith('_') and prefs[\"ignoreUnderscorePrefComp\"]:\n continue\n elif prefs[\"ignoreLinkedComp\"] and design != comp.parentDesign:\n continue\n elif not comp.bRepBodies.count and prefs[\"ignoreCompWoBodies\"]:\n continue\n elif not occ.isVisible and prefs[\"ignoreVisibleState\"] is False:\n continue\n else:\n jj = 0\n for bomI in bom:\n if bomI['component'] == comp:\n # Increment the instance count of the existing row.\n bomI['instances'] += 1\n break\n jj += 1 \n\n if jj == len(bom):\n # Add this component to the BOM\n bb = self.getBodiesBoundingBox(comp.bRepBodies)\n if not bb:\n if ui:\n ui.messageBox('Nie wszystkie moduły Fusion są jeszcze załadowane, kliknij element główny, aby je załadować i spróbuj ponownie.')\n return\n \n bom.append({\n \"component\": comp,\n \"name\": comp.name,\n \"instances\": 1,\n \"volume\": self.getBodiesVolume(comp.bRepBodies),\n \"boundingBox\": bb,\n \"area\": self.getPhysicsArea(comp.bRepBodies),\n \"mass\": self.getPhysicalMass(comp.bRepBodies),\n \"density\": self.getPhysicalDensity(comp.bRepBodies),\n \"material\": self.getPhysicalMaterial(comp.bRepBodies),\n \"desc\": comp.description\n })\n \n # Hide the progress dialog at the end.\n progressDialog.hide()\n\n fileSaveType = str(prefs[\"fileType\"])\n if fileSaveType == 'Excel':\n saveExcel = True\n else:\n saveExcel = False\n \n fileDialog = ui.createFileDialog()\n fileDialog.isMultiSelectEnabled = False\n fileDialog.title = dialogTitle + \" filename\"\n if saveExcel:\n fileDialog.filter = 'XLSX (*.xlsx)'\n else:\n fileDialog.filter = 'CSV (*.csv)' \n fileDialog.filterIndex = 0\n dialogResult = fileDialog.showSave()\n if dialogResult == adsk.core.DialogResults.DialogOK:\n filename = fileDialog.filename\n else:\n return\n\n\n if saveExcel:\n\n self.collectDataExcel(design, bom, prefs, filename)\n\n else:\n bomStr = self.collectData(design, bom, prefs) \n checkFilename = self.saveFile(filename, bomStr)\n \n if checkFilename == 0: \n # Save last chosen options \n design.attributes.add(cmdId, \"lastUsedOptions\", json.dumps(prefs)) \n else: \n message = \"BŁĄD ZAPISU PLIKU!!!\\n \\\"\" + str(checkFilename) + \"\\\" \\n\"\n ui.messageBox(message)\n return\n \n if prefs[\"openFile\"]:\n os.startfile(filename)\n else:\n ui.messageBox('Zapisano do pliku \"' + filename + '\"')\n \n design.attributes.add(cmdId, \"lastUsedOptions\", json.dumps(prefs)) \n \n\n except:\n if ui:\n ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\ndef run(context):\n global ui\n global cmdId\n global dialogTitle\n global cmdDesc\n global cmdRes\n global panelID\n\n try: \n # Get the CommandDefinitions collection.\n cmdDefs = ui.commandDefinitions\n \n # Create a button command definition.\n buttonSample = cmdDefs.addButtonDefinition(cmdId, dialogTitle, cmdDesc, cmdRes)\n \n # Connect to the command created event.\n sampleCommandCreated = SampleCommandCreatedEventHandler()\n buttonSample.commandCreated.add(sampleCommandCreated)\n handlers.append(sampleCommandCreated)\n \n # Get the ADD-INS panel in the model workspace. 
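One fragility in run() as written: addButtonDefinition fails when a definition with the same id is still registered, which happens if a previous session crashed before stop() ran. A defensive variant, sketched under the same globals:

cmdDefs = ui.commandDefinitions

# Reuse a stale definition instead of failing on the duplicate id.
buttonSample = cmdDefs.itemById(cmdId)
if not buttonSample:
    buttonSample = cmdDefs.addButtonDefinition(cmdId, dialogTitle, cmdDesc, cmdRes)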
\n addInsPanel = ui.allToolbarPanels.itemById(panelID)\n \n # Add the button to the bottom of the panel.\n buttonControl = addInsPanel.controls.addCommand(buttonSample, \"\")\n buttonControl.isVisible = True\n except:\n if ui:\n ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n\ndef stop(context):\n try:\n global app\n global ui\n global cmdId\n global panelID\n \n # Clean up the UI.\n cmdDef = ui.commandDefinitions.itemById(cmdId)\n if cmdDef:\n cmdDef.deleteMe()\n \n addinsPanel = ui.allToolbarPanels.itemById(panelID)\n cntrl = addinsPanel.controls.itemById(cmdId)\n if cntrl:\n cntrl.deleteMe()\n except:\n if ui:\n ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))","sub_path":"BOM-Excel.py","file_name":"BOM-Excel.py","file_ext":"py","file_size_in_byte":40214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"527050366","text":"\"\"\"\nCustom ring topology\n\nauthor: Narendran Thangarajan (narendran.thangarajan@gmail.com)\n\"\"\"\n\nfrom mininet.topo import Topo, Node\n\nclass MyRingTopo (Topo) :\n\t\"Simple Ring Topology with 5 switches and 2 hosts connected to each - A precursor to DCell\"\n\n\tdef __init__(self,enable_all=True):\n\t\t\"Call Super constructor\"\n\t\tsuper(MyRingTopo,self).__init__();\n\n\t\t\"\"\"\n\t\tFollowing is the topology (just the switches):\n\t\t\t\t\n\t\t\t\t1\n\n\t\t\t5\t\t2\n\n\t\t\t4\t\t3\n\t\t\n\t\t\"\"\"\n\n\t\ttopSwitch = 1\n\t\tmidRightSwitch = 2\n\t\tbottomRightSwitch = 3\n\t\tbottomLeftSwitch = 4\n\t\tmidLeftSwitch = 5\n\n\t\thost1 = 1001\n\t\thost2 = 1002\n\t\thost3 = 1003\n\t\thost4 = 1004\n\t\thost5 = 1005\n\t\thost6 = 1006\n\t\thost7 = 1007\n\t\thost8 = 1008\n\t\thost9 = 1009\n\t\thost10 = 1010\n\n\t\t\"Add nodes to topology\"\n\t\tself.add_node(topSwitch, Node(is_switch=True))\n\t\tself.add_node(midRightSwitch, Node(is_switch=True))\n\t\tself.add_node(midLeftSwitch, Node(is_switch=True))\n\t\tself.add_node(bottomRightSwitch, Node(is_switch=True))\n\t\tself.add_node(bottomLeftSwitch, Node(is_switch=True))\n\n\t\t\"Add hosts\"\n\t\tself.add_node(host1, Node(is_switch=False))\n\t\tself.add_node(host2, Node(is_switch=False))\n\t\tself.add_node(host3, Node(is_switch=False))\n\t\tself.add_node(host4, Node(is_switch=False))\n\t\tself.add_node(host5, Node(is_switch=False))\n\t\tself.add_node(host6, Node(is_switch=False))\n\t\tself.add_node(host7, Node(is_switch=False))\n\t\tself.add_node(host8, Node(is_switch=False))\n\t\tself.add_node(host9, Node(is_switch=False))\n\t\tself.add_node(host10, Node(is_switch=False))\n\n\t\t\"Add edges to nodes\"\n\t\tself.add_edge(topSwitch,host1)\n\t\tself.add_edge(topSwitch,host2)\n\n\t\tself.add_edge(midRightSwitch,host3)\n\t\tself.add_edge(midRightSwitch,host4)\n\n\t\tself.add_edge(bottomRightSwitch,host5)\n\t\tself.add_edge(bottomRightSwitch,host6)\n\n\t\tself.add_edge(bottomLeftSwitch,host7)\n\t\tself.add_edge(bottomLeftSwitch,host8)\n\n\t\tself.add_edge(midLeftSwitch,host9)\n\t\tself.add_edge(midLeftSwitch,host10)\n\n\t\t\"Ring connection of switches\"\n\n\t\tself.add_edge(topSwitch,midRightSwitch)\n\t\tself.add_edge(midRightSwitch,bottomRightSwitch)\n\t\tself.add_edge(bottomRightSwitch,bottomLeftSwitch)\n\t\tself.add_edge(bottomLeftSwitch,midLeftSwitch)\n\t\tself.add_edge(midLeftSwitch,topSwitch)\n\n\t\tself.enable_all();\n\n\n\n\"The following line allows users to pass --topo myringtopo from command line\"\ntopos = { \"myringtopo\" : (lambda : 
MyRingTopo())}","sub_path":"ringtopo-5sw-10-host.py","file_name":"ringtopo-5sw-10-host.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"349170280","text":"from numpy import *\n\ndigit_error = 0.5\n\ndef moving_average(data, N=100):\n ma = convolve(data, ones((N,))/N, mode='valid')\n if N%2 == 0:\n s_ma = ((sqrt(convolve((data[N//2:data.shape[0] - N//2 + 1] - convolve(data, ones((N,))/N, mode='valid'))**2, ones((N,)), mode = 'same') / (N - 1.0)))/ sqrt(N + 0.0))\n else:\n s_ma = ((sqrt(convolve((data[N//2 + 1:data.shape[0] - N//2] - convolve(data, ones((N,))/N, mode='valid'))**2, ones((N,)), mode = 'same') / (N - 1.0)))/ sqrt(N + 0.0))\n return ma, s_ma + 0.5/sqrt(N)\n \n\n\ndef moving_average_all(data, time, N=100):\n if N%2 == 0:\n data_copy = zeros_like(data[:, N//2:time.shape[0] - N//2 +1])\n errors = zeros_like(data[:, N//2:time.shape[0] - N//2 +1])\n else:\n data_copy = zeros_like(data[:, N//2 + 1:time.shape[0] - N//2+1])\n errors = zeros_like(data[:, N//2 + 1:time.shape[0] - N//2+1])\n for i, item in enumerate(data):\n data_copy[i,:], errors[i,:] = moving_average(item, N)\n if N%2 == 0:\n return data_copy, errors, time[N//2:time.shape[0] - N//2 +1]\n else:\n return data_copy, errors, time[N//2 + 1:time.shape[0] - N//2]\n","sub_path":"AstroWeek/Background/moving_average.py","file_name":"moving_average.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"226801807","text":"# ax^2 + bx + c = 0\r\n\r\nfrom math import sqrt\r\n\r\ndef giai_phuong_trinh_bac_hai(a,b,c):\r\n    if a != 0:\r\n        # Compute delta (the discriminant)\r\n        delta = b**2-4*a*c\r\n\r\n        # Check the sign of delta\r\n        if delta < 0:\r\n            print('Phuong trinh vo nghiem!')\r\n        elif delta == 0:\r\n            x=-b/(2*a)\r\n            ketqua = {'x1':x,'x2':x}\r\n            return ketqua\r\n        elif delta > 0:\r\n            x1=(-b-sqrt(delta))/(2*a)\r\n            x2=(-b+sqrt(delta))/(2*a)\r\n            ketqua = {'x1':x1,'x2':x2}\r\n            return ketqua\r\n    else:\r\n        print('Khong phai phuong trinh bac 2')\r\n\r\n\r\ndef phuong_trinh_bat_hai():\r\n\r\n    while True:\r\n        a_input = float(input('Nhap a:'))\r\n        b_input = float(input('Nhap b:'))\r\n        c_input = float(input('Nhap c:'))\r\n        ketqua = giai_phuong_trinh_bac_hai(a_input, b_input, c_input)\r\n        if ketqua is not None:\r\n            if ketqua['x1'] == ketqua['x2']:\r\n                x = ketqua['x1']\r\n                print(\"Phuong trinh co nghiem kep la: x1 = x2 = \" + str(x))\r\n            else:\r\n                print(\"Phuong trinh co hai nghiem: x1 = \" + str(ketqua['x1']) + \", x2 = \" + str(ketqua['x2']))\r\n\r\nphuong_trinh_bat_hai()\r\n","sub_path":"giai phuong trinh/giaiPhuongTrinhBatHai.py","file_name":"giaiPhuongTrinhBatHai.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"490067737","text":"import rospy\nimport threading\nimport time\nfrom nav_msgs.msg import Odometry\nfrom rosgraph_msgs.msg import Clock\n\nglobal sim_time, flg, t1, t2\n\nsim_time = 0.0\nflg = 0\n\ndef time_callback(data):\n    global sim_time\n    _sec = data.clock.secs\n    _nsec = data.clock.nsecs\n    sim_time = _sec + _nsec * 0.000000001\n\ndef listener():\n    rospy.init_node('logger', anonymous=True)\n    rospy.Subscriber(\"/clock\", Clock, time_callback)\n    rospy.spin()\n\n\ndef printer():\n    global sim_time, t1, t2, flg\n    while not rospy.is_shutdown():\n        if sim_time != 0.0 and flg == 0:\n            t1 = float(sim_time)\n            flg = 1\n        time.sleep(0.01)\n    t2 = float(sim_time)\n    print()\n    print(\"Total time: {} secs\".format(t2 - t1))\n\n\ndef load_printer():\n    print_thread = threading.Thread(target=printer)\n    print_thread.start()\n\nif __name__ == \"__main__\":\n\tload_printer()\n\tlistener()\nelse:\n 
print(\"ERROR!\")\n","sub_path":"crowd_nav/log_time.py","file_name":"log_time.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"322990877","text":"from rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom api import serializers\nfrom daft_analytics.utils import price_regressor, cork_path, dublin_path, galway_path, limerick_path\nfrom data_science.utils import skill_cleaner, skill_compare\n\n\nclass EstimatePropertyView(APIView):\n \"\"\"Apply Property Value Estimator algorithm\"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n serializer_class = serializers.PropertySerializer\n\n def get(self, request):\n return Response({\"message\": \"Specify estimated property features\"})\n\n def post(self, request):\n serializer = serializers.PropertySerializer(data=request.data)\n\n if serializer.is_valid():\n city = request.POST.get('city')\n if city == \"Cork\":\n dataset = cork_path\n elif city == \"Dublin\":\n dataset = dublin_path\n elif city == \"Galway\":\n dataset = galway_path\n else:\n dataset = limerick_path\n area = request.POST.get('area')\n bedrooms = request.POST.get('bedrooms')\n bathrooms = request.POST.get('bathrooms')\n estimated_type = request.POST.get('property_type')\n if estimated_type == \"House\":\n property_type = 1\n else:\n property_type = 0\n try:\n estimators = price_regressor(dataset, area, property_type, bedrooms, bathrooms)\n estimators = sorted(estimators, key=int)\n except:\n return Response({'message': \"{} area doesn't belong to {} city\".format(area, city)},\n status=status.HTTP_400_BAD_REQUEST)\n return Response({\"data\": request.data,\n \"Estimated Property Value\": \"{} - {}\".format(estimators[0], estimators[1])})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass TestProfileView(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n serializer_class = serializers.SkillsSerializer\n\n def get(self, request):\n return Response({\"message\": \"Add your profile skills separated by comma\"})\n\n def post(self, request):\n serializer = serializers.SkillsSerializer(data=request.data)\n\n if serializer.is_valid():\n skillset = request.POST.get('skills')\n try:\n skills = skill_cleaner(skillset)\n results_dictionary, missing_dictionary = skill_compare(skills)\n except:\n return Response({'message': \"Invalid Request\"},\n status=status.HTTP_400_BAD_REQUEST)\n\n return Response({\"Found on % of Data Scientists Linkedin Profiles\": results_dictionary,\n \"% of Data Scientists also listed\": missing_dictionary})\n\n\n\n\n\n\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"api/views_algorithm.py","file_name":"views_algorithm.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"137752227","text":"from flask import Flask, render_template, flash, redirect, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config.database import db\nfrom config.appconfig import TinyLinkConfig\nfrom views.index import index_blueprint\nfrom flask_wtf.csrf import CSRFProtect\n\ndef create_app(db: SQLAlchemy, config: 
TinyLinkConfig):\n\n app = Flask(__name__)\n csrf = CSRFProtect(app)\n app.config['SQLALCHEMY_DATABASE_URI'] = config.sqlalchemy_url\n app.config['SECRET_KEY'] = config.secretkey\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = config.track_modifications\n app.register_blueprint(index_blueprint)\n db.init_app(app)\n csrf.init_app(app)\n return app\n\n\ndef init_app():\n config = TinyLinkConfig()\n app = create_app(db, config)\n return app\n\n\nif __name__ == \"__main__\":\n app = init_app()\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"332702467","text":"import socket\r\nimport sys\r\nimport struct\r\nimport threading\r\n\r\nclass ServerSocket:\r\n def __init__(self, address, queue=5):\r\n self.sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.sock.bind(address)\r\n self.sock.listen(queue)\r\n return\r\n\r\n def accept(self):\r\n client, (host, port)=self.sock.accept()\r\n self.client=client\r\n self.address=(host, port)\r\n return (client, (host, port))\r\n\r\n def recv(self, le):\r\n msg=b''\r\n while len(msg)\n#\n# LICENSE\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or(at your option)\n# any later version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n# more details at http://www.gnu.org/copyleft/gpl.html\n#\n# Brief\n# Solves LeetCode Problem 58: Length of Last Word\n\nclass Solution(object):\n def lengthOfLastWord(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n lastlen, i, slen = 0, 0, len(s)\n while i < slen:\n while s[slen - 1 - i] == ' ':\n i += 1\n if lastlen > 0 or i >= slen:\n return lastlen\n if s[slen - 1 - i].isalpha():\n lastlen += 1\n i += 1\n else:\n lastlen = 0\n while i < slen and s[slen - 1 - i] != ' ':\n i += 1\n return lastlen\n","sub_path":"Problem58/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"594770079","text":"from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom .models import Students\nfrom App1.serializers import StudentSerializers\nfrom django.db.models import Avg, Min, Max, Sum, Count\nimport djqscsv\nimport pandas as pd\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom App1.models import Files_upload\n\n\n\n\n# Create your views here.\nfrom rest_framework.decorators import api_view\n@api_view(['GET','POST','DELETE'])\ndef students_list(request):\n if request.method == 'GET':\n qs=Students.objects.all()\n q = qs.values('id' , 'name' , 'score')\n df = pd.DataFrame.from_records(q)\n mean_df=df['score'].mean()\n min_df=df['score'].min() \n max_df=df['score'].max() # Students_csv = djqscsv.render_to_csv_response(students)\n # hi = pandas.read_csv(Students_csv)\n print(mean_df)\n context ={ \n \"mean\":mean_df, \n \"min\":min_df,\n \"max\":max_df\n } # serializers = StudentSerializers(students,many=True)\n return Response(context)\n\n \n\n if request.method == 'POST':\n name = request.data.get(\"name\")\n 
print(name)\n students = Students.objects.create( name= name, score= request.data.get(\"score\"))\n # serializers = StudentSerializers(students,many=True)\n return Response(\"success\")\n\n if request.method == 'DELETE':\n students = Students.objects.get(id=100)\n students.delete()\n students.save()\n # serializers = StudentSerializers(students,many=True)\n return Response(\"success\")\n\n# def students_list(request):\n# try:\n# p=Project(name=request.POST.get('project1'))\n# p.save()\n# return redirect('http://www.google.com/')\n# except:\n# return redirect('http://www.google.com/')\n\n@api_view(['POST'])\ndef project_upload(request):\n if request.method == 'POST' :\n # and request.FILES['myfile']:\n # # myfile = request.FILES['myfile'].name\n for myfile, file in request.FILES.items():\n name = request.FILES[myfile]\n sqs= Files_upload.objects.create(document = name)\n print(name)\n \n # fs = FileSystemStorage()\n # filename = fs.save(myfile.name, myfile)\n # uploaded_file_url = fs.url(filename)\n return Response('Hello')\n\n\n\n\n\n\n","sub_path":"media/document/views_A9FYs5O.py","file_name":"views_A9FYs5O.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"192644556","text":"\"\"\"Unit tests for hit processing in Annif\"\"\"\n\nfrom annif.hit import AnalysisHit, AnalysisResult, LazyAnalysisResult, \\\n ListAnalysisResult, HitFilter\nfrom annif.corpus import SubjectIndex\nimport numpy as np\n\n\ndef generate_hits(n, subject_index):\n hits = []\n for i in range(n):\n hits.append(AnalysisHit(uri='http://example.org/{}'.format(i),\n label='hit {}'.format(i),\n score=1.0 / (i + 1)))\n return ListAnalysisResult(hits, subject_index)\n\n\ndef test_hitfilter_limit(subject_index):\n orighits = generate_hits(10, subject_index)\n hits = HitFilter(limit=5)(orighits)\n assert isinstance(hits, AnalysisResult)\n assert len(hits) == 5\n\n\ndef test_hitfilter_threshold(subject_index):\n orighits = generate_hits(10, subject_index)\n hits = HitFilter(threshold=0.5)(orighits)\n assert isinstance(hits, AnalysisResult)\n assert len(hits) == 2\n\n\ndef test_hitfilter_zero_score(subject_index):\n orighits = ListAnalysisResult(\n [AnalysisHit(uri='uri', label='label', score=0.0)],\n subject_index)\n hits = HitFilter()(orighits)\n assert isinstance(hits, AnalysisResult)\n assert len(hits) == 0\n\n\ndef test_lazyanalysisresult(subject_index):\n lar = LazyAnalysisResult(lambda: generate_hits(10, subject_index))\n assert lar._object is None\n assert len(lar) == 10\n assert len(lar.hits) == 10\n assert lar.vector is not None\n assert lar[0] is not None\n filtered = lar.filter(limit=5, threshold=0.0)\n assert len(filtered) == 5\n assert lar._object is not None\n\n\ndef test_analysishits_vector(document_corpus):\n subjects = SubjectIndex(document_corpus)\n hits = ListAnalysisResult(\n [\n AnalysisHit(\n uri='http://www.yso.fi/onto/yso/p7141',\n label='sinetit',\n score=1.0),\n AnalysisHit(\n uri='http://www.yso.fi/onto/yso/p6479',\n label='viikingit',\n score=0.5)],\n subjects)\n assert isinstance(hits.vector, np.ndarray)\n assert len(hits.vector) == len(subjects)\n assert hits.vector.sum() == 1.5\n for subject_id, score in enumerate(hits.vector):\n if subjects[subject_id][1] == 'sinetit':\n assert score == 1.0\n elif subjects[subject_id][1] == 'viikingit':\n assert score == 0.5\n else:\n assert score == 0.0\n\n\ndef test_analysishits_vector_notfound(document_corpus):\n subjects = SubjectIndex(document_corpus)\n hits = 
ListAnalysisResult(\n [\n AnalysisHit(\n uri='http://example.com/notfound',\n label='not found',\n score=1.0)],\n subjects)\n assert hits.vector.sum() == 0\n","sub_path":"tests/test_hit.py","file_name":"test_hit.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"255813226","text":"\nclass solution(object):\n def linkedlistsolution(self, head):\n #write code below to traverse a linked list\n temp = head\n while temp:\n print(temp.val)\n temp = temp.next\n\n\n\ndef push_after(llist, new_data):\n node1 = Node(new_data)\n temp = llist.head\n if temp is None:\n node1.next = llist.head\n llist.head = node1\n else:\n while temp:\n if temp.next is None:\n temp.next = node1\n break\n temp = temp.next\n","sub_path":"linked-list-interview.py","file_name":"linked-list-interview.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"448760346","text":"# -*- coding: utf-8 -*-\n__doc__ = '''\n==============================================\n:mod:`irc3.plugin.userlist` User list plugin\n==============================================\n\nThis plugin maintain a know user list and a channel list.\n\nUsage::\n\n >>> from irc3 import IrcBot\n >>> bot = IrcBot(async=False)\n >>> bot.include('irc3.plugins.userlist')\n >>> bot.test(':gawel!user@host JOIN #chan')\n\n >>> plugin = bot.get_plugin('irc3.plugins.userlist.Userlist')\n >>> plugin.channels.items()\n dict_items([('#chan', Channel({'gawel'}))])\n >>> plugin.nicks.items()\n dict_items([('gawel', 'gawel!user@host')])\n\n'''\nfrom irc3 import plugin\nfrom irc3 import event\nfrom irc3 import rfc\nfrom irc3.utils import IrcString\nfrom collections import defaultdict\n\n\nclass Channel(set):\n\n def __init__(self):\n self.ops = set()\n\n\n@plugin\nclass Userlist:\n\n def __init__(self, bot):\n self.bot = bot\n self.connection_lost()\n\n def connection_lost(self):\n self.channels = defaultdict(Channel)\n self.nicks = {}\n\n @event(rfc.JOIN_PART_QUIT)\n def join_part_quit(self, mask, event, channel=None, **kw):\n getattr(self, event.lower())(mask, channel)\n\n def join(self, mask, channel):\n nick = mask.lnick\n if nick != self.bot.nick.lower():\n self.channels[channel].add(mask.nick)\n self.nicks[mask.nick] = mask\n\n def part(self, mask, channel):\n nick = mask.lnick\n if nick == self.bot.nick.lower():\n del self.channels[channel]\n else:\n self.channels[channel].remove(nick)\n if True not in [nick in c for c in self.channels.values()]:\n del self.nicks[nick]\n\n def quit(self, mask, channel):\n nick = mask.lnick\n if nick == self.bot.nick.lower():\n self.connection_lost()\n else:\n for channel in self.channels.values():\n if nick in channel:\n channel.remove(nick)\n del self.nicks[nick]\n\n @event('^:\\S+ 353 [^&#]+(?P\\S+) :(?P.*)')\n def names(self, channel=None, nicknames=None):\n nicknames = nicknames.split(' ')\n for nick in nicknames:\n nick = nick.strip('+%@')\n lnick = nick.lower()\n self.channels[channel].add(lnick)\n self.nicks[lnick] = nick\n\n @event(rfc.RPL_WHOREPLY)\n def who(self, channel=None, nick=None, user=None, host=None, **kw):\n self.channels[channel].add(nick.lower())\n mask = IrcString(nick + '!' 
+ user + '@' + host)\n self.nicks[nick.lower()] = mask\n","sub_path":"irc3/plugins/userlist.py","file_name":"userlist.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252729804","text":"import unittest\nfrom unittest.mock import patch\nfrom memsource import api_rest, exceptions\nimport requests\n\n\nclass TestBaseApi(unittest.TestCase):\n @patch.object(requests.Session, \"request\", side_effect=requests.exceptions.Timeout())\n def test_request_timeout(self, mock_request):\n \"\"\"\n Raise MemsourceApiException when timed out\n \"\"\"\n api = api_rest.BaseApi()\n self.assertRaises(\n exceptions.MemsourceApiException,\n lambda: api._post(\"path\", {})\n )\n\n @patch.object(requests.Session, \"request\", side_effect=requests.exceptions.ConnectionError())\n def test_request_connection_failed(self, mock_request):\n \"\"\"\n Raise MemsourceApiException when connection failed.\n \"\"\"\n api = api_rest.BaseApi()\n self.assertRaises(\n exceptions.MemsourceApiException,\n lambda: api._post(\"path\", {})\n )\n\n def test_init(self):\n headers = {\"Authorization\": \"Bearer TEST-HEADER-TOKEN\"}\n token = \"TEST-TOKEN\"\n api = api_rest.BaseApi(token=token, headers=headers)\n self.assertEqual(api.token, token)\n self.assertEqual(api.headers, headers)\n\n def test_use_session(self):\n api = api_rest.BaseApi()\n session = unittest.mock.Mock()\n api.use_session(session)\n self.assertEqual(api._session, session)\n\n @patch.object(requests.Session, \"request\")\n def test_get(self, mock_request):\n ms_response = unittest.mock.Mock(status_code=200)\n ms_response.json.return_value = {}\n mock_request.return_value = ms_response\n\n api = api_rest.BaseApi(token=\"TEST-TOKEN\")\n response = api._get(\"v1/path\", {\"jobUID\": 1})\n mock_request.assert_called_once_with(\n \"get\",\n \"https://cloud.memsource.com/web/api2/v1/path\",\n headers={\"Authorization\": \"ApiToken TEST-TOKEN\"},\n params={\"jobUID\": 1},\n timeout=60,\n )\n self.assertIsInstance(response, dict)\n\n @patch.object(requests.Session, \"request\")\n def test_get_stream(self, mock_request):\n mock_request.return_value = unittest.mock.Mock(status_code=200)\n\n api = api_rest.BaseApi(token=\"TEST-TOKEN\")\n api._get_stream(\"v1/path\", {\"jobUID\": 1})\n mock_request.assert_called_once_with(\n \"get\",\n \"https://cloud.memsource.com/web/api2/v1/path\",\n headers={\"Authorization\": \"ApiToken TEST-TOKEN\"},\n params={\"jobUID\": 1},\n timeout=300,\n )\n\n @patch.object(requests.Session, \"request\")\n def test_post(self, mock_request):\n mock_request.return_value = unittest.mock.Mock(status_code=200)\n api = api_rest.BaseApi(token=\"TEST-TOKEN\")\n api._post(\"v2/path\", {\"jobUID\": 1})\n mock_request.assert_called_once_with(\n \"post\", \"https://cloud.memsource.com/web/api2/v2/path\",\n json={\"jobUID\": 1}, headers={\"Authorization\": \"ApiToken TEST-TOKEN\"}, timeout=60,\n )\n","sub_path":"test/api_rest/test_base_api.py","file_name":"test_base_api.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"234481575","text":"print('PYTHONs CALCULATOR')\r\nimport time\r\ntime.sleep(0.5) #comment\r\nprint(\"1_addition\")\r\nprint(\"2_subtraction\")\r\nprint(\"3_multiplication\")\r\nprint(\"4_division\")\r\ntime.sleep(0.5)\r\nprint(\"CHOOSE ONE OF THEM\")\r\nchoice=int(input(\"YOUR CHOICE: \"))\r\nwhile (choice>=5):\r\n choice=int(input(\"WRONG 
INPUT.......TRY AGAIN: \"))\r\nif (choice==1):\r\n a=int(input(\"first num: \"))\r\n b=int(input(\"second num: \"))\r\n print(\"your answer \",a+b)\r\nelif (choice==2):\r\n a=int(input(\"first num: \"))\r\n b=int(input(\"second num: \"))\r\n print(\"your answer \",a-b)\r\nelif (choice==3):\r\n a=int(input(\"first num: \"))\r\n b=int(input(\"second num: \"))\r\n print(\"your answer \",a*b)\r\nelif (choice==4):\r\n a=int(input(\"first num: \"))\r\n b=int(input(\"second num: \"))\r\n print(\"your answer \",a/b)\r\n \r\n#made_by_sid\r\n","sub_path":"calculator - Copy.py","file_name":"calculator - Copy.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"591963674","text":"from django.conf.urls import url, patterns, include\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(r'^login/$', 'django.contrib.auth.views.login', name='auth_login'),\n url(r'^logout/$', 'django.contrib.auth.views.logout', {\n 'next_page': '/login'}, name='auth_logout'),\n (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n (r'^admin/', include(admin.site.urls)),\n (r'^workshops/', include('workshops.urls', namespace='workshops')),\n)\n","sub_path":"codetest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"129416137","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sat import Satellite\r\nfrom GrStat import GroundStation, Reception\r\nimport multiprocessing\r\nimport tqdm\r\nimport os\r\nimport datetime\r\n\r\n# this is a code example and very good approximation of the multi-point calculation used in link_performance.py\r\n\r\n\r\ndef point_availability(args): # function loop - return the availability to a given Lat/Long\r\n point = args[0]\r\n sat = args[1]\r\n reception = args[2]\r\n lat = point['Lat']\r\n long = point['Long']\r\n station = GroundStation(lat, long)\r\n sat.set_grstation(station)\r\n sat.set_reception(reception)\r\n point['availability'] = sat.get_availability()\r\n return point\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # reading the input table\r\n location = 'input examples\\\\'\r\n file = 'list'\r\n point_list = pd.read_csv(location + file + '.csv', sep=';', encoding='latin1')\r\n point_list['availability'] = np.nan # creating an empty results column\r\n\r\n ##############################\r\n ### satellite parameters ###\r\n ##############################\r\n\r\n sat_long = -70 # [decimal degrees]\r\n freq = 18 # [Ghz]\r\n eirp = 54 # [dBW]\r\n hsat = 35800 # satellite's height [km]\r\n tau = 90 # H=0, V = 90, circ = 45\r\n b_transponder = 36 # transponder bandwidth [MHz]\r\n b_util = 9 # effective used bandwidth [MHz]\r\n backoff = 0 # not used for now!\r\n contour = 0 # not used for now!\r\n mod = '8PSK' # modulation (from modcod file)\r\n fec = '120/180' # FEC (from modcod file)\r\n rolloff = 0.2 # roll-off factor (raised cosine filter)\r\n\r\n # creating the satellite object\r\n sat = Satellite(sat_long, freq, eirp, hsat, b_transponder, b_util, backoff, contour, mod, rolloff, fec)\r\n\r\n ##############################\r\n ### reception parameters ###\r\n ##############################\r\n ant_size = 1.2 # reception antenna diameter [m]\r\n ant_eff = 0.6 # reception antenna efficiency\r\n coupling_loss = 0 # [dB]\r\n polarization_loss = 3 # [dB]\r\n lnb_gain = 55 # [dB]\r\n lnb_noise_temp = 20 # temperatura de 
ruído do LNBF\r\n cable_loss = 4 # [dB]\r\n max_depoint = 0.1 # maximum depointing angle [degrees]\r\n\r\n # creating a reception object\r\n reception = Reception(ant_size, ant_eff, coupling_loss, polarization_loss, lnb_gain, lnb_noise_temp, cable_loss,\r\n max_depoint)\r\n\r\n cores = multiprocessing.cpu_count() - 2\r\n\r\n p = multiprocessing.Pool(processes=cores)\r\n\r\n # calculation loop\r\n\r\n data = list(\r\n tqdm.tqdm(p.imap_unordered(point_availability, [(city, sat, reception) for index, city in point_list.iterrows()]),\r\n total=len(point_list)))\r\n p.close()\r\n\r\n point_list.drop(point_list.index, inplace=True)\r\n point_list = point_list.append(data, ignore_index=True)\r\n point_list['unavailability time'] = round(((100 - point_list['availability'])/100) * 525600, 0) # calculating the unavailability in minutes\r\n\r\n\r\n # saving the results into a csv file\r\n\r\n path = 'results'\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n\r\n point_list.to_csv(path + '\\\\' + 'results ' + datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S') + '.csv', sep=';',\r\n encoding='latin1')\r\n\r\n print('Complete!!!')\r\n\r\n","sub_path":"multi_point_example.py","file_name":"multi_point_example.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"416892148","text":"import queue\n\nfrom data.dict import cities_and_paths\nfrom data.graph import Graph\n\n\ndef bfs_paths(start, goal):\n root = Graph(start, start, 0)\n q = queue.Queue()\n visited = []\n q.put(root)\n while not q.empty():\n obj = q.get()\n if obj.name == goal:\n print(\"\\nThe path is\", obj.path)\n print(\"\\nThe cost is\", obj.cost)\n return\n visited.append(obj)\n x = cities_and_paths[obj.name]\n for city, cost in x.items():\n if city in visited:\n continue\n else:\n node = Graph(name=city,\n path=obj.path + ',' + city,\n cost=obj.cost + cost)\n q.put(node)\n","sub_path":"my_code/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614251328","text":"class hopcroft_karp:\r\n def __init__(self):\r\n with open('matching.inp','r') as f:\r\n a = f.readlines()\r\n def arr(s):\r\n tem = []\r\n b = s.split(' ')\r\n for i in b:\r\n if i != ' ' and i != '\\n':\r\n tem.append(int(i))\r\n return tem\r\n self.N = arr(a[0])[0]\r\n self.M = arr(a[0])[1]\r\n if self.N == 0 or self.M == 0:\r\n pass\r\n else:\r\n G = []\r\n self.edgesum = 0\r\n for i in range(self.N):\r\n tem = []\r\n A = arr(a[i+1])\r\n self.edgesum += A[1]\r\n for j in range(A[1]):\r\n tem.append(A[j+2]-1)\r\n G.append(tem)\r\n A = [[0 for j in range(self.M)] for i in range(self.N)]\r\n for i in range(self.N):\r\n for j in range(len(G[i])):\r\n A[i][G[i][j]] = 1\r\n self.A = A\r\n nn = self.N\r\n gotone = 0\r\n for i in range(nn):\r\n if sum(self.A[i-gotone]) == 0:\r\n del self.A[i-gotone]\r\n gotone += 1\r\n self.N -= 1\r\n self.A = self.transpose(self.A)\r\n gotone = 0\r\n mm = self.M\r\n for i in range(mm):\r\n if sum(self.A[i-gotone]) == 0:\r\n del self.A[i-gotone]\r\n gotone += 1\r\n self.M -= 1\r\n if self.N>>>')\r\n tem = []\r\n found = []\r\n print(Rsides)\r\n for i in Rsides:\r\n if self.Rside[i] == -1:\r\n found.append(i)\r\n else:\r\n tem.append(self.Rside[i])\r\n if found:\r\n print(found)\r\n self.gotsome = True\r\n self.startbackchase(self.cleantem(found))\r\n self.stillneedtime = False\r\n else:\r\n 
self.RtoLlookingforwakeup(self.cleantem(tem))\r\n\r\n def RtoLlookingforwakeup(self, Ls):\r\n print('in lookfor <<<<<<')\r\n print(Ls)\r\n tem = []\r\n for i in Ls:\r\n for j in self.L[i]:\r\n if j != i:\r\n tem.append(j)\r\n if tem:\r\n self.LtoRlookingforconnectedorend(self.cleantem(tem))\r\n else:\r\n\r\n pass#deadend\r\n def startbackchase(self, founds):\r\n self.newpath.append(founds[self.waitingorder])\r\n self.RtoLbackchaselookingforconnectionormatch(self.R[founds[self.waitingorder]][0])\r\n\r\n def RtoLbackchaselookingforconnectionormatch(self, Lseed):\r\n print(self.newpath)\r\n print('HHHHHHHH <<<<<<')\r\n #print(self.Lside[Lseed])\r\n print(Lseed)\r\n #print(self.Lside[Lseed])\r\n if self.Lside[Lseed] == -1:\r\n self.newpath.append(Lseed)\r\n print('we are here')\r\n self.updatepath()\r\n else:\r\n self.newpath.append(Lseed)\r\n self.LtoRbackchaselookingforcleanones(self.Lside[Lseed])\r\n\r\n def LtoRbackchaselookingforcleanones(self, Rs):\r\n print(self.newpath)\r\n self.newpath.append(Rs)\r\n print(\"HHHHHHHH >>>>>>\")\r\n #print(Rs)\r\n print(self.R[Rs])\r\n isclean = True\r\n for i in self.R[Rs]:\r\n #print('out')\r\n #print(i)\r\n if i != Rs:\r\n #print('in')\r\n #print(i)\r\n for j in range(len(self.newpath)):\r\n\r\n print('2*j+1 < len(self.newpath')\r\n print(i)\r\n print(self.Lside[i])\r\n print(self.newpath)\r\n #print()\r\n print(2*j+1,len(self.newpath))\r\n print(2*j+1 < len(self.newpath))\r\n if 2*j+1 < len(self.newpath):\r\n #print('see')\r\n #print(self.newpath[2*j +1])\r\n if i == self.newpath[2*j +1]:\r\n isclean = False\r\n if isclean:\r\n self.RtoLbackchaselookingforconnectionormatch(i)\r\n break\r\n else:\r\n isclean = True\r\n isclean = True\r\n def updatepath(self):\r\n #print(self.newpath)\r\n for i in range(int(len(self.newpath)/2)):\r\n self.Rside[self.newpath[i*2]] = self.newpath[i*2+1]\r\n self.Lside[self.newpath[i*2+1]] = self.newpath[i*2]\r\n self.count += 1\r\n self.newpath = []\r\n if self.count == self.N:\r\n print('we got {}'.format(self.count))\r\n else:\r\n self.stillneedtime = True\r\n self.gotsome = False\r\n self.lookfor()\r\n\r\n def show(self):\r\n print(self.N)\r\n print(self.M)\r\n #print(self.count)\r\n print(self.Lside)\r\n print(self.Rside)\r\n #print(self.A)\r\n print(self.L)\r\n print(self.R)\r\n #print(self.B)\r\n def run(self):\r\n self.start()\r\n #self.show()\r\n self.lookfor()\r\n def transpose(self,S):\r\n tem = [[0 for j in range(len(S))] for i in range(len(S[0]))]\r\n for i in range(len(S)):\r\n for j in range(len(S[0])):\r\n tem[j][i] = S[i][j]\r\n return tem\r\n def cleantem(self,t):\r\n tem = []\r\n norepeat = True\r\n for i in t:\r\n if tem:\r\n for j in tem:\r\n if i == j:\r\n norepeat = False\r\n if norepeat:\r\n tem.append(i)\r\n else:\r\n norepeat = True\r\n else:\r\n tem.append(i)\r\n return tem\r\n\r\n\r\nmm = hopcroft_karp()\r\nmm.run()\r\n","sub_path":"__OLD_CODE_STORAGE/Hopcroft_Karp_Algorithm/bbtest.py","file_name":"bbtest.py","file_ext":"py","file_size_in_byte":7804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"459096666","text":"from selenium import webdriver\nimport pandas as pd\nimport unicodedata\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\nimport datetime\nimport numpy as np\nimport re\n\n\nlogin = 'YOUR_LOGIN'\npassword = 'YOUR_PASSWORD'\nlink_raw = 'https://m.facebook.com/groups/470640109647012/?ref=group_browse'\ns = \"11/04/2020\"\nstart_date = int(time.mktime(datetime.datetime.strptime(s, 
\"%d/%m/%Y\").timetuple()))\n\n# Преобразовывает ссылку к виду с лентой в хронологическом порядке\ndef link_creator(link):\n link_splitted = link.split('/')\n id = link_splitted[4]\n url = 'https://www.facebook.com/groups/' + id +'/?sorting_setting=CHRONOLOGICAL'\n return url,id\n\n# функция скролла страницы\ndef scroll():\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n# проверяет не достигли ли итерации нужной даты\ndef date_checking(dates_code):\n for i in range(len(dates_code)):\n dates.append(dates_code[i].get_attribute('data-utime'))\n\n if (float(dates[i]) < start_date) and i > 0:\n stop == 1\n quant_of_posts = i\n return quant_of_posts\n\n\ndriver = webdriver.Chrome('/Users/alex_nau/Desktop/chrm/chromedriver')\ndriver.get('https://m.facebook.com/login/?next=https%3A%2F%2Fm.facebook.com%2Fgroups_browse%2Fyour_groups%2F')\ndriver.implicitly_wait(10)\nbutton1 = driver.find_element_by_id('m_login_email')\nbutton1.click()\nbutton1.send_keys(login)\n\nbutton1 = driver.find_element_by_id('m_login_password')\nbutton1.click()\nbutton1.send_keys(password)\nbutton3 = driver.find_element_by_id('u_0_4')\nbutton3.click()\ndriver.implicitly_wait(10)\n\n# проверяем в каких группах есть обновления\ngroups = driver.find_elements_by_class_name('_7hkg')\nids = []\nlinks_raw = []\nfor i in range(len(groups)):\n ids.append(str(groups[i].get_attribute('href')))\n\n t = groups[i].text.split('\\n')\n if len(t) == 3:\n if int(''.join(filter(str.isdigit, t[2]))) > 0:\n links_raw.append(str(ids[i]))\n print(ids[i])\n i = i + 2\n else:\n i = i + 2\n continue\n\n# переходим по полученным ссылкам и вытаскиваем посты и даты\nfor link_raw in links_raw:\n link,id = link_creator(link_raw)\n\n driver = webdriver.Chrome('/Users/alex_nau/Desktop/chrm/chromedriver')\n chrome_options = webdriver.ChromeOptions()\n prefs = {\"profile.default_content_setting_values.notifications\" : 2}\n chrome_options.add_experimental_option(\"prefs\",prefs)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n\n driver.get(link)\n driver.implicitly_wait(10)\n\n button1 = driver.find_element_by_id('email')\n button1.click()\n button1.send_keys(login)\n\n button1 = driver.find_element_by_id('pass')\n button1.click()\n button1.send_keys(password)\n button3 = driver.find_element_by_id('u_0_2')\n button3.click()\n driver.implicitly_wait(10)\n\n\n stop = 0\n while(stop == 0):\n # в этом классе у нас находится ссылка на пост, в дочернем abbr - его дата\n posts = driver.find_elements_by_class_name('_5pcq')\n\n dates = []\n dates_code = []\n links = []\n posts_sorted = []\n\n for i in posts:\n if (i.get_attribute('href')).find('groups') != -1:\n posts_sorted.append(i)\n links.append(i.get_attribute('href'))\n\n\n for l in posts_sorted:\n try:\n dates_code.append(l.find_element_by_xpath('.//abbr'))\n driver.implicitly_wait(3)\n except NoSuchElementException:\n\n continue\n\n for i in range(len(dates_code)):\n dates.append(dates_code[i].get_attribute('data-utime'))\n\n\n if date_checking(dates_code):\n stop = 1\n quant_of_posts = date_checking(dates_code)\n\n scroll()\n\n\n\n dates_final = []\n links_final = []\n for i in range(quant_of_posts):\n dates_final.append(datetime.datetime.fromtimestamp(int(dates[i])).strftime('%Y-%m-%d %H:%M:%S'))\n links_final.append(links[i])\n\n pd.set_option('display.width', 1000)\n pd.set_option('max_colwidth', 300)\n df_links_dates = pd.DataFrame(list(zip(links_final, dates_final)), columns =['Links', 'Dates'])\n html = df_links_dates.to_html()\n text_file = 
open(\"/Users/alex_nau/PycharmProjects/parcer/final_2.0.html\", \"a\")\n text_file.write(html)\n text_file.close()\n\n","sub_path":"parsing_page.py","file_name":"parsing_page.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"245791578","text":"import os \n\n# Set the directory of the USA input data and the shell script to call spew\nus_base_dir = \"/mnt/beegfs1/data/shared_group_data/syneco/spew_1.2.0/americas/northern_america/usa\"\nbase_dir_call = \"spew_1.2.0/americas/northern_america/usa\"\ncall_spew = \"/mnt/beegfs1/data/shared_group_data/syneco/olympus/call_spew/call_spew.sh\"\n\n# Run call_spew.sh script, using the directory names \nus_dirs = os.listdir(us_base_dir)\nfor us_dir in us_dirs:\n\t# Skip the non-state directories \n\tif us_dir == \"input\" or us_dir == \"output\" or us_dir == \"logfiles\":\n\t\tcontinue\n\n\t# Construct the shell command, and call_spew for this US directory \n\tbase_dir = base_dir_call + \"/\" + us_dir\n\tdata_group = \"US\"\n\tshell_call = \"bash \" + call_spew + \" \" + base_dir + \" \" + data_group + \" MPI\"\n\tos.system(shell_call)\n","sub_path":"olympus/call_spew/run_usa.py","file_name":"run_usa.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"210363142","text":"import numpy as np\nfrom keras.preprocessing import image\nfrom keras_vggface.vggface import VGGFace\nfrom keras_vggface import utils\n\nfrom detector import FaceDetector_DLIBHOG\nimport cv2\n\n# tensorflow\nmodel = VGGFace(model = 'senet50') # default : VGG16 , you can use model='resnet50' or 'senet50'\n\n\n\ndef findCosineSimilarity(source_representation, test_representation):\n a = np.matmul(np.transpose(source_representation), test_representation)\n b = np.sum(np.multiply(source_representation, source_representation))\n c = np.sum(np.multiply(test_representation, test_representation))\n return 1 - (a / (np.sqrt(b) * np.sqrt(c)))\n\ndef findEuclideanDistance(source_representation, test_representation):\n euclidean_distance = source_representation - test_representation\n euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))\n euclidean_distance = np.sqrt(euclidean_distance)\n return euclidean_distance\n\ndlib_detector = FaceDetector_DLIBHOG(None, None, None)\n\n\n# Change the image path with yours.\nimg_path = \"/home/500/anh_lbt/IMAGE_TASK/FaceRecognition_2019/faceidsys/datasets/test/IMG_1417_hog_1.png\"\nimg_path2 = \"/home/500/anh_lbt/IMAGE_TASK/FaceRecognition_2019/faceidsys/datasets/test/IMG_1520_hog_0.png\"\nimg_path3 = \"/home/500/anh_lbt/IMAGE_TASK/FaceRecognition_2019/faceidsys/datasets/test/IMG.png\"\n\n# ima = cv2.imread(img_path)\n# faces = dlib_detector.detect(ima)\n\nimg = image.load_img(img_path, target_size=(224, 224))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = utils.preprocess_input(x, version=2) # or version=2\n\nimg2 = image.load_img(img_path2, target_size=(224, 224))\nx2 = image.img_to_array(img2)\nx2 = np.expand_dims(x2, axis=0)\nx2 = utils.preprocess_input(x2, version=2) # or version=2\n\nimg3 = image.load_img(img_path3, target_size=(224, 224))\nx3 = image.img_to_array(img3)\nx3 = np.expand_dims(x3, axis=0)\nx3 = utils.preprocess_input(x3, version=2) # or version=2\n\nimg1_representation = model.predict(x)\nimg2_representation = model.predict(x2)\nimg3_representation = model.predict(x3)\n# print('Predicted:', 
utils.decode_predictions(preds))\ncosine_similarity = findCosineSimilarity(img1_representation, img2_representation)\neuclidean_distance = findEuclideanDistance(img1_representation, img2_representation)\n\ncosine_similarity3 = findCosineSimilarity(img1_representation, img3_representation)\neuclidean_distance3 = findEuclideanDistance(img1_representation, img3_representation)\n\nprint(\"Cosine similarity: \",cosine_similarity)\nprint(\"Euclidean distance: \",euclidean_distance)","sub_path":"libfaceid/test_vgg.py","file_name":"test_vgg.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"513997451","text":"#!/usr/bin/env python3\n\n\"\"\"Infoset ingest cache daemon.\n\nExtracts agent data from cache directory files.\n\n\"\"\"\n\n# Standard libraries\nimport os\nimport sys\nimport time\nimport argparse\n\n# Infoset libraries\nfrom infoset.cache import cache\nfrom infoset.utils import log\nfrom infoset.utils import jm_configuration\nfrom infoset.utils import hidden\nfrom infoset.utils import Daemon\n\n\nclass IngestDaemon(Daemon):\n \"\"\"Class that manages polling.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Method initializing the class.\n\n Args:\n config: ConfigServer Object\n\n Returns:\n None\n\n \"\"\"\n # Instantiate poller\n self.config = config\n\n # Get PID filename\n agent_name = 'ingestd'\n f_obj = hidden.File()\n self.pidfile = f_obj.pid(agent_name)\n\n # Call up the base daemon\n Daemon.__init__(self, self.pidfile)\n\n def run(self):\n \"\"\"Start polling.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Do the daemon thing\n while True:\n cache.process(self.config)\n time.sleep(15)\n\n\nclass IngestCLI(object):\n \"\"\"Class that manages the agent CLI.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Method initializing the class.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Initialize key variables\n self.parser = None\n\n log.check_environment()\n self.config_directory = os.environ['INFOSET_CONFIGDIR']\n\n def config_dir(self):\n \"\"\"Return configuration directory.\n\n Args:\n None\n\n Returns:\n value: Configuration directory\n\n \"\"\"\n # Return\n value = self.config_directory\n return value\n\n def process(self, additional_help=None):\n \"\"\"Return all the CLI options.\n\n Args:\n None\n\n Returns:\n args: Namespace() containing all of our CLI arguments as objects\n - filename: Path to the configuration file\n\n \"\"\"\n # Header for the help menu of the application\n parser = argparse.ArgumentParser(\n description=additional_help,\n formatter_class=argparse.RawTextHelpFormatter)\n\n # CLI argument for stopping\n parser.add_argument(\n '--stop',\n required=False,\n default=False,\n action='store_true',\n help='Stop the ingest daemon.'\n )\n\n # CLI argument for starting\n parser.add_argument(\n '--start',\n required=False,\n default=False,\n action='store_true',\n help='Start the ingest daemon.'\n )\n\n # CLI argument for restarting\n parser.add_argument(\n '--restart',\n required=False,\n default=False,\n action='store_true',\n help='Restart the ingest daemon.'\n )\n\n # CLI argument for statusing\n parser.add_argument(\n '--status',\n required=False,\n default=False,\n action='store_true',\n help='Get the status of the ingest daemon.'\n )\n\n # Get the parser value\n self.parser = parser\n\n def control(self, config):\n \"\"\"Start the infoset agent.\n\n Args:\n config: ConfigServer 
Object\n\n Returns:\n None\n\n \"\"\"\n # Get the CLI arguments\n self.process()\n parser = self.parser\n args = parser.parse_args()\n\n # Run daemon\n daemon = IngestDaemon(config)\n if args.start is True:\n daemon.start()\n elif args.stop is True:\n daemon.stop()\n elif args.restart is True:\n daemon.restart()\n elif args.status is True:\n daemon.status()\n else:\n parser.print_help()\n sys.exit(2)\n\n\ndef main():\n \"\"\"Process agent data.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Get configuration\n cli = IngestCLI()\n config_dir = cli.config_dir()\n config = jm_configuration.ConfigServer(config_dir)\n\n # Do control\n cli.control(config)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"infoset/bin/ingestd.py","file_name":"ingestd.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"377587061","text":"def fibo(n):\n print('-' * 10, 'fibo(%s)' % n, '-' * 10)\n if n <= 2:\n return 1\n else:\n return fibo(n - 1) + fibo(n - 2)\n\n\ndef factorial(n):\n print('-' * 10, 'factorial(%s)' % n, '-' * 10)\n return 1 if n < 2 else n * factorial(n - 1)\n\nif __name__ == '__main__':\n print(factorial(6))","sub_path":"fluent_python/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"259306082","text":"var_in = list(\">><--<>>>\")\nvar_out = 0\nsalute = 0\nother = 0\n\nfor i, char in enumerate(var_in):\n if char == \">\":\n right_count = i\n while (right_count < len(var_in)):\n if var_in[right_count] == \"<\":\n salute = salute + 1\n right_count = right_count + 1\n var_in[i] = \"x\"\n elif char == \"<\":\n left_count = i\n while (left_count != 0):\n if var_in[left_count] == \">\":\n salute = salute + 1\n left_count = left_count - 1\n var_in[i] = \"x\"\n else:\n other = other + 1\n\nsalute = salute * 2\nprint(int(salute))\n","sub_path":"Google/google2.py","file_name":"google2.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"118332847","text":"# -*- coding: utf-8 -*-\nimport tweepy\nimport random\nimport time\nimport os\nimport traceback\n\n'''\nthe @thorskastridin cod tweet bot spreads awareness about the cod wars (https://en.wikipedia.org/wiki/Cod_Wars)\nby asking people tweeting about the cold war and cod 4 whether they meant to tweet about the cod wars\n'''\ntweets = [\"{0}? Are you sure you didn't mean the cod wars? {1}\",\n \"I haven't heard about {0} but I do know about the cod wars {1}\",\n \"ok ok, {0} is pretty cool but nothing compared to the cod wars {1}\",\n \"{0}? Is that a typo? 
Surely you meant the cod wars {1}\",\n \"the cod wars > {0}, everybody knows that {1}\"\n ]\n\nWIKILINK = 'https://en.wikipedia.org/wiki/Cod_Wars'\nCONSUMER_KEY = os.environ.get('COD_BOT_CONSUMER_KEY')\nCONSUMER_SECRET = os.environ.get('COD_BOT_CONSUMER_SECRET')\nACCESS_TOKEN = os.environ.get('COD_BOT_ACCESS_TOKEN')\nACCESS_TOKEN_SECRET = os.environ.get('COD_BOT_ACCESS_TOKEN_SECRET')\n\n\ndef spreadawareness():\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth)\n search_term = random.choice(['\"the cold war\"', '\"cod4\"'])\n search_results = api.search(search_term + \" since:\"+time.strftime(\"%Y-%m-%d\"))\n selectedtweet = random.choice(search_results)\n tweettext = \"@\"+selectedtweet.user.screen_name + \" \"\n tweettext += random.choice(tweets).format(search_term, WIKILINK)\n api.update_status(tweettext, selectedtweet.id_str)\n\nwhile True:\n try:\n spreadawareness()\n except Exception:\n print(traceback.format_exc())\n time.sleep(30 * 60) # the codbot sleeps with the fishes for 30 minutes\n","sub_path":"codwar.py","file_name":"codwar.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"525295634","text":"# Protocol Buffers - Google's data interchange format\n# Copyright 2008 Google Inc. All rights reserved.\n# https://developers.google.com/protocol-buffers/\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Tests for google.protobuf.internal.keywords.\"\"\"\n\nimport unittest\n\n\nfrom google.protobuf.internal import more_messages_pb2\nfrom google.protobuf import descriptor_pool\n\n\nclass KeywordsConflictTest(unittest.TestCase):\n\n def setUp(self):\n super(KeywordsConflictTest, self).setUp()\n self.pool = descriptor_pool.Default()\n\n def testMessage(self):\n message = getattr(more_messages_pb2, 'class')()\n message.int_field = 123\n self.assertEqual(message.int_field, 123)\n des = self.pool.FindMessageTypeByName('google.protobuf.internal.class')\n self.assertEqual(des.name, 'class')\n\n def testNestedMessage(self):\n message = getattr(more_messages_pb2, 'class')()\n message.nested_message.field = 234\n self.assertEqual(message.nested_message.field, 234)\n des = self.pool.FindMessageTypeByName('google.protobuf.internal.class.try')\n self.assertEqual(des.name, 'try')\n\n def testField(self):\n message = getattr(more_messages_pb2, 'class')()\n setattr(message, 'if', 123)\n setattr(message, 'as', 1)\n self.assertEqual(getattr(message, 'if'), 123)\n self.assertEqual(getattr(message, 'as'), 1)\n\n def testEnum(self):\n class_ = getattr(more_messages_pb2, 'class')\n message = class_()\n # Normal enum value.\n message.enum_field = more_messages_pb2.default\n self.assertEqual(message.enum_field, more_messages_pb2.default)\n # Top level enum value.\n message.enum_field = getattr(more_messages_pb2, 'else')\n self.assertEqual(message.enum_field, 1)\n # Nested enum value\n message.nested_enum_field = getattr(class_, 'True')\n self.assertEqual(message.nested_enum_field, 1)\n\n def testExtension(self):\n message = getattr(more_messages_pb2, 'class')()\n # Top level extension\n extension1 = getattr(more_messages_pb2, 'continue')\n message.Extensions[extension1] = 456\n self.assertEqual(message.Extensions[extension1], 456)\n # None top level extension\n extension2 = getattr(more_messages_pb2.ExtendClass, 'return')\n message.Extensions[extension2] = 789\n self.assertEqual(message.Extensions[extension2], 789)\n\n def testExtensionForNestedMessage(self):\n message = getattr(more_messages_pb2, 'class')()\n extension = getattr(more_messages_pb2, 'with')\n message.nested_message.Extensions[extension] = 999\n self.assertEqual(message.nested_message.Extensions[extension], 999)\n\n def TestFullKeywordUsed(self):\n message = more_messages_pb2.TestFullKeyword()\n message.field2.int_field = 123\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"third_party/protobuf/python/google/protobuf/internal/keywords_test.py","file_name":"keywords_test.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"633961883","text":"class TreeNode(object):\n def __init__(self, value, height=1, lchild=None, rchild=None):\n self.value = value\n self.height = height\n self.lchild = lchild\n self.rchild = rchild\n\n def __repr__(self):\n return f'TreeNode({self.value})'\n\n\nclass AVLTree(object):\n def 
__init__(self):\n self.root = None\n\n def get_height(self, node):\n if isinstance(node, TreeNode):\n return node.height\n return 0\n\n def insert(self, value):\n def _insert(value, node):\n if not isinstance(node, TreeNode):\n node = TreeNode(value)\n else:\n if value < node.value:\n node.lchild = _insert(value, node.lchild)\n node.height = self.__update_height(node)\n if self.get_balance_factor(node) == 2:\n if self.get_balance_factor(node.lchild) == 1:\n node = self.__right_rotation(node)\n elif self.get_balance_factor(node.lchild) == -1:\n node.lchild = self.__left_rotation(node.lchild)\n node = self.__right_rotation(node)\n else:\n node.rchild = _insert(value, node.rchild)\n node.height = self.__update_height(node)\n if self.get_balance_factor(node) == -2:\n if self.get_balance_factor(node.rchild) == -1:\n node = self.__left_rotation(node)\n elif self.get_balance_factor(node.rchild) == 1:\n node.rchild = self.__right_rotation(node.rchild)\n node = self.__left_rotation(node)\n return node\n self.root = _insert(value, self.root)\n\n def __left_rotation(self, node):\n root = node\n new_root = root.rchild\n root.rchild = new_root.lchild\n new_root.lchild = root\n root.height = self.__update_height(root)\n new_root.height = self.__update_height(new_root)\n return new_root\n\n def __right_rotation(self, node):\n root = node\n new_root = root.lchild\n root.lchild = new_root.rchild\n new_root.rchild = root\n root.height = self.__update_height(root)\n new_root.height = self.__update_height(new_root)\n return new_root\n\n def get_balance_factor(self, node):\n return self.get_height(node.lchild) - self.get_height(node.rchild)\n\n def __update_height(self, node):\n return max(self.get_height(node.lchild), self.get_height(node.rchild)) + 1\n\n def __repr__(self):\n return f'AVLTree({self.root})'\n\n\ndef main():\n _ = int(input())\n nodes = map(int, input().split())\n avl = AVLTree()\n for node in nodes:\n avl.insert(node)\n print(avl.root.value)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"PTA/PAT_A/Python3/A1066_AC.py","file_name":"A1066_AC.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"392430119","text":"import json\nimport re\nimport javalang\nimport sys\nimport random\nimport subprocess\nimport os\nimport shutil\nimport argparse\nimport numpy as np\n\nMAX_PULL_TIME = 60\n\ndef extractGitLog():\n\tsmall = open('bugsLarge.json','r')\n\n\t#smallStr = open('/home/robin/Documents/one-line-bug-dataset/BugsSmall.txt', 'w')\n\t#largeStr = open('/home/robin/Documents/one-line-bug-dataset/BugsLarge.txt', 'w')\n\n\tsmallData = json.load(small)\n\n\tProjDir = \"/home/robin/Documents/projects\"\n\tif not os.path.isdir(ProjDir):\n\t\tos.makedirs(ProjDir)\n\n\tprojs = list()\n\n\tfor idx, sd in enumerate(smallData):\n\t\tprojectName = sd[\"projectName\"]\n\t\tprojs.append(projectName)\n\t\t\n\t\t#lineNum = sd[\"lineNum\"]\n\n\tprojs = set(projs)\n\n\tfor proj in projs:\n\n\t\tp = proj.split('.')[-1]\n\t\tbugProj = os.path.join(ProjDir, p)\n\t\tprint(\"Currently pulling: \" + p)\n\t\t\n\t\tif not os.path.isdir(bugProj):\n\t\t\tcmd = \"\"\n\t\t\tcmd += \"cd \" + ProjDir + \";\"\n\t\t\tcmd += \"git clone https://github.com/\" + proj.replace('.', '/') + \".git\"\n\t\t\tprocess = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n\t\t\ttry:\n\t\t\t\toutput, error = process.communicate(timeout=MAX_PULL_TIME)\n\t\t\texcept 
subprocess.TimeoutExpired:\n\t\t\t\tprocess.kill()\n\t\t\t\tprocess.wait()\n\t\t\t\tcontinue\n\t\t\n\t\tresultDir = os.path.join(\"/home/robin/Documents/dataset-analysis/results\", proj.replace('.', '___'))\n\t\tif not os.path.isdir(resultDir):\n\t\t\tos.makedirs(resultDir)\n\t\tcmd = \"\"\n\t\tcmd += \"cd \" + bugProj + \";\"\n\t\t#We design the commit in specific formats for further operation:\n\t\t# -----\n\t\t# Sha\n\t\t# k files changed, p insertions, q deletions\n\t\tcmd += \"git log --shortstat --pretty=format:'-----%n%H' --follow *.java > \" + os.path.join(resultDir, \"java_stats.txt\") + \";\"\n\t\t\n\t\n\t\tsubprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n\n\t\tif os.path.isdir(bugProj):\n\t\t\tshutil.rmtree(bugProj)\n\t'''\n\n\tbaseDirLarge = \"/home/robin/Documents/DataSetLarge\"\n\tif not os.path.isdir(baseDirLarge):\n\t\tos.makedirs(baseDirLarge)\n\n\tbugBaseDirLarge = os.path.join(baseDirLarge, \"buggy\")\n\tif not os.path.isdir(bugBaseDirLarge):\n\t\tos.makedirs(bugBaseDirLarge)\n\n\tpatchBaseDirLarge = os.path.join(baseDirLarge, \"patch\")\n\tif not os.path.isdir(patchBaseDirLarge):\n\t\tos.makedirs(patchBaseDirLarge)\n\n\n\tfor idx, sd in enumerate(largeData):\n\t\tprojectName = sd[\"projectName\"]\n\t\tproj = projectName.split('.')[-1]\n\t\tpatchSHA1 = sd[\"commitSHA1\"]\n\t\tpatchFile = sd[\"commitFile\"]\n\t\t#lineNum = sd[\"lineNum\"]\n\t\tbugDir = os.path.join(bugBaseDirLarge, str(idx))\n\t\tif not os.path.isdir(bugDir):\n\t\t\tos.makedirs(bugDir)\n\n\t\tpatchDir = os.path.join(patchBaseDirLarge, str(idx))\n\t\tif not os.path.isdir(patchDir):\n\t\t\tos.makedirs(patchDir)\n\n\t\tbugProj = os.path.join(ProjDir, proj)\n\t\tif not os.path.isdir(bugProj):\n\t\t\tcmd = \"\"\n\t\t\tcmd += \"cd \" + ProjDir + \";\"\n\t\t\tcmd += \"git clone https://github.com/\" + projectName.replace('.', '/') + \".git\"\n\t\t\tresult = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n\t\tcmd = \"\"\n\t\tcmd += \"cd \" + bugProj + \";\"\n\t\tcmd += \"git checkout \" + patchSHA1 + \"^\" + \";\"\n\t\tcmd += \"cp \" + patchFile + \" \" + bugDir + \";\"\n\t\tcmd += \"git checkout \" + patchSHA1 + \";\"\n\t\tcmd += \"cp \" + patchFile + \" \" + patchDir\n\t\tsubprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n\t'''\n\n\ndef extractSingleFileChange():\n\tprojDir = \"/home/robin/Documents/dataset-analysis/tmpProject\"\n\tprevDataDir = \"/home/robin/Documents/dataset-analysis/data/prev\"\n\tpostDataDir = \"/home/robin/Documents/dataset-analysis/data/post\"\n\tstats = \"/home/robin/Documents/dataset-analysis/results\"\n\toneFileChanges = list()\n\n\tif not os.path.isdir(projDir):\n\t\tos.makedirs(projDir) #Dir to store java projects\n\n\t#projs = os.listdir(\"/home/robin/Documents/dataset-analysis/results/\")\n\tprojs = os.listdir(stats)\n\tfileCnt = 0\n\t\n\tfor proj in projs:\n\t\tstatsFile = os.path.join(proj, 'java_stats.txt')\n\t\tsingleFileCommit = list()\n\t\twith open(os.path.join(stats, statsFile), 'r') as sf:\n\t\t\tgitLogs = list(filter(None, sf.read().split(\"-----\\n\"))) #each element is a commit\n\t\t\tfor gl in gitLogs:\n\t\t\t\tif \"1 file changed\" in gl:\n\t\t\t\t\tcommit = list(filter(None, gl.split('\\n')))\n\t\t\t\t\tsingleFileCommit.append(commit[0])\n\n\t\towner, repo = proj.split('___')\n\t\tbugProj = os.path.join(projDir, repo)\n\t\tprint(\"Currently pulling: \" + repo)\n\t\tif not os.path.isdir(bugProj):\n\t\t\t#pull the project\n\t\t\tcmd = \"\"\n\t\t\tcmd += \"cd \" + 
projDir + \";\"\n\t\t\tcmd += \"git clone https://github.com/\" + proj.replace('___', '/') + \".git\"\n\t\t\tprocess = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n\t\t\ttry:\n\t\t\t\toutput, error = process.communicate(timeout=MAX_PULL_TIME)\n\t\t\t\tprint (repo + \" is pulled.\")\n\t\t\texcept subprocess.TimeoutExpired:\n\t\t\t\tprocess.kill()\n\t\t\t\tprocess.wait()\n\t\t\t\tcontinue\n\t\t# get the name of modified file\n\t\tfor sha in singleFileCommit:\n\t\t\tprint(\"Currently processing: No. \" + str(fileCnt) + \" sample.\")\n\t\t\tprevDir = os.path.join(prevDataDir, str(fileCnt))\n\t\t\tpostDir = os.path.join(postDataDir, str(fileCnt))\n\t\t\tif not os.path.isdir(prevDir):\n\t\t\t\tos.makedirs(prevDir) #Dir to store original files\n\t\t\tif not os.path.isdir(postDir):\n\t\t\t\tos.makedirs(postDir)\n\n\t\t\tdetail = dict()\n\t\t\tdetail[\"index\"] = str(fileCnt)\n\t\t\tdetail[\"Project\"] = proj\n\t\t\tdetail[\"Commit\"] = sha\n\t\t\tcmd = \"\"\n\t\t\tcmd += \"cd \" + bugProj + \";\"\n\t\t\tcmd += \"git diff --name-only \" + sha + \" \" + sha + '^ ' + \"*.java\" + ';'\n\t\t\tresult = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n\t\t\tresult = result.stdout.decode('utf-8')\n\t\t\tresult = list(filter(None, result.split('\\n')))\n\n\t\t\tif len(result) > 1:\n\t\t\t\tprint (\"More than 1 file modified\")\n\t\t\t\tprint (result)\n\t\t\tmodifiedFile = result[0]\n\t\t\tdetail[\"Modified\"] = modifiedFile\n\t\t\toneFileChanges.append(detail)\n\n\t\t\tcmd = \"\"\n\t\t\tcmd += \"cd \" + bugProj + \";\"\n\t\t\tcmd += \"git checkout \" + sha + \";\"\n\t\t\tcmd += \"cp \" + modifiedFile + \" \" + postDir + ';'\n\t\t\tcmd += \"git checkout \" + sha + \"^\" + \";\"\n\t\t\tcmd += \"cp \" + modifiedFile + \" \" + prevDir + \";\"\n\t\t\t\n\t\t\tsubprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n\t\t\tfileCnt += 1\n\n\t\tif os.path.isdir(bugProj):\n\t\t\tshutil.rmtree(bugProj)\n\n\twith open(\"OneFileChanges.json\", 'w') as cf:\n\t\tcf.write(json.dumps(oneFileChanges, indent=4))\n\n\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--type\", help=\"type of extracting: [log, file]\")\n\targs = parser.parse_args()\n\tif args.type == 'log':\n\t\textractGitLog()\n\telif args.type == 'file':\n\t\textractSingleFileChange()","sub_path":"TrainingDataGeneration/extractOneFileChange.py","file_name":"extractOneFileChange.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"615637694","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport copy\nimport pandas as pd\nfrom noise import OUNoise\nfrom replay_buffer import BasicBuffer\nfrom utils import *\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nobs_dim = 30\naction_dim = 1\nhidden_size = 100\nmax_episodes = 60\nbatch_size = 200\ngamma = 0.99\ntau = 0.001\nbuffer_maxlen = 10000\nactor_lr = 1e-3\ncritic_lr = 1e-3\nDELTA_T = 0.1\naction_high = 20\naction_low = -20\nREACTION_TIME = 10\npath = './train40/'\n\n\nclass Environment:\n def __init__(self):\n # state = [v_f(t), Δv(t), Δs(t)]\n self.v_f = 0\n self.delta_v = 0\n self.delta_s = 0\n # next state = [v_f(t + 1), Δv(t + 1), Δs(t + 1)]\n self.v_f_next = 0\n self.delta_v_next = 0\n self.delta_s_next = 0\n self.reward = 0\n\n def step(self, a, v_l_next, s_f, 
v_f_obs, s_f_obs, state):\n # state = [v_f(t), Δv(t), Δs(t)]\n self.v_f = state[27]\n self.delta_v = state[28]\n self.delta_s = state[29]\n\n # v_f(t+1) = v_f(t) + a * Δt\n self.v_f_next = self.v_f + a * DELTA_T\n # Δv(t+1) = v_l(t+1) - v_f(t+1)\n self.delta_v_next = v_l_next - self.v_f_next\n # Δs(t+1) = Δs(t) + (Δv(t+1) + Δv(t)) * Δt / 2\n self.delta_s_next = self.delta_s + 0.5 * DELTA_T * (self.delta_v_next + self.delta_v)\n\n # get next state\n # next_state = [v_f_next, delta_v_next, delta_s_next]\n next_state = np.array([self.v_f_next, self.delta_v_next, self.delta_s_next], dtype=np.float32)\n next_state = np.concatenate([state[3:], next_state])\n\n # reward = -log(|v_f_next / v_f_obs - 1| + 1e-8)\n self.reward_v = - np.log(np.abs(self.v_f_next / v_f_obs - 1) + 1e-8)\n\n return next_state, np.array(self.reward_v, dtype=np.float32)\n\n\nclass Actor(nn.Module):\n\n def __init__(self):\n super(Actor, self).__init__()\n\n self.linear1 = nn.Linear(obs_dim, hidden_size)\n self.linear2 = nn.Linear(hidden_size, action_dim)\n self.tanh = nn.Tanh()\n self.max_action = action_high\n\n nn.init.normal_(self.linear1.weight, mean=0, std=0.1)\n nn.init.uniform_(self.linear1.bias)\n nn.init.normal_(self.linear2.weight, mean=0, std=0.1)\n nn.init.uniform_(self.linear2.bias)\n\n def forward(self, obs):\n x = self.tanh(self.linear1(obs))\n x = self.tanh(self.linear2(x)) * self.max_action\n return x\n\n\nclass Critic(nn.Module):\n\n def __init__(self):\n super(Critic, self).__init__()\n\n self.la = nn.Linear(action_dim, obs_dim)\n self.linear1 = nn.Linear(obs_dim + obs_dim, hidden_size)\n self.linear2 = nn.Linear(hidden_size, 1)\n self.tanh = nn.Tanh()\n\n nn.init.normal_(self.la.weight, mean=0, std=0.1)\n nn.init.uniform_(self.la.bias)\n nn.init.normal_(self.linear1.weight, mean=0, std=0.1)\n nn.init.uniform_(self.linear1.bias)\n nn.init.normal_(self.linear2.weight, mean=0, std=0.1)\n nn.init.uniform_(self.linear2.bias)\n\n def forward(self, x, a):\n x = x.view([batch_size, -1])\n a = self.la(a)\n xa_cat = torch.cat([x, a], dim=1)\n xa = self.tanh(self.linear1(xa_cat))\n q = self.linear2(xa)\n\n return q\n\n\nclass DDPGAgent:\n\n def __init__(self, env, gamma, tau, buffer_maxlen, critic_learning_rate, actor_learning_rate):\n\n # hyperparameters\n self.env = env\n self.gamma = gamma\n self.tau = tau\n\n # initialize actor and critic networks\n self.actor = Actor().to(device)\n self.critic = Critic().to(device)\n\n self.actor_target = copy.deepcopy(self.actor)\n self.critic_target = copy.deepcopy(self.critic)\n\n # Optimizers\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_learning_rate)\n self.critic_optimizer = optim.RMSprop(self.critic.parameters(), lr=critic_learning_rate, alpha=0.9)\n\n self.replay_buffer = BasicBuffer(buffer_maxlen)\n\n def load(self):\n self.actor.load_state_dict(torch.load('./trained_para/ddpgrt_actor.pth'))\n self.critic.load_state_dict(torch.load('./trained_para/ddpgrt_critic.pth'))\n self.actor_target.load_state_dict(torch.load('./trained_para/ddpgrt_actor_target.pth'))\n self.critic_target.load_state_dict(torch.load('./trained_para/ddpgrt_critic_target.pth'))\n print('Parameters Loaded')\n\n def get_action(self, obs):\n state = torch.FloatTensor(obs).to(device)\n action = self.actor(state)\n action = action.cpu().data.numpy().flatten()\n return action\n\n def update(self, batch_size):\n\n # sample random batch from replay buffer\n state_batch, action_batch, reward_batch, next_state_batch, _ = self.replay_buffer.sample(batch_size)\n 
state_batch = torch.FloatTensor(state_batch).to(device)\n action_batch = torch.FloatTensor(action_batch).to(device)\n reward_batch = torch.FloatTensor(reward_batch).to(device)\n next_state_batch = torch.FloatTensor(next_state_batch).to(device)\n\n next_actions = self.actor_target.forward(next_state_batch)\n target_Q = self.critic_target(next_state_batch, next_actions)\n expected_Q = reward_batch + (self.gamma * target_Q).detach()\n\n curr_Q = self.critic.forward(state_batch, action_batch)\n\n # update critic\n critic_loss = F.mse_loss(curr_Q, expected_Q.detach())\n\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # update actor\n actor_loss = - self.critic.forward(state_batch, self.actor.forward(state_batch)).mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # update target networks\n for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):\n target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n\n for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):\n target_param.data.copy_(param.data * self.tau + target_param.data * (1.0 - self.tau))\n\n\ndef calculate_rmspe(data, actor):\n # data = [v_f(t), Δv(t), Δs(t), v_l(t + 1), s_f(t), v_f(t + 1), s_f(t + 1)]\n v_f_next_set = []\n state = np.reshape(data[0, 0: 30], newshape=[-1])\n\n for k in range(len(data)):\n a = actor(torch.from_numpy(state).to(device))\n # v_f(t+1) = v_f(t) + a * Δt\n v_f_next = state[27] + a.item() * DELTA_T\n v_f_next_set.append(v_f_next)\n # Δv(t+1) = v_l(t+1) - v_f(t+1)\n delta_v_next = data[k, 30] - v_f_next\n # Δs(t+1) = Δs(t) + (Δv(t+1) + Δv(t)) * Δt / 2\n delta_s_next = state[29] + 0.5 * DELTA_T * (state[28] + delta_v_next)\n\n # update state\n next_state = np.array([v_f_next, delta_v_next, delta_s_next], dtype=np.float32)\n state = np.concatenate([state[3:], next_state])\n\n v_f_next_set = np.reshape(v_f_next_set, newshape=[-1, 1])\n v_f_obs = np.reshape(data[:, 32], newshape=[-1, 1])\n\n # calculate RMSPE\n numerator = np.sum(np.square(v_f_obs - v_f_next_set))\n denominator = np.sum(np.square(v_f_obs))\n RMSPEv = np.sqrt(numerator / denominator)\n return RMSPEv\n\n\n# read data\ndataset = np.zeros([30, 400, 34], dtype=np.float32)\n\nfor i in range(30):\n table_i = pd.read_csv(path + \"train\" + ((str)(i + 1)) + \".csv\", sep=\",\", header=None, skiprows=1)\n train_data_i = getData(table_i)\n train_data_i = train_data_i[100: 500]\n dataset[i] = train_data_i\n\nprint('Reading data finished')\n\n# train\nenv = Environment()\nagent = DDPGAgent(env, gamma, tau, buffer_maxlen, critic_learning_rate=critic_lr, actor_learning_rate=actor_lr)\nagent.load()\nnoise = OUNoise()\ncounter = 0\n\nfor step in range(max_episodes):\n np.random.shuffle(dataset)\n for r in range(30):\n train_data = dataset[r]\n state = train_data[0, 0: obs_dim]\n\n for i in range(len(train_data)):\n counter = counter + 1\n if counter < 7000:\n action = np.random.normal(0, 1, size=action_dim)\n state = np.reshape(state, newshape=[obs_dim])\n next_state, reward = env.step(action.item(),\n train_data[i, 30],\n train_data[i, 31],\n train_data[i, 32],\n train_data[i, 33],\n state)\n agent.replay_buffer.push(state, action, reward, next_state, done=0)\n state = next_state\n else:\n state = np.reshape(state, newshape=[1, obs_dim])\n action = agent.get_action(state) + noise.sample()\n state = np.reshape(state, newshape=[obs_dim])\n next_state, reward = 
env.step(action.item(),\n train_data[i, 30],\n train_data[i, 31],\n train_data[i, 32],\n train_data[i, 33],\n state)\n agent.replay_buffer.push(state, action, reward, next_state, done=0)\n state = next_state\n if (i+1) % 200 == 0:\n agent.update(batch_size)\n","sub_path":"DDPGRT.py","file_name":"DDPGRT.py","file_ext":"py","file_size_in_byte":9572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"341865855","text":"\"\"\"\r\nDefines `Interface`.\r\n\r\nInstantiates the module-level logger with the appropriate naming\r\nconvention.\r\n\"\"\"\r\n\r\nimport logging\r\nimport json\r\nimport os\r\nimport platform\r\nfrom abc import ABC\r\nfrom pycparser import c_ast, parse_file\r\n\r\nfrom verifier.verifier import Verifier\r\nfrom exception.exception import NoneFilePathError\r\n\r\nLOGGER = logging.getLogger(__name__)\r\n\r\n\r\nclass Interface(ABC):\r\n \"\"\"\r\n Define the object responsible for file I/O and `json` interaction.\r\n\r\n `OUT_FILE` is the file name of the final \"bundle\" that is ultimately\r\n dropped to disk under the out/ directory. If that file name needs to\r\n be changed programmatically, this is the place to do it.\r\n\r\n `OUT_DIR` is the directory in which the `OUT_FILE` is placed. This\r\n class-variable exists as a convenient way for `Verifier` to test\r\n directory structure validity.\r\n\r\n `OUT_FILE_PATH` is the fully qualified path to the \"bundle\".\r\n \"\"\"\r\n\r\n OUT_FILE = \"bundle.json\"\r\n OUT_DIR = os.getcwd() + \"/out/\"\r\n OUT_FILE_PATH = os.getcwd() + \"/out/\" + OUT_FILE\r\n\r\n def __init__(self) -> None:\r\n \"\"\"\r\n Initialize the `Interface` object.\r\n\r\n `self.ast` contains the current AST for whichever file is\r\n being processed at the time. On each subsequent file load,\r\n the `self.ast` member variable is reset.\r\n\r\n `self.json_data` contains the \"pretty-formatted\" json string\r\n data that is eventually written to disk.\r\n\r\n :return: returns nothing\r\n \"\"\"\r\n self.ast = None\r\n self.json_data = None\r\n\r\n def load_new_ast(self, file_path: str = \"\") -> c_ast.FileAST:\r\n \"\"\"\r\n Load a new abstract syntax tree (AST).\r\n\r\n Check file path validity before selecting which type of clang\r\n executable to use for PycParser's `parse_file`.\r\n\r\n :param file_path: file to be parsed\r\n :return self.ast: PycParser AST\r\n \"\"\"\r\n # PycParser requires a fully-qualified and valid file path\r\n # for any file to be properly parsed, therefore if a None-type\r\n # is encountered, immediately raise\r\n if not file_path:\r\n raise NoneFilePathError(\"File path is not fully qualified\")\r\n\r\n # While the requirements of the project list LLVM and associated\r\n # developer packages, there are sometimes differences between\r\n # clang file extensions of Windows vs. Unix\r\n clang_path = \"clang\"\r\n if platform.system() == \"Windows\":\r\n clang_path = \"clang.exe\"\r\n\r\n # Files of any size are supported, with the limits of execution\r\n # falling only on available user hardware. 50 megabytes of C\r\n # code in one file is a good place to draw the line\r\n size_mb = os.path.getsize(file_path) >> 20\r\n if size_mb > 50:\r\n LOGGER.warning(\"File size exceeds 50MB\")\r\n\r\n # PycParser offers a few different ways to generate ASTs but the\r\n # following is by far the most clean. 
Clang is well developed\r\n # as a C preprocessor and installed by default on OS X\r\n self.ast = parse_file(file_path,\r\n use_cpp=True,\r\n cpp_path=clang_path,\r\n cpp_args=['-E', '-Iutils/fake_libc_include'])\r\n\r\n return self.ast\r\n\r\n def convert_dict_to_json(self, data: dict) -> None:\r\n \"\"\"\r\n Convert dictionary to `json`-pretty-formatted string.\r\n\r\n :param data: Master `Record` dictionary of string: function\r\n :return: returns nothing\r\n \"\"\"\r\n # The call to dumps returns a sorted and indented string, instead\r\n # of the json object returned by the more common dump. In this case,\r\n # dumps needed to be used so any special escape slashes can\r\n # be properly stripped\r\n self.json_data = json.dumps(data, indent=4, sort_keys=True)\r\n\r\n # Quick reformat of the member variable `json_data` to remove\r\n # all \"\\\\\" and replace with \"\\\"\r\n self.process_out_data()\r\n\r\n # If the conversion comes back with nothing or just {}, that is\r\n # cause for notification but not error or warning. Some files\r\n # will result in no unique strings being found\r\n if not self.json_data:\r\n LOGGER.warning(\"Empty bundle\")\r\n\r\n def process_out_data(self) -> None:\r\n \"\"\"\r\n Strip string of double backslashes and replace with single.\r\n\r\n Any double backslashes that were previously used to escape\r\n special characters need to be completely stripped. If not,\r\n the bundle dict keys will not contain the exact representation\r\n of the strings that exist in the target file.\r\n\r\n :return: returns nothing\r\n \"\"\"\r\n self.json_data = self.json_data.replace(\"\\\\\\\\\", \"\\\\\")\r\n\r\n def drop_bundle_to_disk(self, data) -> None:\r\n \"\"\"\r\n Write dictionary as json string to out/ directory.\r\n\r\n :return: returns nothing\r\n \"\"\"\r\n # Opening files using \"w\" truncates existing content or creates\r\n # a new file if it doesn't already exist. 
Save the old file\r\n # somewhere else or under a different name if persistence\r\n # between program runs is important\r\n with open(self.OUT_FILE_PATH, \"w\") as outfile:\r\n outfile.write(data)\r\n\r\n # Perform several checks on the validity of both out/ and on\r\n # the bundle itself\r\n Verifier.check_bundle_creation(self.OUT_DIR, self.OUT_FILE_PATH)\r\n","sub_path":"pkg/build/lib/interface/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"277332257","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport os\nimport json\nimport matplotlib.pyplot as plt\n\n################################################################################\n\n# output folder\nroot_folder = './output/'\n\n################################################################################\n\n# bins to plot\n#\n# mass bin\nMh_bins = [[12.5, 15.0]]\n#\n# redshift bins\n# zbins = [[0.15,0.30],[0.30,0.45],[0.45,0.60],[0.60,0.75],[0.75,0.90]]\nzbins = [[0.15,0.30],[0.30,0.45],[0.45,0.60]]\n\n################################################################################\n\n# plotting setting\nfigsize = [3.5,3.]\nfontsize = 10.\nccolor = 'crimson'\nscolor = 'darkslateblue'\ntcolor = 'k'\ncstyle = '--'\nsstyle = '--'\ntstyle = '-'\nlegend_loc = 'best'\nframeon = False\nlegend_font_size = 9\nbbox_inches = 'tight'\nxlabel = r'$M_{h} \\, [M_{\\rm vir} \\, h^{-1}]$'\nylabel = r'$\\langle N(M_{h}) \\rangle$'\nxleglabel = 'Cen'\nyleglabel = 'Sat'\ntleglabel = 'Tot'\nxscale = 'log'\nyscale = 'log'\ntitle_font_size = 8\ndpi = 500\nxlims = [10.**12.5, 10.**15.0]\nylims = [0.1, 30.]\n\n################################################################################\n\ndef plot_awesome(dpi=dpi,fontsize=fontsize):\n # import pyplot\n import matplotlib.pyplot as plt\n # set some parameters to make plots prettier\n plt.rc('savefig' , dpi=dpi )\n plt.rc('figure' , dpi=dpi )\n plt.rc('text' , usetex=True )\n plt.rc('font' , size=fontsize )\n plt.rc('xtick' , direction='in' )\n plt.rc('ytick' , direction='in' )\n plt.rc('xtick.major', pad=5 )\n plt.rc('xtick.minor', pad=5 )\n plt.rc('ytick.major', pad=5 )\n plt.rc('ytick.minor', pad=5 )\n plt.rc('lines' , dotted_pattern=[0.5,1.1])\n return\n\ndef plot_footprint(mask, mhdr, dpi=500, file_name=None):\n \"\"\"\n Helper function to plot footprint\n \"\"\"\n import healpy as hp\n # make the map\n hpmap = np.zeros(hp.nside2npix(mhdr['NSIDE']))\n hpmap[mask['HPIX']] = mask['HPIX']\n hp.mollview(hpmap)\n hp.graticule()\n plt.savefig(out_folder+file_name, dpi=dpi)\n return\n\n################################################################################\n\ndef plot_HOD(chod, shod, mmeans,\n figsize = [3.5,3.], fontsize=12.,\n ccolor='crimson', scolor='darkslateblue', tcolor='k',\n cstyle='--', sstyle='--', tstyle='-',\n legend_loc='best', frameon=False, legend_font_size=10,\n dpi=500, bbox_inches='tight',\n xlabel=r'$M_{h} \\, [M_{\\rm vir} \\, h^{-1}]$', ylabel=r'$\\langle N(M_{h}) \\rangle$',\n xleglabel='Cen', yleglabel='Sat', tleglabel='Tot',\n xscale='log', yscale='log',\n xlims=None, ylims=None,\n title=None, title_font_size=6,\n figname=None):\n \"\"\"\n Plot the HOD results\n \"\"\"\n # make the plot\n plt.figure(figsize=(figsize))\n # do the plotting\n plt.plot(mmeans, chod , c=ccolor, ls=cstyle, label=xleglabel)\n plt.plot(mmeans, shod , c=scolor, ls=sstyle, label=yleglabel)\n plt.plot(mmeans, shod+chod, c=tcolor, 
ls=tstyle, label=tleglabel)\n # axis scale\n plt.xscale(xscale)\n plt.yscale(yscale)\n # axis labels\n plt.xlabel(xlabel, fontsize=fontsize)\n plt.ylabel(ylabel, fontsize=fontsize)\n # legend settings\n plt.legend(loc=legend_loc, frameon=frameon, fontsize=legend_font_size)\n # add title\n if title is not None:\n plt.title(title, fontsize=title_font_size)\n # set axis limits\n if xlims is not None:\n plt.xlim(xlims)\n if ylims is not None:\n plt.ylim(ylims)\n # save plot\n plt.savefig(figname, dpi=dpi, bbox_inches=bbox_inches)\n #\n return\n\n################################################################################\n\n# run code to get HOD for all redshift bins\nplot_awesome()\nfor i, _mbin in enumerate(Mh_bins):\n # feedback\n Mh_min, Mh_max = _mbin\n print(\"Doing mass bin [%.2f,%.2f]...\"%(Mh_min,Mh_max))\n temp_folder = root_folder+'Mhalo_%.1f_%.1f/'%(Mh_min,Mh_max)\n for j, _zbin in enumerate(zbins):\n # redshift limits\n zmin, zmax = _zbin\n # feedback\n print(\"--Doing z bin [%.2f,%.2f]...\"%(zmin,zmax))\n # load files\n out_folder = temp_folder+'z_%.2f_%.2f/'%(zmin,zmax)\n #---parameters\n with open(out_folder+'params.json') as json_file:\n params = json.load(json_file)\n #---central HOD\n mmeans, chod = np.loadtxt(out_folder+'hod_cen.txt', unpack=True)\n #---satellite HOD\n _, shod = np.loadtxt(out_folder+'hod_sat.txt', unpack=True)\n #---central galaxy numbers\n _, ccounts = np.loadtxt(out_folder+'num_cen.txt', unpack=True)\n #---satellite galaxy numbers\n _, scounts = np.loadtxt(out_folder+'num_sat.txt', unpack=True)\n #---halo numbers\n _, nhalos = np.loadtxt(out_folder+'halo_num.txt', unpack=True)\n # make the plot\n figure_folder = out_folder+'buzzard_plots/'\n if not os.path.exists(figure_folder): os.mkdir(figure_folder)\n figname = figure_folder+'buzzard_hod_M_%.2f_%.2f_z_%.2f_%.2f.png'%(Mh_min,Mh_max,zmin,zmax)\n title = r'$%.2f < z_{\\ell} < %.2f$'%(zmin,zmax)\n plot_HOD(chod, shod, mmeans,\n figsize=figsize, fontsize=fontsize,\n ccolor=ccolor, scolor=scolor, tcolor=tcolor,\n cstyle=cstyle, sstyle=sstyle, tstyle=tstyle,\n legend_loc=legend_loc, frameon=frameon, legend_font_size=legend_font_size,\n dpi=dpi, bbox_inches=bbox_inches,\n xlabel=xlabel, ylabel=ylabel,\n xleglabel=xleglabel, yleglabel=yleglabel, tleglabel=tleglabel,\n xscale=xscale, yscale=yscale,\n xlims=xlims, ylims=ylims,\n title=title, title_font_size=title_font_size,\n figname=figname)\n print(\" saved HOD plot in \"+figname)\n","sub_path":"simulations/Buzzard/buzzard_plots.py","file_name":"buzzard_plots.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"403551667","text":"\nfrom Graph_ANN import graphAnn\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import GaussianNoise\nfrom keras.utils.vis_utils import plot_model\n\ndef get_model():\n neurons =8\n model = keras.Sequential()\n model.add(layers.Dense(neurons, input_dim=4, activation=\"relu\", name=\"input_layer\" ,use_bias=True))\n model.add(layers.Dense(neurons, activation=\"relu\", name=\"hidden_layer\" ,use_bias=True))\n model.add(layers.Dense(1, activation=\"linear\", name=\"output_layer\", use_bias=False))\n model.compile(keras.optimizers.Adam(learning_rate=0.05), loss='mse' ,metrics=['mse' ] )\n plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\n print(model.summary())\n return model\n\ndef model_fit(model,data_train,results): #To 
train the ANN with the data\n es = EarlyStopping(monitor='val_loss', mode='min',verbose=1,patience=50)\n history = model.fit(data_train,results,epochs=5000,verbose=0,validation_split=0.2,callbacks=[es])\n graphAnn(history.history['loss'],history.history['val_loss'])\n best_weights=model.get_weights() #New weights that are given to the next ANN\n return best_weights\n\nget_model()","sub_path":"DOE Environment/Artificial_Neural_Network.py","file_name":"Artificial_Neural_Network.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"430562714","text":"from .base import BaseNetwork as base\nimport tensorflow as tf\n\n\nclass mstn_train_net(base):\n def __init__(self, cfg, trainable=True):\n self.inputs = []\n self._cfg = cfg\n\n self.img = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='img')\n self.corner_data = tf.placeholder(tf.float32, shape=[None, None, None, 4], name='corner_data')\n self.img_info = tf.placeholder(tf.float32, shape=[None, None], name='img_info')\n self.resize_info = tf.placeholder(tf.float32, shape=[None, None], name='resize_info')\n self.segmentation_mask = tf.placeholder(tf.float32, shape=[None, None, self._cfg.COMMON.RESIZE_HEIGHT,\n self._cfg.COMMON.RESIZE_WIDTH],\n name='segmentation_mask')\n\n self.layers = dict({\n 'img': self.img,\n 'corner_data': self.corner_data,\n 'img_info': self.img_info,\n 'resize_info': self.resize_info,\n 'segmentation_mask': self.segmentation_mask\n })\n self.trainable = trainable\n self.setup()\n\n def setup(self):\n # look up the VGG16 parameters in detail\n (self.feed('img')\n .conv(3, 3, 64, 1, 1, name='conv1_1')\n .conv(3, 3, 64, 1, 1, name='conv1_2')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1')\n .conv(3, 3, 128, 1, 1, name='conv2_2')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool5'))\n\n ############# the above are the first five VGG16 stages ####################################\n\n '''\n after conv5 follow by conv6 conv7 conv8 conv9 conv10 conv11\n '''\n # TODO determine the kernel sizes, channel counts and pooling layers here\n (self.conv(3, 3, 1024, 1, 1, name='conv6_1')\n .conv(3, 3, 1024, 1, 1, name='conv6_2')\n # TODO according to the paper there is no pooling layer between conv6 and conv7\n .conv(3, 3, 1024, 1, 1, name='conv7_1')\n .conv(3, 3, 1024, 1, 1, name='conv7_2')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool7')\n .conv(3, 3, 1024, 1, 1, name='conv8_1')\n .conv(3, 3, 1024, 1, 1, name='conv8_2')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool8')\n .conv(3, 3, 1024, 1, 1, name='conv9_1')\n .conv(3, 3, 1024, 1, 1, name='conv9_2')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool9')\n .conv(3, 3, 1024, 1, 1, name='conv10_1')\n .conv(3, 3, 1024, 1, 1, name='conv10_2')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool10')\n .conv(3, 3, 1024, 1, 1, name='conv11_1')\n .conv(3, 3, 1024, 1, 1, name='conv11_2')\n .max_pool(2, 2, 2, 2, padding='VALID', name='f11'))\n\n # TODO each f layer needs its own output for scoring and regression\n\n # TODO the former is the red layer in the deconv module, the latter the blue layer\n deconv_module_list = ['f10', 'f9', 'f8', 
'f7', 'f4', 'f3']\n\n # TODO the scales used on each feature map\n f_scales = {\n 'f11': [184, 208, 232, 256],\n 'f10': [124, 136, 148, 160],\n 'f9': [88, 96, 104, 112],\n 'f8': [56, 64, 72, 80],\n 'f7': [36, 40, 44, 48],\n 'f4': [20, 24, 28, 32],\n 'f3': [4, 8, 6, 10, 12, 16],\n }\n\n for deconv_m in deconv_module_list:\n self.deconv_module(name=deconv_m)\n\n self.predict_module(name=deconv_m + '_pred')\n # TODO feed the (N, H, W, 1024) prediction-layer output into fully connected layers to prepare the outputs\n\n # the first argument is the number of channels of the feature map\n # every f layer has to output its own prediction targets\n\n # TODO each feature-map pixel outputs k * q * 2 prediction scores; deconv_fc outputs (N,H,W,k * q * 2)\n self.feed(deconv_m + '_pred').deconv_fc(512, 2, name=deconv_m + '_corner_pred_score')\n # TODO each feature-map pixel outputs k * q * 4 prediction targets; deconv_fc outputs (N,H,W,k * q * 4)\n self.feed(deconv_m + '_pred').deconv_fc(512, 4, name=deconv_m + '_corner_pred_offset')\n\n # TODO feat_stride still has to be computed, i.e. how large a stride one pixel on each f layer corresponds to\n self.feed(deconv_m + '_corner_pred_score', 'corner_box', 'img_info', 'gt_default_box') \\\n .corner_detect_layer(scales=f_scales[deconv_m], feat_stride=None, name=deconv_m + '_loss_data', )\n\n # spatial_reshape_layer reshapes the predicted scores to (1, height, -1, 2)\n # spatial_softmax applies a spatial softmax\n self.feed(deconv_m + '_corner_pred_score') \\\n .spatial_reshape_layer(2, name=deconv_m + '_corner_reshape_pred_score') \\\n .spatial_softmax(name=deconv_m + 'corner_cls_prob')\n\n # TODO take f3 f4 f7 f8 f9 to build the segment sensitive map\n # 'f9', 'f8', 'f7', 'f4' need to be rescaled to the size of f3\n # f3 shape (1, h, w, 1024)\n new_size = self.get_output('f3').get_shape()[1:3]\n\n layers_for_segment = ['f9', 'f8', 'f7', 'f4', 'f3']\n for f_layer in layers_for_segment[:-1]:\n self.feed(f_layer).bilinear_upsample(name=f_layer + '_bilinear')\n # TODO the parameters of the conv layers below are still to be decided\n f_bilinear = [x + '_bilinear' for x in layers_for_segment]\n (self.feed(*f_bilinear)\n .layer_n_eltw_sum(name='segment_feature_map')\n .conv(1, 1, 1024, 1, 1, name='bilinear_conv1')\n .batch_normalize()\n .relu()\n .deconv(2, 2, 4, 1, 1, name='bilinear_deconv1')\n .conv(1, 1, 1024, 1, 1, name='bilinear_conv2')\n .batch_normalize()\n .relu()\n # outputs a (1, h, w, 4) feature map, fetched when this branch is computed; relative to f3 the feature map has been enlarged 4x\n .deconv(2, 2, 4, 1, 1, name='bilinear_deconv2')\n # apply a spatial softmax to constrain each pixel's output to the range 0-1\n .spatial_softmax(name='segmentation_pred'))\n\n def build_loss(self):\n \"\"\"\n two parts of loss\n Corner Point Detection\n Position Sensitive Segmentation\n :return:\n \"\"\"\n deconv_module_list = ['f10', 'f9', 'f8', 'f7', 'f4', 'f3']\n \"\"\" corner point loss\n take the predictions on each of the 'f10', 'f9', 'f8', 'f7', 'f4', 'f3' feature maps and compute the loss\n \"\"\"\n all_cross_entropy = 0\n all_regression_loss = 0\n # TODO here the losses of all corner points are computed together; figure out whether this is valid\n for deconv_m in deconv_module_list:\n # shape(h * w * num_scales * 4, 2)\n corner_cls_score = tf.reshape(self.get_output(deconv_m + '_corner_reshape_pred_score'), [-1, 2])\n\n # self.get_output(deconv_m + '_loss_data')[0] holds the labels of shape (1, FM height, FM width, 10)\n # ground-truth label shape (h * w * num_scales * 4)\n corner_label = tf.reshape(self.get_output(deconv_m + '_loss_data')[0], [-1])\n\n # indices where the label is 1, a single-column matrix of shape=(?,1)\n fg_keep = tf.where(tf.equal(corner_label, 1))\n\n # indices where the label is 1 or 0, a single-column matrix\n # only labels that are 0 or 1 are of interest\n roi_keep = tf.where(tf.not_equal(corner_label, -1))\n\n # gather the scores on the rows of the kept labels\n roi_cls_score = tf.gather(corner_cls_score, roi_keep) # shape (N, 2)\n\n # gather the kept labels themselves\n corner_label = tf.gather(corner_label, roi_keep)\n\n # accumulate the cross-entropy loss\n rpn_cross_entropy_n = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=corner_label,\n logits=roi_cls_score)\n all_cross_entropy += tf.reduce_mean(rpn_cross_entropy_n)\n\n # shape (N,H,W,num_scales * 4 
* 4)\n corner_pred_offset = self.get_output(deconv_m + '_corner_pred_offset')\n\n # fetch the ground-truth boxes for the regression\n # corner_pred_target shape (1, height, width, num_scales, 4, 4); the last dimension holds the 4 values this box has to regress\n corner_pred_target = self.get_output(deconv_m + '_loss_data')[1]\n\n \"\"\"\n regression formula\n \"\"\"\n\n # regress only the boxes whose label is 1\n # fetch the predicted values\n corner_pred_offset = tf.gather(tf.reshape(corner_pred_offset, [-1, 4]), fg_keep) # shape (N, 2)\n\n # fetch the target values\n corner_pred_target = tf.gather(tf.reshape(corner_pred_target, [-1, 4]), fg_keep)\n\n corner_loss_box_n = tf.reduce_sum(self.smooth_l1_dist((corner_pred_offset - corner_pred_target)),\n reduction_indices=[1])\n\n corner_regression_loss = tf.reduce_sum(corner_loss_box_n) / (tf.cast(tf.shape(fg_keep)[0], tf.float32) + 1)\n\n all_regression_loss += corner_regression_loss\n\n # TODO branch segment dice loss ; to be continued...\n\n # fetch the prepared segment mask\n segmentation_mask = self.get_output('segmentation_mask')\n # fetch the predicted segmentation\n segmentation_pred = self.get_output('segmentation_pred')\n\n # compute the dice loss\n dice = self.dice_coe(segmentation_pred,segmentation_mask)\n\n dice_loss = 1 - dice\n\n model_loss = all_cross_entropy + all_regression_loss + dice_loss\n\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n(regularization_losses) + model_loss\n return total_loss, model_loss, all_cross_entropy, all_regression_loss, dice_loss\n\n def dice_coe(self, output, target, loss_type='jaccard', axis=[1, 2, 3], smooth=1e-5):\n \"\"\"Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity\n of two batch of data, usually be used for binary image segmentation\n i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match.\n\n Parameters\n -----------\n output : tensor\n A distribution with shape: [batch_size, ....], (any dimensions).\n target : tensor\n A distribution with shape: [batch_size, ....], (any dimensions).\n loss_type : string\n ``jaccard`` or ``sorensen``, default is ``jaccard``.\n axis : list of integer\n All dimensions are reduced, default ``[1,2,3]``.\n smooth : float\n This small value will be added to the numerator and denominator.\n If both output and target are empty, it makes sure dice is 1.\n If either output or target are empty (all pixels are background), dice = ```smooth/(small_value + smooth)``,\n then if smooth is very small, dice close to 0 (even the image values lower than the threshold),\n so in this case, higher smooth can have a higher dice.\n\n Examples\n ---------\n >>> outputs = tl.act.pixel_wise_softmax(network.outputs)\n >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_)\n\n References\n -----------\n - `Wiki-Dice `_\n \"\"\"\n inse = tf.reduce_sum(output * target, axis=axis)\n if loss_type == 'jaccard':\n l = tf.reduce_sum(output * output, axis=axis)\n r = tf.reduce_sum(target * target, axis=axis)\n elif loss_type == 'sorensen':\n l = tf.reduce_sum(output, axis=axis)\n r = tf.reduce_sum(target, axis=axis)\n else:\n raise Exception(\"Unknown loss_type\")\n ## old axis=[0,1,2,3]\n # dice = 2 * (inse) / (l + r)\n # epsilon = 1e-5\n # dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1\n ## new haodong\n dice = (2. * inse + smooth) / (l + r + smooth)\n ##\n dice = tf.reduce_mean(dice)\n return dice\n\n def dice_hard_coe(self, output, target, threshold=0.5, axis=[1, 2, 3], smooth=1e-5):\n \"\"\"Non-differentiable Sørensen–Dice coefficient for comparing the similarity\n of two batch of data, usually be used for binary image segmentation i.e. 
the labels are binary.\n        The coefficient is between 0 and 1; it is 1 for a perfect match.\n\n        Parameters\n        -----------\n        output : tensor\n            A distribution with shape: [batch_size, ....], (any dimensions).\n        target : tensor\n            A distribution with shape: [batch_size, ....], (any dimensions).\n        threshold : float\n            Values above this threshold count as true.\n        axis : list of integer\n            All dimensions are reduced, default ``[1,2,3]``.\n        smooth : float\n            This small value will be added to the numerator and denominator, see ``dice_coe``.\n\n        References\n        -----------\n        - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`_\n        \"\"\"\n        output = tf.cast(output > threshold, dtype=tf.float32)\n        target = tf.cast(target > threshold, dtype=tf.float32)\n        inse = tf.reduce_sum(tf.multiply(output, target), axis=axis)\n        l = tf.reduce_sum(output, axis=axis)\n        r = tf.reduce_sum(target, axis=axis)\n        ## old axis=[0,1,2,3]\n        # hard_dice = 2 * (inse) / (l + r)\n        # epsilon = 1e-5\n        # hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon)\n        ## new haodong\n        hard_dice = (2. * inse + smooth) / (l + r + smooth)\n        ##\n        hard_dice = tf.reduce_mean(hard_dice)\n        return hard_dice","sub_path":"network/mstn_train.py","file_name":"mstn_train.py","file_ext":"py","file_size_in_byte":14227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"125118078","text":"import io\nimport json\nimport os\nimport sys\nfrom simpletransformers.question_answering import QuestionAnsweringModel\nimport platform\nimport requests\nimport pdf2image as pdf2image\n\n\ndef to_list(tensor):\n    return tensor.detach().cpu().tolist()\n\n\nendpoint = 'https://uksouth.api.cognitive.microsoft.com/'\nocr_url = 'https://uksouth.api.cognitive.microsoft.com/vision/v2.1/ocr'\nsubscription_key = 'ffcc4bbd174c4b6e97d0a945aebf8b98'\n\ninputPath = input(\"PDF Path:\")\nprint(\"PDF path:\", inputPath)\n\nprint(\"converting PDF pages to images, please wait\")\npages = None\nif os.path.isfile(inputPath):\n    if platform.system() == 'Windows':\n        pathname = os.path.dirname(sys.argv[0])\n        pathname = os.path.abspath(pathname) + os.path.sep + \"poppler_win\" + os.path.sep + \"bin\"\n        pages = pdf2image.convert_from_path(inputPath, 200, poppler_path=pathname, fmt=\"jpeg\")\n    else:\n        pages = pdf2image.convert_from_path(inputPath, 200, fmt=\"jpeg\")\nelse:\n    print(\"input file doesn't exist\")\n    exit()\n\npageNum = 0\nprevNum = -1\noutputString = \"\"\nwhile pageNum < len(pages):\n    if prevNum != pageNum:\n        pages[pageNum].show()\n    makeQues = input(\"make some questions? 
- T/F, or skip the rest of the pages and output current progress - S, or jump to a page - JUMP\")\n    if makeQues.upper()[0:4] == \"JUMP\":\n        pageNum = int(makeQues[4:])\n    if makeQues.upper() == \"S\":\n        pageNum = len(pages)\n    if makeQues.upper() == \"F\":\n        pageNum += 1\n    if makeQues.upper() == 'T':\n        if prevNum != pageNum:\n            outputString = \"\"\n            imgByteArr = io.BytesIO()\n            pages[pageNum].save(imgByteArr, format=pages[pageNum].format)\n            imgByteArr = imgByteArr.getvalue()\n            headers = {'Ocp-Apim-Subscription-Key': subscription_key, 'Content-Type': 'application/octet-stream'}\n            params = {'language': 'unk', 'detectOrientation': 'true'}\n            response = requests.post(ocr_url, headers=headers, params=params, data=imgByteArr)\n            response.raise_for_status()\n            ocrResult = response.json()\n            ocrResult = json.dumps(eval(str(ocrResult)))\n\n            resultJSONObject = json.loads(ocrResult)\n            regions = resultJSONObject['regions']\n            for region in regions:\n                lines = region['lines']\n\n                for line in lines:\n                    words = line['words']\n                    for word in words:\n                        text = word.get('text')\n                        outputString += text + \" \"\n            prevNum = pageNum\n\n        question = input(\"question:\")\n        to_predict = [{'context': outputString, 'qas': [{'question': question, 'id': '0'}]}]\n        # print(to_predict)\n\n        model = QuestionAnsweringModel('albert', 'ahotrod/albert_xxlargev1_squad2_512', args={'max_seq_length': 512, \"eval_batch_size\": 3, \"version_2_with_negative\": True, 'reprocess_input_data': True, 'overwrite_output_dir': True, 'silent': True})\n        res = model.predict(to_predict, 10)\n        # print(len(res))\n        print(\"answer:\", res[0]['answer'])\n","sub_path":"docQuery.py","file_name":"docQuery.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"181954816","text":"import cv2\nimport numpy as np\nPOLY_FILL_COLOR = (1.0, 1.0, 1.0)\n\n\ndef get_affine_transform(src, src_tri, dst_tri, size):\n    # Given a pair of triangles, find the affine transform.\n    warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))\n\n    # Apply the Affine Transform just found to the src image\n    dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR,\n                         borderMode=cv2.BORDER_REFLECT_101)\n    return dst\n\n\ndef morph_triangular_region(triangle_1, triangle_2, img_1, img_2):\n    # Find the bounding rectangle for each triangle in the form (x, y, w, h)\n    x_1, y_1, w_1, h_1 = cv2.boundingRect(np.float32([triangle_1]))\n    x_2, y_2, w_2, h_2 = cv2.boundingRect(np.float32([triangle_2]))\n\n    # Offset points by left top corner of the respective rectangles\n    offset_triangle_1 = []\n    offset_triangle_2 = []\n\n    # for the coordinates of each point of the triangle, find the offset\n    # move this into a separate function if you need to do it for a lot of triangles\n    for coords in triangle_1:\n        offset_triangle_1.append(((coords[0] - x_1), (coords[1] - y_1)))\n    for coords in triangle_2:\n        offset_triangle_2.append(((coords[0] - x_2), (coords[1] - y_2)))\n\n    # get the mask by filling the triangle to mask pixels outside the desired area\n    mask = np.zeros((h_2, w_2, 3))\n    cv2.fillConvexPoly(mask, np.int32(offset_triangle_2), (1.0, 1.0, 1.0))\n\n    # get only the part of the image we are going to map within the bounding rectangle\n    img_1_within_bounds = img_1[y_1:y_1 + h_1, x_1:x_1 + w_1]\n\n    size_bounds_triangle_2 = (w_2, h_2)\n\n    # apply the affine transform on img_1 based on the triangles\n    transformed_area = get_affine_transform(img_1_within_bounds, offset_triangle_1, offset_triangle_2,\n                                            
size_bounds_triangle_2)\n\n    # remove all parts of the transformed image outside the area we care about (triangle mask)\n    transformed_triangle = transformed_area * mask\n\n    # slice the current area out of the image we are mapping the face to\n    img_2[y_2:y_2 + h_2, x_2:x_2 + w_2] = img_2[y_2:y_2 +\n                                                h_2, x_2:x_2 + w_2] * (POLY_FILL_COLOR - mask)\n    # slice the transformed area back in its place\n    img_2[y_2:y_2 + h_2, x_2:x_2 + w_2] = img_2[y_2:y_2 +\n                                                h_2, x_2:x_2 + w_2] + transformed_triangle\n\n    return img_2\n\n\ndef apply_affine_transformation(delauney, hull_1, hull_2, img_1, img_2):\n    # create a copy of image 2 that we will map the face from image 1 to\n    img_2_with_face_1 = np.copy(img_2)\n\n    # morph each triangular region one at a time\n    for triangle in delauney:\n        triangles_1 = []\n        triangles_2 = []\n\n        # get points within img_1 and img_2 corresponding to the triangle points previously found from the face in img_1\n        for point in triangle:\n            triangles_1.append(hull_1[point])\n            triangles_2.append(hull_2[point])\n\n        # once we have found the points in the landmarks corresponding to the triangle, morph the triangular region from\n        # img_1 to img_2 and return the result that we will modify again with the next triangle\n        morph_triangular_region(triangles_1, triangles_2,\n                                img_1, img_2_with_face_1)\n\n    return img_2_with_face_1\n\n\n# Get the area that was transformed in order to seamlessly clone\ndef calculate_mask(landmarks, img):\n    hull_tuples = []\n\n    hull = []\n    # this is the area that we will be mapping between faces\n    hull_index_to_map = cv2.convexHull(np.array(landmarks), returnPoints=False)\n\n    # find the facial landmark points on both faces that are within the hull of the face we are basing our map off of\n    for i in range(0, len(hull_index_to_map)):\n        hull.append(landmarks[int(hull_index_to_map[i])])\n\n    for points in hull:\n        hull_tuples.append((points[0], points[1]))\n    # create a mask that encompasses the whole image\n    mask = np.zeros(img.shape, dtype=img.dtype)\n\n    # use the empty mask as the input image and the hull tuples as polygon vertices to fill\n    # this fills only the area of the hull\n    cv2.fillConvexPoly(mask, np.int32(hull_tuples), (255, 255, 255))\n\n    # cv2.imshow(\"Image Mask \", mask)\n    # cv2.waitKey(0)\n    # cv2.destroyAllWindows()\n\n    hull_bounding_rectangle = cv2.boundingRect(np.float32([hull]))\n\n    bounding_rectangle_center = (hull_bounding_rectangle[0] + int(hull_bounding_rectangle[2] / 2),\n                                 hull_bounding_rectangle[1] + int(hull_bounding_rectangle[3] / 2))\n\n    # return the mask of the face area and the center of the bounding box which contains the face\n    return mask, bounding_rectangle_center\n\n\n# Smooths and blends the mask to look more natural\ndef merge_mask_with_image(landmarks, img_with_mapped_face, original_img):\n    mask, center = calculate_mask(landmarks, original_img)\n\n    # make the mask smaller\n    kernel = np.ones((20, 20), np.uint8)\n    mask = cv2.erode(mask, kernel, iterations=2)\n\n    # poisson blending to smooth out the edges\n    return cv2.seamlessClone(np.uint8(img_with_mapped_face), original_img, mask, center, cv2.NORMAL_CLONE)\n\n\n# Color Correction\ndef correct_colors(im1, im2, landmarks1):\n    COLOUR_CORRECT_BLUR_FRAC = 0.75\n    LEFT_EYE_POINTS = list(range(42, 48))\n    RIGHT_EYE_POINTS = list(range(36, 42))\n\n    blur_amount = COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm(\n        np.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -\n        np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))\n    blur_amount = int(blur_amount)\n    if blur_amount % 2 == 0:\n        blur_amount += 1\n    
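# (added note) cv2.GaussianBlur requires an odd kernel size, which is why\n    # blur_amount is bumped to odd above; the im1_blur / im2_blur ratio below\n    # rescales im2 so its low-frequency colour matches im1.\n    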
im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)\n im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)\n\n # Avoid divide-by-zero errors.\n im2_blur = im2_blur.astype(int)\n im2_blur += 128*(im2_blur <= 1)\n\n result = im2.astype(np.float64) * im1_blur.astype(np.float64) / im2_blur.astype(np.float64)\n result = np.clip(result, 0, 255).astype(np.uint8)\n\n return result","sub_path":"face_swap.py","file_name":"face_swap.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"387843783","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nclass Soseki4Spider(scrapy.Spider):\n name = 'soseki4'\n allowed_domains = ['www.aozora.gr.jp']\n start_urls = ['https://www.aozora.gr.jp/index_pages/person148.html']\n\n def parse(self, response):\n li_list = response.css('ol > li a')\n for a in li_list:\n href = a.css('::attr(href)').extract_first()\n href2 = response.urljoin(href)\n yield response.follow(\n href2, self.parse_card\n )\n \n def parse_card(self, response):\n title = response.css('title::text').extract_first()\n alist = response.css('table.download tr td a')\n \n for a in alist:\n href = a.css('::attr(href)').extract_first()\n href2 = response.urljoin(href)\n if href2[-4:] != \".zip\": continue\n req = scrapy.Request(\n href2, callback=self.parse_item\n )\n req.meta[\"title\"] = title\n yield req\n \n def parse_item(self, response):\n title = response.meta[\"title\"]\n title = title.replace('図書カード:','').strip()\n fname = title + '.zip'\n with open(fname, \"wb\") as f:\n f.write(response.body)\n","sub_path":"scrapy_test/soseki_list/soseki_list/spiders/soseki4.py","file_name":"soseki4.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"609129198","text":"import pyxhook\nimport pyautogui\nimport os\n\n\ndef OnKeyPress(event):\n k = event.Key\n if k == 'F2':\n pyautogui.scroll(-3)\n if k == 'F3':\n if new_hook.clicked:\n new_hook.clicked = False\n pyautogui.mouseUp()\n else:\n pyautogui.mouseDown()\n new_hook.clicked = True\n if k == 'F4':\n if new_hook.x == 0:\n new_hook.x, new_hook.y = pyautogui.position()\n else:\n x, y = pyautogui.position()\n pyautogui.moveTo(new_hook.x, new_hook.y)\n pyautogui.click()\n pyautogui.moveTo(x, y)\n if k == 'F12':\n new_hook.cancel()\n\nif os.path.exists('position'):\n os.remove('position')\n# instantiate HookManager class\nnew_hook = pyxhook.HookManager()\n# listen to all keystrokes\nnew_hook.KeyDown = OnKeyPress\n# hook the keyboard\nnew_hook.HookKeyboard()\n# start the session\nnew_hook.start()\n","sub_path":"lab/Macro/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"577511957","text":"from pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\nfrom datetime import datetime\nclient = MongoClient()\ndb = client.machiavelli_db_test\ngames = db.machiavelli_games_test\n\n\ndef parseUrl(url):\n d = {}\n for keyvalue in url.split('&'):\n s = keyvalue.split('=')\n if len(s) > 1:\n d[s[0]] = s[1]\n return d\n\n\nclass GameClass():\n def public_test(self, msg):\n return (True, \"Hello World... 
this is a test\\nmsg:\\n\"+msg+'\\n')\n\n    def gameExists(self, gameIdStr):\n        return games.find_one({'_id': ObjectId(gameIdStr)})\n\n    def playerInGame(self, gameIdStr, playerIdStr):\n        return games.find_one({'_id': ObjectId(gameIdStr), 'players': { '$elemMatch': {'_id': ObjectId(playerIdStr)}}})\n\n    def gamePlaying(self, gameIdStr):\n        game = games.find_one({\"_id\": ObjectId(gameIdStr)})\n        if not game:\n            raise ValueError('Game doesn\'t exist!')\n        return game['gameState'] != 'lobby'\n\n    def createRandomName(self):\n        return \"Sir Spank-a-lot\"\n\n    def createNewGame(self, name):\n        if name == '':\n            name = self.createRandomName()\n        playerId = ObjectId()\n        post = {\n            \"players\": [{\"_id\": playerId,\"name\": name, \"char\": '', \"gold\": 0, \"build\": [], \"hand\":[]}],\n            \"deckChars\": [],\n            \"deckBuildings\": [],\n            \"turn\": '',\n            \"kingsMark\": '',\n            \"gameState\": \"lobby\",\n            \"lastActivity\": datetime.utcnow()}\n        games.insert_one(post)\n        return str(playerId)\n\n    def joinGame(self, name, gameIdStr):\n        if self.gamePlaying(gameIdStr):\n            raise ValueError('Can\'t join')\n        if name == '':\n            name = self.createRandomName()\n        playerId = ObjectId()\n        games.update({\"_id\": ObjectId(gameIdStr)},{\"$push\": {\"players\": {\"_id\": playerId,\"name\": name, \"char\": '', \"gold\": 0, \"build\": [], \"hand\":[]}}})\n        return str(playerId)\n\n    def getPlayerDict(self, playerIdStr):\n        game = games.find_one({\"players\": { '$elemMatch': {'_id': ObjectId(playerIdStr)}}})\n        if not game:\n            raise ValueError('Game doesn\'t exist!')\n        return game","sub_path":"GameHandler.py","file_name":"GameHandler.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"328366700","text":"import os\nimport math\nimport requests\nimport json\nfrom ads_lib import get_library\nfrom ads_lib import fix_journal_abbr\nfrom ads_lib import adsresponse_to_dict, dict_to_bib\n\n######### Parameters #########\nexport_filename = 'exportlib.bib'\nexport_format = 'bibtexabs'\n# leave empty to export all your libraries\n# or use comma-separated names of your libraries\nlibrary_name = ''\nbibtex_keyformat = \"%1H%R\"\nsort_format = \"first_author asc\"\n# Use short or long journal names instead of journal TeX abbreviations; \\aj\nfix_journal = True\n######################################\n\n#\n#\nt = json.load(open('mysecrets'))\nmy_token = t['my_token']\nbase_url = \"https://api.adsabs.harvard.edu/v1/biblib\"\nheaders = {'Authorization': \"Bearer \" + my_token,\n           \"Content-type\": \"application/json\"}\n\n# Finds all your libraries\nr = requests.get(base_url+\"/libraries\",\n                 headers=headers)\nall_libraries = r.json()['libraries']\n\nif library_name == '':\n    my_libraries = all_libraries\nelse:\n    libs = library_name.split(',')\n    lib_list = [l.lower().strip() for l in libs]\n    my_libraries = []\n    my_libraries = [\n        lib for lib in all_libraries if lib['name'].lower() in lib_list]\n    if len(my_libraries) == 0:\n        raise NameError(f\"No libraries found named: {lib_list}\")\n\nprint(\"Exporting from {} libraries\".format(str(len(my_libraries))))\n\n# Get bibcodes for each library\nbibs = []\nconfig = {}\nconfig['headers'] = headers\nconfig['url'] = base_url\n\nfor library in my_libraries:\n    #\n    bib = get_library(library['id'], library['num_documents'], config)\n    #\n    bibs.extend(bib)\n\n# Keep unique bibcodes\nmy_bibs = list(set(bibs))\nprint(\"Found {} unique bibcodes\".format(len(my_bibs)))\n# Export in bibtex\ntry:\n    os.remove(export_filename)\nexcept OSError:\n    
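# (added note) the previous export file may simply not exist yet; ignore that case\n    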
pass\n\nexport_url = \"https://api.adsabs.harvard.edu/v1/export/\"+export_format\n\n\nstart = 0\nrows = 2000\nnum_paginates = int(math.ceil(len(my_bibs) / (1.0*rows)))\n\ns1 = start\ns2 = rows\nfout = open(export_filename, 'a')\n\nfor i in range(num_paginates):\n    #\n    querystring = {\"bibcode\": my_bibs[s1:s2],\n                   \"keyformat\": bibtex_keyformat,\n                   \"sort\": sort_format}\n    #\n    response = requests.request(\"POST\",\n                                export_url,\n                                headers=headers,\n                                data=json.dumps(querystring))\n    # turn response into dictionary of references\n    expbib = adsresponse_to_dict(response.json()['export'])\n\n    if fix_journal == True:\n        expbib = fix_journal_abbr(expbib, format='short')\n\n    final_bib = dict_to_bib(expbib)\n\n    fout.write(final_bib)\n\n\n    s1 = s1 + rows\n    s2 = s2 + rows\n\nfout.close()\nprint(response)\n","sub_path":"ads_exportlib.py","file_name":"ads_exportlib.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604017859","text":"import sys\nsys.path.append(\"/usr/lib/freecad-daily/lib/\")\n\nimport FreeCAD\nfrom FreeCAD import Base\nimport Part\n\nclass ImpellerClass:\n    def __init__(self, parameters):\n        self.rInlet = parameters[0]\n        self.rImpeller = parameters[1]\n\n    def makeImpeller(self):\n        S1 = Part.makeCircle(self.rInlet)\n        S2 = Part.makeCircle(self.rImpeller)\n\n        W1 = Part.Wire(S1)\n        W2 = Part.Wire(S2)\n\n        F1 = Part.Face(W1)\n        F2 = Part.Face(W2)\n        F3 = F2.cut(F1)\n\n        return F3\n","sub_path":"Geometry/Impeller.py","file_name":"Impeller.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"440445322","text":"\"\"\"Implementation of the invocation-side open-tracing interceptor.\"\"\"\n\nimport sys\nimport logging\n\nfrom six import iteritems\n\nfrom ._utilities import get_method_type, get_deadline_millis,\\\n    log_or_wrap_request_or_iterator, RpcInfo\nimport opentracing\nfrom opentracing.ext import tags as ot_tags\n\n\nclass _GuardedSpan(object):\n\n    def __init__(self, span):\n        self.span = span\n        self._engaged = True\n\n    def __enter__(self):\n        self.span.__enter__()\n        return self\n\n    def __exit__(self, *args, **kwargs):\n        if self._engaged:\n            return self.span.__exit__(*args, **kwargs)\n        else:\n            return False\n\n    def release(self):\n        self._engaged = False\n        return self.span\n\n\ndef _inject_span_context(tracer, span, metadata):\n    headers = {}\n    try:\n        tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, headers)\n    except (opentracing.UnsupportedFormatException,\n            opentracing.InvalidCarrierException,\n            opentracing.SpanContextCorruptedException) as e:\n        logging.exception('tracer.inject() failed')\n        span.log_kv({'event': 'error', 'error.object': e})\n        return metadata\n    metadata.update(headers)\n    return metadata\n\n\ndef get_method(sss):\n\n    for ss in sss.split(','):\n        if 'method\":' in ss:\n            s = ss.split(':')[1]\n            return s[2:-1]\n    return ''\n\nclass OpenTracingClientInterceptor():\n\n    def __init__(self, tracer, active_span_source, log_payloads,\n                 span_decorator):\n        self._tracer = tracer\n        self._active_span_source = active_span_source\n        self._log_payloads = log_payloads\n        self._span_decorator = span_decorator\n\n    def _start_span(self, method):\n        active_span_context = None\n        if self._active_span_source is not None:\n            active_span = self._active_span_source.get_active_span()\n            if active_span is not None:\n                active_span_context = active_span.context\n        tags = {\n            ot_tags.COMPONENT: 'jsonrpc',\n            ot_tags.SPAN_KIND: 
ot_tags.SPAN_KIND_RPC_CLIENT\n        }\n        return self._tracer.start_span(\n            operation_name=method, child_of=active_span_context, tags=tags)\n\n    def _start_guarded_span(self, *args, **kwargs):\n        return _GuardedSpan(self._start_span(*args, **kwargs))\n\n    def trace_before_request(self, request, headers):\n        with self._start_guarded_span(get_method(request)) as guarded_span:\n            headers = _inject_span_context(self._tracer, guarded_span.span, headers)\n\n            if self._log_payloads:\n                guarded_span.span.log_kv({'request': request})\n\n        return guarded_span, headers\n\n    def trace_after_request(self, response_text, guarded_span):\n        if self._log_payloads:\n            guarded_span.span.log_kv({'response': response_text})\n","sub_path":"jsonrpcclient/opentracing/_client.py","file_name":"_client.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"247207439","text":"\"\"\"\nhttps://adventofcode.com/2018/day/2\n\"\"\"\n\nfrom collections import Counter\nfrom typing import List, Tuple\n\ndef count_letters(id: str, appears: int = 2) -> bool:\n    letters = Counter(id)\n\n    for count in letters.values():\n        if count == appears:\n            return True\n    return False\n\ndef get_checksum(input: List[str]) -> int:\n    appears_two_times=0\n    appears_three_times=0\n    for id in input:\n        appears_two_times += count_letters(id, appears=2)\n        appears_three_times += count_letters(id, appears=3)\n\n    return appears_three_times * appears_two_times\n\n\ndef get_distance(id1: str, id2: str) -> Tuple[int, str]:\n    assert len(id1) == len(id2)\n    distance: int = 0\n    common: List[str] = []\n    for letter1, letter2 in zip(id1, id2):\n        if letter1 != letter2:\n            distance += 1\n        else:\n            common.append(letter1)\n    \n    return distance, \"\".join(common)\n\ndef find_similar_boxes(ids: List[str]) -> str:\n    for id1 in ids:\n        for id2 in ids:\n            distance, common = get_distance(id1, id2)\n            if distance == 1:\n                return common\n    return \"\"\n\n\nassert count_letters(\"abcdef\", appears=2) == False\nassert count_letters(\"bababc\", appears=2) == True\nassert count_letters(\"bababc\", appears=3) == True\n\n\nINPUT1 = [\n    \"abcdef\",\n    \"bababc\",\n    \"abbcde\",\n    \"abcccd\",\n    \"aabcdd\",\n    \"abcdee\",\n    \"ababab\"\n    ]\n\nINPUT2 = [\n    \"abcde\",\n    \"fghij\",\n    \"klmno\",\n    \"pqrst\",\n    \"fguij\",\n    \"axcye\",\n    \"wvxyz\"\n    ]\n\nassert get_checksum(INPUT1) == 12\n\n\nassert get_distance(\"abcde\", \"axcye\") == (2, \"ace\")\nassert get_distance(\"fghij\", \"fguij\") == (1, \"fgij\")\n\nassert find_similar_boxes(INPUT2) == \"fgij\"\n\nif __name__ == \"__main__\":\n\n\n    with open(\"day2_input1.txt\", \"r\") as inp:\n        lines = [line.strip() for line in inp]\n\n    print(get_checksum(lines))\n    print(find_similar_boxes(lines))","sub_path":"day2_InventoryManagementSystem.py","file_name":"day2_InventoryManagementSystem.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"248447365","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import resolve\nfrom django.shortcuts import redirect\n\nfrom .actions import customers, subscriptions\nfrom .conf import settings\n\n\nclass ActiveSubscriptionMiddleware(object):\n\n    def process_request(self, request):\n        if request.user.is_authenticated() and not request.user.is_staff:\n            url_name = resolve(request.path).url_name\n            if url_name not in settings.PINAX_STRIPE_SUBSCRIPTION_REQUIRED_EXCEPTION_URLS:\n                try:\n                    customer = 
customers.get_customer_for_user(request.user)\n                    if subscriptions.current_subscription(customer) is None:\n                        return redirect(\n                            settings.PINAX_STRIPE_SUBSCRIPTION_REQUIRED_REDIRECT\n                        )\n                except ObjectDoesNotExist:\n                    return redirect(settings.PINAX_STRIPE_SUBSCRIPTION_REQUIRED_REDIRECT)\n","sub_path":"pinax/stripe/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"537907477","text":"import requests as r\r\nimport json\r\nimport io\r\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\r\n\r\nstate_url = \"http://blhxjploginapi.azurlane.jp/?cmd=load_server?\"\r\nstate = {\r\n    0: \"**Online**\",\r\n    1: \"**Offline**\"\r\n}\r\n\r\ndiscord_api = {\r\n    \"web_hook\": \"WEBHOOK URL HERE\",\r\n    \"mention\": \"<@&MENTION ROLE ID HERE>\"\r\n}\r\n\r\n\r\ndef update_json(string: str):\r\n    with io.open(\"ALServer.json\", mode=\"w\", encoding=\"utf-8\") as f:\r\n        f.write(string)\r\n        f.close()\r\n\r\n\r\ndef get_json_data(string: str):\r\n    return json.loads(string)\r\n\r\n\r\ndef fetch_new_json():\r\n    try:\r\n        blhx_raw = r.get(state_url).text\r\n    except ConnectionError:\r\n        print(\"Connection Failed, aborting script\")\r\n        exit(1)\r\n    return blhx_raw\r\n\r\n\r\ndef fetch_old_json():\r\n    error_old = False\r\n    try:\r\n        f = io.open(\"ALServer.json\", mode=\"r\", encoding=\"utf-8\")\r\n        blhx_raw_old = f.read()\r\n        f.close()\r\n        if blhx_raw_old is None:\r\n            error_old = True\r\n        else:\r\n            return blhx_raw_old\r\n    except FileNotFoundError:\r\n        error_old = True\r\n    if error_old:\r\n        update_json(fetch_new_json())\r\n        exit()\r\n\r\n\r\ndef state_change_check():\r\n    message_to_print = \"**SERVER STATUS**\\n\"\r\n    new_states = get_json_data(fetch_new_json())\r\n    old_states = get_json_data(fetch_old_json())\r\n    changed = False\r\n    for index in range(len(new_states)):\r\n        if new_states[index][\"state\"] != old_states[index][\"state\"]:\r\n            message_to_print = message_to_print + new_states[index][\"name\"] + \" has gone from \" + state[old_states[index][\"state\"]] + \" to \" + state[new_states[index][\"state\"]] + \"\\n\"\r\n            changed = True\r\n    if changed:\r\n        update_json(fetch_new_json())\r\n        send_alert_discord(message_to_print)\r\n\r\n\r\ndef send_alert_discord(message: str):\r\n    web_hook = DiscordWebhook(url=discord_api[\"web_hook\"], content=discord_api[\"mention\"])\r\n    embed = DiscordEmbed(title='Azu chan\'s news!', description=message, color=1699843)\r\n    # embed.set_image(url='https://cdn.discordapp.com/emojis/554398006045311014.png')\r\n    embed.set_footer(text='Time now')\r\n    embed.set_timestamp()\r\n    web_hook.add_embed(embed)\r\n    web_hook.execute()\r\n\r\n\r\nstate_change_check()\r\n","sub_path":"ALServer.py","file_name":"ALServer.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"289942118","text":"import os\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport sys\nimport datetime\nfrom dotenv import load_dotenv\nimport sqlite3\n\nload_dotenv(\"config.env\")\nTOKEN = os.getenv(\"BOT_TOKEN\")\nGUILD = os.getenv(\"GUILD\")\n\nbot = commands.Bot(command_prefix = '.')\nbot.remove_command(\"help\")\n@bot.event\nasync def on_ready():\n    #Select the guild the bot is connected to\n    for guild in bot.guilds:\n        if guild.name == GUILD:\n            break\n    print(f\"{bot.user} has connected to Discord!\")\n    print(f\"{guild.name}(id: {guild.id})\")\n    members = \"\\n - 
\".join([member.name for member in guild.members])\n print(f\"Guild members:\\n - {members}\")\n\n#Loads all of the cogs\n#removed unnecessary cogs\ninitial_extensions = [\"cogs.moderation\", \"cogs.imgcog\", \"cogs.errorhandler\"]\nif __name__ == \"__main__\":\n for extension in initial_extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n print(f\"Failed to load extension {extension}\", file=sys.stderr)\n\n@bot.event \nasync def on_error(event, *args, **kwargs):\n with open(\"err.log\", \"a\") as f:\n if event == \"on_message\":\n f.write(f\"Unhandled message: {args[0]}\\n\")\n\nbot.run(TOKEN)\n","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"531648133","text":"# Python client for the Nano Node API\n# Tested with Python 2.7.13 and Python 3.6.5\nimport glob\nimport os\nimport core_pb2 as Core\nimport socket\nimport sys\nimport struct\nimport time\nfrom google.protobuf.message import Message\nimport google.protobuf.descriptor_pb2\nfrom google.protobuf.json_format import MessageToJson, Parse\nfrom builtins import bytes\nfrom future.standard_library import install_aliases\ninstall_aliases()\nfrom urllib.parse import urlparse\nfrom threading import Lock\n\nproto_modules = [Core]\n\n# Dynamically import all proto-generated modules\nproto_files = glob.glob('**/*_pb2.py')\nfor proto_file in proto_files:\n basename = os.path.basename(proto_file)\n noext = basename[:-3]\n\n # Already imported\n if noext != \"core_pb2\":\n proto_mod = basename[:-7].capitalize()\n imported = __import__(noext)\n proto_modules.append(imported) \n\nclass Session:\n \"\"\"A node session \"\"\"\n def __init__(self, conn):\n \"\"\"Initialize client with an IPC connection. 
The connection object must provide send_request method\"\"\"\n self.conn = conn\n self.lock = Lock()\n\n # Extract request types\n #self.query_types = [v for v in vars(Core).values() if isinstance(v, type) and issubclass(v, Message) and v.__name__.startswith(\"req_\")]\n #for clazz in self.query_types:\n # print (clazz.__name__)\n # print (clazz.DESCRIPTOR.fields_by_name.keys())\n # print ([f.type for f in clazz.DESCRIPTOR.fields])\n\n def getattr_multi(self, objarray, symbol):\n \"\"\"Returns the attribute 'symbol' in any of the objects in objarray, or None if not found\"\"\"\n for obj in objarray:\n attr = getattr(obj, symbol, None)\n if attr is not None:\n return attr\n return None\n\n def request(self, req_object):\n self.lock.acquire()\n try:\n \"\"\"Send request\"\"\"\n # Strip off the 'req_' prefix\n undecorated = type(req_object).__name__[4:]\n\n # Get enum value and send req\n req_id = getattr(Core, undecorated.upper())\n error, res = self.conn.send_request(req_id, req_object)\n if error is None:\n # Create result object\n res_obj = self.getattr_multi(proto_modules, 'res_' + undecorated.lower())()\n res_obj.ParseFromString(res)\n return res_obj\n else:\n err_obj = Core.response()\n err_obj.ParseFromString(error)\n return err_obj\n finally:\n self.lock.release()\n\n def to_json(self, obj):\n \"\"\"Convert the protobuf object to JSON\"\"\"\n return MessageToJson(obj, preserving_proto_field_name=True, indent=4)\n\n def from_json(self, req_name, json):\n \"\"\"Create a req_ protobuf object and parse the supplied JSON into it\"\"\"\n msg = self.getattr_multi(proto_modules, 'req_' + req_name.lower())\n if msg != None:\n return Parse(json, msg())\n else:\n return None\n\n def error_response(self, code, message):\n \"\"\"Create an error response given an error code and message\"\"\"\n err_obj = Core.response()\n err_obj.error_code = code\n err_obj.error_message = message\n err_obj.error_category = \"generic\"\n return err_obj\n\n def error_response_from_exception(self, ex):\n \"\"\"Create an error response given an exception. 
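The message is taken from str(ex) and the error category is set to exception. 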
The error code is set to 1.\"\"\"\n        err_obj = Core.response()\n        err_obj.error_code = 1\n        err_obj.error_message = str(ex)\n        err_obj.error_category = \"exception\"\n        return err_obj\n\nclass SocketConnection:\n    \"\"\"TCP or domain socket connection to the IPC server\"\"\"\n    def __init__(self, address):\n        \"\"\"Initialize socket with a tcp:/// or local:/// connection.\"\"\"\n        self.address = address\n        try:\n            url = urlparse(self.address)\n            if (url.scheme.lower() == \"local\"):\n                self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n                self.sock.connect(url.path)\n            elif (url.scheme.lower() == \"tcp\"):\n                self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n                self.sock.connect((url.path, url.port))\n            else:\n                raise ValueError('Invalid scheme', url.scheme)\n        except socket.error as msg:\n            sys.stderr.write(str(msg) + '\\n')\n\n    def close(self):\n        \"\"\"Close socket connection\"\"\"\n        self.sock.close()\n\n    def recvall(self, sock, count):\n        buf = b''\n        while count:\n            newbuf = sock.recv(count)\n            if not newbuf: return None\n            buf += newbuf\n            count -= len(newbuf)\n        return buf\n\n    def send_request(self, request_type, request):\n        \"\"\"Serialize request and send via socket, deserialize and return result\"\"\"\n        try:\n            # Send preamble\n            preamble = bytearray([ord('N'), 0, Core.VERSION_MAJOR, Core.VERSION_MINOR])\n            self.sock.sendall(preamble)\n\n            # Request header\n            header = Core.request()\n            header.type = request_type\n            str_header = header.SerializeToString()\n            str_request = request.SerializeToString()\n\n            # Pack the length-prefixed header and request\n            packed_heading = struct.pack(\">i%ds\" % (len(str_header),), len(str_header), str_header)\n            packed_request = struct.pack(\">i%ds\" % (len(str_request),), len(str_request), str_request)\n            self.sock.sendall(packed_heading)\n            self.sock.sendall(packed_request)\n\n            # Get preamble\n            preamble = self.recvall(self.sock, 4)\n            if chr(preamble[0]) != 'N':\n                raise ValueError('Invalid preamble')\n            if int(preamble[2]) > Core.VERSION_MAJOR:\n                raise ValueError('Unsupported API version')\n\n            # Get response header\n            response_buf = self.recvall(self.sock, 4)\n            header_len = struct.unpack('>i', response_buf[:4])[0]\n\n            response_buf = self.recvall(self.sock, header_len)\n            response = Core.response()\n            response.ParseFromString(response_buf)\n\n            if response.error_code == 0:\n                # Length of response\n                response_buf = bytes('', 'utf-8')\n                while len(response_buf) < 4:\n                    response_buf += self.sock.recv(1)\n                header_len = struct.unpack('>i', response_buf[:4])[0]\n\n                # Response\n                response_buf = bytes('', 'utf-8')\n                while len(response_buf) < header_len:\n                    response_buf += self.sock.recv(1)\n                return None, response_buf\n            else:\n                return response_buf, None\n\n        except Exception as msg:\n            response = Core.response()\n            response.error_code = 1\n            response.error_message = str(msg)\n            return response.SerializeToString(), None\n","sub_path":"src/nanoapi.py","file_name":"nanoapi.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"385861609","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='customerUser',\n            fields=[\n                ('cust_id', models.CharField(max_length=20, serialize=False, primary_key=True)),\n                ('cust_name', models.CharField(max_length=50)),\n                ('cust_mail', models.EmailField(max_length=254)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='orderIN',\n            fields=[\n                ('order_id', 
models.IntegerField(serialize=False, primary_key=True)),\n ('order_price', models.FloatField()),\n ('cust_id', models.ForeignKey(to='table.customerUser')),\n ],\n ),\n migrations.CreateModel(\n name='productItem',\n fields=[\n ('prod_id', models.CharField(max_length=20, serialize=False, primary_key=True)),\n ('prod_name', models.CharField(max_length=50)),\n ('prod_price', models.FloatField()),\n ],\n ),\n migrations.AddField(\n model_name='orderin',\n name='prod_id',\n field=models.ForeignKey(to='table.productItem'),\n ),\n ]\n","sub_path":"table/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"3285782","text":"import matplotlib.pyplot as plt\nimport os\nimport csv\nimport numpy as np\nfrom matplotlib.widgets import Slider\n\nos.chdir('C:\\\\Users\\\\Dhruv\\\\Desktop')\nN = 10\n\nglobal x\nglobal t\n\ndef moving_average(x,N):\n return [sum(x[i:i+N])/N for i in range(len(x)-N+1)]\ndef change_N(val):\n global m_avg\n global x\n global t\n m_avg.remove()\n N = int(s_N.val)\n m_avg, = ax.plot(t[N-2:-1],moving_average(x,N),label = str(N),color='r')\n \n \nfig, ax = plt.subplots()\nplt.subplots_adjust(left=0.25, bottom=0.25)\n\naxcolor = 'lightgoldenrodyellow'\nax_N = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\ns_N = Slider(ax_N,'N',2,499,valinit=1,valstep=1)\n\nwith open('yahooo.csv','r') as f:\n x = [float(row[0]) for row in csv.reader(f)]\n \nt = [i for i in range(len(x))]\n\nax.plot(t,x,label='orignal')\n#ax.plot(t[N-2:-1],moving_average(x,N),label = str(N))\n#N = N-5\nN = 2\nglobal m_avg\nm_avg, = ax.plot(t[N-2:-1],moving_average(x,N),label = str(N))\nax.legend(loc='upper left')\n\ns_N.on_changed(change_N)\nplt.show()\n \n \n \n","sub_path":"ts.py","file_name":"ts.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614346575","text":"import numpy as np \r\nimport matplotlib.pyplot as plt \r\ns=str(input(\"Enter name of image = \"))\r\nimage=plt.imread(s)\r\nimg=np.asarray(image)\r\nf=open(\"data.dat\",\"w\")\r\nfor i in range(np.shape(img)[0]):\r\n for j in range(np.shape(img)[1]):\r\n np.savetxt(f,img[i][j])\r\nf.close()\r\nprint(np.shape(img))","sub_path":"blur/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"425231216","text":"#!/usr/bin/env python\n\"\"\"HTTP API logic that ties API call renderers with HTTP routes.\"\"\"\n\n\n\nimport json\nimport urllib2\n\n\n# pylint: disable=g-bad-import-order,unused-import\nfrom grr.gui import django_lib\n# pylint: enable=g-bad-import-order,unused-import\n\nfrom django import http\n\nfrom werkzeug import exceptions as werkzeug_exceptions\nfrom werkzeug import routing\n\nimport logging\n\nfrom grr.gui import api_call_renderers\nfrom grr.gui import api_plugins\nfrom grr.gui import http_routing\nfrom grr.lib import access_control\nfrom grr.lib import rdfvalue\nfrom grr.lib import registry\nfrom grr.lib import utils\n\n\ndef BuildToken(request, execution_time):\n \"\"\"Build an ACLToken from the request.\"\"\"\n\n if request.method == \"GET\":\n reason = request.GET.get(\"reason\", \"\")\n elif request.method == \"POST\":\n # The header X-GRR-REASON is set in api-service.js, which django converts to\n # HTTP_X_GRR_REASON.\n reason = 
utils.SmartUnicode(urllib2.unquote(\n        request.META.get(\"HTTP_X_GRR_REASON\", \"\")))\n\n  token = access_control.ACLToken(\n      username=request.user,\n      reason=reason,\n      process=\"GRRAdminUI\",\n      expiry=rdfvalue.RDFDatetime().Now() + execution_time)\n\n  for field in [\"REMOTE_ADDR\", \"HTTP_X_FORWARDED_FOR\"]:\n    remote_addr = request.META.get(field, \"\")\n    if remote_addr:\n      token.source_ips.append(remote_addr)\n  return token\n\n\ndef StripTypeInfo(rendered_data):\n  \"\"\"Strips type information from rendered data. Useful for debugging.\"\"\"\n\n  if isinstance(rendered_data, (list, tuple)):\n    return [StripTypeInfo(d) for d in rendered_data]\n  elif isinstance(rendered_data, dict):\n    if \"value\" in rendered_data:\n      return StripTypeInfo(rendered_data[\"value\"])\n    else:\n      result = {}\n      for k, v in rendered_data.items():\n        result[k] = StripTypeInfo(v)\n      return result\n  else:\n    return rendered_data\n\n\ndef RegisterHttpRouteHandler(method, route, renderer_cls):\n  \"\"\"Registers given ApiCallRenderer for given method and route.\"\"\"\n  http_routing.HTTP_ROUTING_MAP.add(routing.Rule(\n      route, methods=[method],\n      endpoint=renderer_cls))\n\n\ndef GetRendererForHttpRequest(request):\n  \"\"\"Returns a renderer to handle given HTTP request.\"\"\"\n\n  matcher = http_routing.HTTP_ROUTING_MAP.bind(\n      \"%s:%s\" % (request.environ[\"SERVER_NAME\"],\n                 request.environ[\"SERVER_PORT\"]))\n  try:\n    match = matcher.match(request.path, request.method)\n  except werkzeug_exceptions.NotFound:\n    raise api_call_renderers.ApiCallRendererNotFoundError(\n        \"No API renderer was found for (%s) %s\" % (request.path,\n                                                   request.method))\n\n  renderer_cls, route_args = match\n  return (renderer_cls(), route_args)\n\n\ndef FillAdditionalArgsFromRequest(request, supported_types):\n  \"\"\"Creates arguments objects from a given request dictionary.\"\"\"\n\n  results = {}\n  for key, value in request.items():\n    try:\n      request_arg_type, request_attr = key.split(\".\", 1)\n    except ValueError:\n      continue\n\n    arg_class = None\n    for key, supported_type in supported_types.items():\n      if key == request_arg_type:\n        arg_class = supported_type\n\n    if arg_class:\n      if request_arg_type not in results:\n        results[request_arg_type] = arg_class()\n\n      results[request_arg_type].Set(request_attr, value)\n\n  results_list = []\n  for name, arg_obj in results.items():\n    additional_args = api_call_renderers.ApiCallAdditionalArgs(\n        name=name, type=supported_types[name].__name__)\n    additional_args.args = arg_obj\n    results_list.append(additional_args)\n\n  return results_list\n\n\nclass JSONEncoderWithRDFPrimitivesSupport(json.JSONEncoder):\n  \"\"\"Custom JSON encoder that encodes renderers output.\n\n  Custom encoder is required to facilitate usage of primitive values -\n  booleans, integers and strings - in renderers responses.\n\n  If a renderer references an RDFString, RDFInteger or an RDFBool when building a\n  response, it will lead to a JSON encoding failure when the response is encoded,\n  unless this custom encoder is used. 
Another way to solve this issue would be\n  to explicitly call api_value_renderers.RenderValue on every value returned\n  from the renderer, but it will make the code look overly verbose and dirty.\n  \"\"\"\n\n  def default(self, obj):\n    if isinstance(obj, (rdfvalue.RDFInteger,\n                        rdfvalue.RDFBool,\n                        rdfvalue.RDFString)):\n      return obj.SerializeToDataStore()\n\n    return json.JSONEncoder.default(self, obj)\n\n\ndef BuildResponse(status, rendered_data):\n  \"\"\"Builds HTTPResponse object from rendered data and HTTP status.\"\"\"\n  response = http.HttpResponse(status=status,\n                               content_type=\"application/json; charset=utf-8\")\n  response[\"Content-Disposition\"] = \"attachment; filename=response.json\"\n  response[\"X-Content-Type-Options\"] = \"nosniff\"\n\n  response.write(\")]}'\\n\")  # XSSI protection\n\n  # To avoid IE content sniffing problems, escape the tags. Otherwise somebody\n  # may send a link with a malicious payload that will be opened in IE (which\n  # does content sniffing and doesn't respect Content-Disposition header) and\n  # IE will treat the document as html and execute arbitrary JS that was\n  # passed with the payload.\n  str_data = json.dumps(rendered_data, cls=JSONEncoderWithRDFPrimitivesSupport)\n  response.write(str_data.replace(\"<\", r\"\\u003c\").replace(\">\", r\"\\u003e\"))\n\n  return response\n\n\ndef RenderHttpResponse(request):\n  \"\"\"Handles given HTTP request with one of the available API renderers.\"\"\"\n\n  renderer, route_args = GetRendererForHttpRequest(request)\n\n  strip_type_info = False\n\n  if request.method == \"GET\":\n    if request.GET.get(\"strip_type_info\", \"\"):\n      strip_type_info = True\n\n    if renderer.args_type:\n      unprocessed_request = request.GET\n      if hasattr(unprocessed_request, \"dict\"):\n        unprocessed_request = unprocessed_request.dict()\n\n      args = renderer.args_type()\n      for type_info in args.type_infos:\n        if type_info.name in route_args:\n          args.Set(type_info.name, route_args[type_info.name])\n        elif type_info.name in unprocessed_request:\n          args.Set(type_info.name, unprocessed_request[type_info.name])\n\n      if renderer.additional_args_types:\n        if not hasattr(args, \"additional_args\"):\n          raise RuntimeError(\"Renderer %s defines additional arguments types \"\n                             \"but its arguments object does not have \"\n                             \"'additional_args' field.\" % renderer)\n\n        if hasattr(renderer.additional_args_types, \"__call__\"):\n          additional_args_types = renderer.additional_args_types()\n        else:\n          additional_args_types = renderer.additional_args_types\n\n        args.additional_args = FillAdditionalArgsFromRequest(\n            unprocessed_request, additional_args_types)\n\n    else:\n      args = None\n  elif request.method == \"POST\":\n    try:\n      args = renderer.args_type()\n      for type_info in args.type_infos:\n        if type_info.name in route_args:\n          args.Set(type_info.name, route_args[type_info.name])\n\n      if request.META[\"CONTENT_TYPE\"].startswith(\"multipart/form-data;\"):\n        payload = json.loads(request.POST[\"_params_\"])\n        args.FromDict(payload)\n\n        for name, fd in request.FILES.items():\n          args.Set(name, fd.read())\n      else:\n        payload = json.loads(request.body)\n        if payload:\n          args.FromDict(payload)\n    except Exception as e:  # pylint: disable=broad-except\n      logging.exception(\n          \"Error while parsing POST request %s (%s): %s\",\n          request.path, request.method, e)\n\n      return BuildResponse(500, dict(message=str(e)))\n  else:\n    raise RuntimeError(\"Unsupported method: %s.\" % request.method)\n\n  token = BuildToken(request, renderer.max_execution_time)\n\n  try:\n    rendered_data = 
api_call_renderers.HandleApiCall(renderer, args,\n token=token)\n\n if strip_type_info:\n rendered_data = StripTypeInfo(rendered_data)\n\n return BuildResponse(200, rendered_data)\n except access_control.UnauthorizedAccess as e:\n logging.exception(\n \"Access denied to %s (%s) with %s: %s\", request.path,\n request.method, renderer.__class__.__name__, e)\n\n return BuildResponse(403, dict(message=\"Access denied by ACL\"))\n except Exception as e: # pylint: disable=broad-except\n logging.exception(\n \"Error while processing %s (%s) with %s: %s\", request.path,\n request.method, renderer.__class__.__name__, e)\n\n return BuildResponse(500, dict(message=str(e)))\n\n\nclass HttpApiInitHook(registry.InitHook):\n \"\"\"Register HTTP API renderers.\"\"\"\n\n def RunOnce(self):\n # The list is alphabetized by route.\n RegisterHttpRouteHandler(\"GET\", \"/api/aff4/\",\n api_plugins.aff4.ApiAff4Renderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/aff4-index/\",\n api_plugins.aff4.ApiAff4IndexRenderer)\n\n RegisterHttpRouteHandler(\"GET\", \"/api/artifacts\",\n api_plugins.artifact.ApiArtifactsRenderer)\n RegisterHttpRouteHandler(\"POST\", \"/api/artifacts/upload\",\n api_plugins.artifact.ApiArtifactsUploadRenderer)\n RegisterHttpRouteHandler(\"POST\", \"/api/artifacts/delete\",\n api_plugins.artifact.ApiArtifactsDeleteRenderer)\n\n RegisterHttpRouteHandler(\"GET\", \"/api/clients/kb-fields\",\n api_plugins.client.ApiListKbFieldsRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/clients\",\n api_plugins.client.ApiClientSearchRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/clients/\",\n api_plugins.client.ApiClientSummaryRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/clients/labels\",\n api_plugins.client.ApiClientsLabelsListRenderer)\n RegisterHttpRouteHandler(\"POST\", \"/api/clients/labels/add\",\n api_plugins.client.ApiClientsAddLabelsRenderer)\n RegisterHttpRouteHandler(\"POST\", \"/api/clients/labels/remove\",\n api_plugins.client.ApiClientsRemoveLabelsRenderer)\n\n RegisterHttpRouteHandler(\"GET\", \"/api/config\",\n api_plugins.config.ApiConfigRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/config/\",\n api_plugins.config.ApiConfigOptionRenderer)\n\n RegisterHttpRouteHandler(\"GET\", \"/api/docs\",\n api_plugins.docs.ApiDocsRenderer)\n\n RegisterHttpRouteHandler(\"GET\", \"/api/flows///status\",\n api_plugins.flow.ApiFlowStatusRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/flows/descriptors\",\n api_plugins.flow.ApiFlowDescriptorsListRenderer)\n RegisterHttpRouteHandler(\n \"GET\", \"/api/clients//flows//results\",\n api_plugins.flow.ApiFlowResultsRenderer)\n RegisterHttpRouteHandler(\n \"GET\", \"/api/clients//flows//output-plugins\",\n api_plugins.flow.ApiFlowOutputPluginsRenderer)\n RegisterHttpRouteHandler(\"POST\",\n \"/api/clients//flows/remotegetfile\",\n api_plugins.flow.ApiRemoteGetFileRenderer)\n RegisterHttpRouteHandler(\"POST\", \"/api/clients//flows/start\",\n api_plugins.flow.ApiStartFlowRenderer)\n RegisterHttpRouteHandler(\n \"POST\",\n \"/api/clients//flows//results/archive-files\",\n api_plugins.flow.ApiFlowArchiveFilesRenderer)\n\n RegisterHttpRouteHandler(\n \"GET\", \"/api/output-plugins/all\",\n api_plugins.output_plugin.ApiOutputPluginsListRenderer)\n\n RegisterHttpRouteHandler(\"GET\", \"/api/hunts\",\n api_plugins.hunt.ApiHuntsListRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/hunts/\",\n api_plugins.hunt.ApiHuntSummaryRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/hunts//errors\",\n api_plugins.hunt.ApiHuntErrorsRenderer)\n 
RegisterHttpRouteHandler(\"GET\", \"/api/hunts//log\",\n api_plugins.hunt.ApiHuntLogRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/hunts//results\",\n api_plugins.hunt.ApiHuntResultsRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/hunts//output-plugins\",\n api_plugins.hunt.ApiHuntOutputPluginsRenderer)\n RegisterHttpRouteHandler(\"POST\", \"/api/hunts/create\",\n api_plugins.hunt.ApiCreateHuntRenderer)\n RegisterHttpRouteHandler(\"POST\",\n \"/api/hunts//results/archive-files\",\n api_plugins.hunt.ApiHuntArchiveFilesRenderer)\n\n RegisterHttpRouteHandler(\n \"GET\", \"/api/reflection/aff4/attributes\",\n api_plugins.reflection.ApiAff4AttributesReflectionRenderer)\n RegisterHttpRouteHandler(\n \"GET\", \"/api/reflection/rdfvalue/\",\n api_plugins.reflection.ApiRDFValueReflectionRenderer)\n RegisterHttpRouteHandler(\n \"GET\", \"/api/reflection/rdfvalue/all\",\n api_plugins.reflection.ApiAllRDFValuesReflectionRenderer)\n\n RegisterHttpRouteHandler(\n \"GET\", \"/api/stats/store//metadata\",\n api_plugins.stats.ApiStatsStoreMetricsMetadataRenderer)\n RegisterHttpRouteHandler(\n \"GET\", \"/api/stats/store//metrics/\",\n api_plugins.stats.ApiStatsStoreMetricRenderer)\n\n RegisterHttpRouteHandler(\"GET\", \"/api/users/me/approvals/\",\n api_plugins.user.ApiUserApprovalsListRenderer)\n RegisterHttpRouteHandler(\"GET\", \"/api/users/me/settings\",\n api_plugins.user.ApiUserSettingsRenderer)\n RegisterHttpRouteHandler(\"POST\", \"/api/users/me/settings\",\n api_plugins.user.ApiSetUserSettingsRenderer)\n","sub_path":"gui/http_api.py","file_name":"http_api.py","file_ext":"py","file_size_in_byte":14107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"27023490","text":"class Solution(object):\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n if (target not in nums):\n return [-1,-1]\n a = nums.index(target)\n b = a\n while ((b 0\n\t\texcept:\n\t\t\terrors = []\n\t\t\terrors.append('\\344\\270\\212\\344\\274\\240\\346\\226\\207\\344\\273\\266\\351\\224\\231\\350\\257\\257')\n\t\t\trender_dict['errors'] = errors\n\t\t\treturn render_template('test_demo.html', **render_dict)\n\t\tfilename = str(random.random())\n\t\tmedia_name = os.path.join(app.config['UPLOAD_DIR'],filename)\n\t\tf.save(media_name)\n\t\trender_dict['test_demo_result'] = 20\n\treturn render_template('test_demo.html', **render_dict)\n","sub_path":"app/views/test_demo.py","file_name":"test_demo.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"591011172","text":"#!/usr/bin/env python\n\n# This file is part of Empirical, https://github.com/devosoft/Empirical\n# Copyright (C) Michigan State University, 2016.\n# Released under the MIT Software license; see doc/LICENSE\n\n\nfrom __future__ import print_function\nimport sys\nfrom collections import defaultdict\n\n\"\"\"\nSimple file to generate a level dependency map for the project.\n\"\"\"\n\nimport os, glob, argparse\n\nclass argstruct(object):\n \"\"\"really stupid object to pass in arguments from an interpreter\"\"\"\n pass\n\nclass DependencyNode(object):\n \"\"\"Simple object to build a dependency tree\"\"\"\n\n def __init__(self, path):\n self.name = os.path.basename(path)\n self.path = path\n self.dependencies = defaultdict(DependencyNode)\n self.depth = 0\n\n def __str__(self):\n return self.name\n\n def add_dep(self, node):\n # nodes are identified 
by their normalized path\n self.dependencies[node.path] = node\n\n if node.depth + 1 > self.depth:\n self.depth = node.depth + 1\n\n return self.depth\n\nclass DependencyTree(object):\n \"\"\"Simple object to hold a dependency tree.\"\"\"\n\n def __init__(self):\n # again, nodes are indexed by normalized path\n self.nodes = defaultdict(DependencyNode)\n\n def add_node(self, node):\n self.nodes[node.path] = node\n\n\ndef file_to_tree(filepath, tree, args):\n \"\"\"Takes a file and adds it (and its dependencies) to the tree\"\"\"\n with open(filepath, 'r') as currfile:\n\n thisnode = DependencyNode(os.path.normpath(filepath))\n if args.verbose:\n print(\"INFO: Created node {}\".format(thisnode.name))\n\n for line in currfile.readlines():\n # grab dependencies\n if line.startswith('#include \"'):\n dep = line.replace('#include \"', \"\")\n dep = dep.replace('\"', \"\")\n dep = dep.strip()\n\n if args.verbose:\n print(\"INFO: Stepping down to {}\".format(os.path.dirname(currfile.name) + '/' + dep),\n file=sys.stderr)\n\n childnode = file_to_tree(os.path.dirname(currfile.name) + '/' + dep, tree, args)\n thisnode.add_dep(childnode)\n if childnode.depth + 1 > thisnode.depth:\n thisnode.depth = childnode.depth + 1\n\n\n tree.add_node(thisnode)\n return thisnode\n\ndef build_tree_from_dir(dirpath, args):\n \"\"\"Takes in a directory path and arguments, spits out a tree.\"\"\"\n\n tree = DependencyTree()\n\n # get initial list of files\n target = dirpath + \"*.\" + args.extension\n\n if args.verbose:\n print(\"Initial search path is: {}\".format(target), file=sys.stderr)\n\n tlist = glob.glob(target)\n\n for sfile in tlist:\n if args.verbose:\n print(\"INFO: Processing {}\".format(sfile), file=sys.stderr)\n\n file_to_tree(sfile, tree, args)\n\n return tree\n\ndef tree_to_dep_map(tree):\n \"\"\"converts a dependency tree into a dict: dep_level -> nodes\"\"\"\n dep_map = defaultdict(set)\n for path in tree.nodes:\n dep_map[tree.nodes[path].depth].add(tree.nodes[path])\n\n return dep_map\n\ndef print_dep_map_for_dir(dirpath, args):\n \"\"\"Takes in a dependency map and prints it to stdout\"\"\"\n dep_map = tree_to_dep_map(build_tree_from_dir(dirpath, args))\n\n for key in dep_map:\n if key != 0:\n print(\"\\n\\n== Level \" + str(key) + \" ==\")\n for node in dep_map[key]:\n print(\"\\n\\t\", str(node), \"depends:\\n\\t\\t\", end=\"\")\n chars = 24 # tabs make for great consistency\n for dep in node.dependencies:\n chars += len(str(node.dependencies[dep])) + 1\n if(chars > 80):\n print(\"\\n\\t\\t\", end=\"\")\n chars = 16 + len(str(dep))\n print(str(node.dependencies[dep]) + \" \", end=\" \")\n else:\n print(str(node.dependencies[dep]) + \", \", end=\"\")\n else:\n print(\"== Level 0 (no dependencies) ==\")\n for node in dep_map[key]:\n print(\"\\t{}\".format(str(node)))\n\ndef get_parser():\n \"\"\"\n Constructs the parser for when this script is called from the command line\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description='Script to build a dependency map for a directory',\n epilog='Example: ./level_mapper.py ../tools/ h')\n\n parser.add_argument('directory_path', help=\"Path of the directory to make\"\n \" a tree for (ending slash required)\")\n parser.add_argument('extension', help=\"Extension to glob (no dot)\",\n default=\"h\")\n parser.add_argument('-v', '--verbose', help=\"Turn on debugging info\",\n action='store_true')\n\n return parser\n\ndef main():\n args = get_parser().parse_args()\n print_dep_map_for_dir(args.directory_path, args)\n\nif __name__ == \"__main__\":\n 
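# (added note) command-line entry point: parse the args and print the dependency map\n    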
main()\n","sub_path":"scripts/level_mapper.py","file_name":"level_mapper.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"232964531","text":"from django.conf.urls import include\nfrom django.urls import path, re_path\n\nfrom grandchallenge.groups.views import UserAutocomplete\nfrom grandchallenge.profiles.forms import SignupFormExtra\nfrom grandchallenge.profiles.views import (\n PreSocialView,\n UserProfileDetail,\n login_redirect,\n profile,\n profile_edit,\n profile_edit_redirect,\n signin,\n signup,\n signup_complete,\n)\n\nurlpatterns = [\n path(\n \"signup/\",\n signup,\n {\"signup_form\": SignupFormExtra},\n name=\"profile_signup\",\n ),\n path(\"signup-social/\", PreSocialView.as_view(), name=\"pre-social\"),\n path(\"signin/\", signin, name=\"profile_signin\"),\n path(\"signup_complete/\", signup_complete, name=\"profile_signup_complete\"),\n path(\"login-redirect/\", login_redirect, name=\"login_redirect\"),\n path(\"profile/edit/\", profile_edit_redirect, name=\"profile_redirect_edit\"),\n path(\"profile/\", profile, name=\"profile_redirect\"),\n path(\n \"user-autocomplete/\",\n UserAutocomplete.as_view(),\n name=\"users-autocomplete\",\n ),\n re_path(\n r\"^(?P[\\@\\.\\+\\w-]+)/edit/$\",\n profile_edit,\n name=\"userena_profile_edit\",\n ),\n re_path(\n r\"^(?P(?!(signout|signup|signin)/)[\\@\\.\\+\\w-]+)/$\",\n UserProfileDetail.as_view(),\n name=\"userena_profile_detail\",\n ),\n path(\"\", include(\"userena.urls\")),\n]\n","sub_path":"app/grandchallenge/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"279206663","text":"\"\"\"\ngmm -> NN\n\"\"\"\n\nimport torch\nimport sys\nimport torch.nn as nn\n\nsys.path.insert(0, '/home/ghost/PycharmProjects/bayesian_prolo')\nfrom scheduling_env.alpha_div import AlphaLoss\nimport numpy as np\nfrom scheduling_env.argument_parser import Logger\nimport pickle\nfrom torch.autograd import Variable\nfrom utils.global_utils import save_pickle\nfrom sklearn import mixture\nfrom utils.pairwise_utils import create_new_data, create_sets_of_20_from_x_for_pairwise_comparisions, find_which_schedule_this_belongs_to, save_performance_results\nfrom utils.naive_utils import create_new_dataset\nfrom scheduling_env.generate_results_of_hypothesis.pairwise.train_autoencoder import Autoencoder\nimport itertools\n\nsys.path.insert(0, '../')\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\ntorch.manual_seed(0)\nnp.random.seed(0) \n\n\nclass GmmSmall(nn.Module):\n \"\"\"\n gmm with a small number of parameters\n \"\"\"\n\n def __init__(self):\n super(GmmSmall, self).__init__()\n self.fc1 = nn.Linear(16, 32)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(32, 32)\n self.relu2 = nn.ReLU()\n self.fc21 = nn.Linear(32, 32)\n self.relu21 = nn.ReLU()\n self.fc22 = nn.Linear(32, 32)\n self.relu22 = nn.ReLU()\n self.fc3 = nn.Linear(32, 2)\n self.soft = nn.Softmax()\n\n def forward(self, x):\n \"\"\"\n forward pass\n :param x: i_minus_j or vice versa\n :return:\n \"\"\"\n x = self.fc1(x)\n x = self.relu1(x)\n x = self.fc2(x)\n x = self.relu2(x)\n x = self.fc21(x)\n x = self.relu21(x)\n x = self.fc22(x)\n x = self.relu22(x)\n x = self.fc3(x)\n x = self.soft(x)\n\n return x\n\nclass GmmNNTrain():\n def __init__(self, num_schedules):\n self.arguments = Logger()\n self.alpha = .9\n self.num_schedules = 
num_schedules \n self.home_dir = self.arguments.home_dir\n\n load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(\n self.num_schedules) + 'high_low_hetero_deadline_pairwise.pkl'\n\n self.data = pickle.load(open(load_directory, \"rb\"))\n self.X, self.Y, self.schedule_array = create_new_data(self.num_schedules, self.data)\n self.start_of_each_set_twenty = create_sets_of_20_from_x_for_pairwise_comparisions(self.X)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = GmmSmall().to(device)\n\n print(self.model.state_dict())\n self.opt = torch.optim.SGD(self.model.parameters(), lr=.0001, weight_decay=.1) # TODO: tune weight decay\n\n schedule_matrix_load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/' + str(self.num_schedules) + 'matrices.pkl'\n self.matrices = pickle.load(open(schedule_matrix_load_directory, \"rb\"))\n\n self.gmm, self.label = self.cluster_matrices(self.matrices, self.num_schedules)\n\n @staticmethod\n def cluster_matrices(matrices, num_schedules):\n # vectorize each matrix\n vectorized_set = []\n for i in matrices:\n vectorized = i.reshape(20 * 2048, 1)\n vectorized_set.append(vectorized)\n gmm = mixture.GaussianMixture(n_components=3, covariance_type='full',random_state=0) # random state makes it deterministic\n # Fitting the input data\n new_set = np.hstack(tuple(vectorized_set)).reshape(num_schedules, 20 * 2048)\n gmm_model = gmm.fit(np.asarray(new_set))\n labels = gmm_model.predict_proba(np.asarray(new_set))\n return gmm_model, labels\n\n # noinspection PyArgumentList\n def train(self):\n \"\"\"\n Trains NN.\n Randomly samples a schedule and timestep within that schedule, produces training data using x_i - x_j\n and trains upon that.\n :return:\n \"\"\"\n\n total_iterations = 0\n convergence_epsilon = .01\n when_to_save = 1000\n distribution_epsilon = .0001\n training_done = False\n total_loss_array = []\n\n loss_func = AlphaLoss(.9)\n\n # variables to keep track of loss and number of tasks trained over\n\n while not training_done:\n # sample a timestep before the cutoff for cross_validation\n set_of_twenty = np.random.choice(self.start_of_each_set_twenty)\n which_schedule = find_which_schedule_this_belongs_to(self.schedule_array, set_of_twenty)\n\n # get actual task scheduled\n truth = self.Y[set_of_twenty]\n\n # choose cluster based on value produced by gmm\n augment_proba = self.label[which_schedule]\n\n # find feature vector of true action taken\n phi_i_num = truth + set_of_twenty\n phi_i = self.X[phi_i_num]\n phi_i_numpy = np.asarray(phi_i)\n running_loss_predict_tasks = 0\n num_iterations_predict_task = 0\n # iterate over pairwise comparisons\n for counter in range(set_of_twenty, set_of_twenty + 20):\n if counter == phi_i_num: # if counter == phi_i_num:\n continue\n else:\n phi_j = self.X[counter]\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_i_numpy - phi_j_numpy\n\n if torch.cuda.is_available():\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n feature_input = torch.cat(feature_input, torch.Tensor(augment_proba))\n P = Variable(torch.Tensor([1 - distribution_epsilon, distribution_epsilon]).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n P = Variable(torch.Tensor([1 - distribution_epsilon, distribution_epsilon]))\n\n output = self.model.forward(feature_input)\n\n if torch.isnan(output[0][0]).item() == 1:\n print('hi')\n self.opt.zero_grad()\n loss = loss_func.forward(P, 
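One concrete bug worth flagging in the training loop above: torch.cat takes a sequence of tensors plus a dim argument, so torch.cat(feature_input, torch.Tensor(augment_proba)) raises a TypeError as written. The network's first layer is nn.Linear(16, 32) while the pairwise feature is reshaped to 13 columns, which is consistent with appending the 3 GMM component responsibilities. A sketch of the presumably intended call; the exact shapes are an assumption:

import torch

feature = torch.randn(1, 13)         # x_i - x_j pairwise feature, as reshaped above
responsibilities = torch.rand(1, 3)  # assumed: GMM posterior for this schedule (3 components)

# torch.cat concatenates a tuple/list of tensors along an existing dimension
model_input = torch.cat((feature, responsibilities), dim=1)
print(model_input.shape)  # (1, 16), matching nn.Linear(16, 32)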
output)\n\n if torch.isnan(loss):\n print(self.alpha, ' :nan occurred at iteration ', total_iterations, ' at', num_iterations_predict_task)\n\n if loss.item() < .001 or loss.item() > 50:\n pass\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n running_loss_predict_tasks += loss.item()\n num_iterations_predict_task += 1\n\n # second loop\n for counter in range(set_of_twenty, set_of_twenty + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.X[counter]\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_j_numpy - phi_i_numpy\n\n if torch.cuda.is_available():\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n P = Variable(torch.Tensor([distribution_epsilon, 1 - distribution_epsilon]).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n P = Variable(torch.Tensor([distribution_epsilon, 1 - distribution_epsilon]))\n\n output = self.model.forward(feature_input)\n if torch.isnan(output[0][0]).item() == 1:\n print('hi')\n self.opt.zero_grad()\n loss = loss_func.forward(P, output)\n\n if loss.item() < .001 or loss.item() > 50:\n pass\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n running_loss_predict_tasks += loss.item()\n\n num_iterations_predict_task += 1\n\n total_loss_array.append(running_loss_predict_tasks / num_iterations_predict_task)\n\n total_iterations += 1\n\n if total_iterations % 50 == 49:\n print('total loss (average for each 40, averaged) at iteration ', total_iterations, ' is ', np.mean(total_loss_array[-40:]))\n\n if total_iterations % when_to_save == when_to_save - 1:\n self.save_trained_nets('gmm_nn_small' + str(self.num_schedules))\n\n if total_iterations > 5000 and np.mean(total_loss_array[-100:]) - np.mean(total_loss_array[-500:]) < convergence_epsilon:\n training_done = True\n\n # noinspection PyArgumentList\n def evaluate_on_test_data(self, models, schedules_trained_on):\n \"\"\"\n Evaluate performance of a trained network tuned upon the alpha divergence loss.\n Note this function is called after training convergence\n :return:\n \"\"\"\n num_schedules = 75\n # load in new data\n load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(\n num_schedules) + 'test_high_low_hetero_deadline_pairwise.pkl'\n\n data = pickle.load(open(load_directory, \"rb\"))\n X, Y, schedule_array = create_new_data(num_schedules, data)\n\n ### take a side step and do some of the clustering stuff\n autoencoder_class = AutoEncoderTrain(num_schedules)\n autoencoder_class.model.load('/home/ghost/PycharmProjects/bayesian_prolo/saved_models/Autoencoder' + str(schedules_trained_on) + '.tar')\n autoencoder_class.compute_mean()\n autoencoder_class.create_iterables()\n\n autoencoder_class.round_each_encoding_and_create_array()\n autoencoder_class.populate_a_matrix_per_schedule()\n test_matrices = autoencoder_class.save_matrices()\n\n gmm_model, labels = self.cluster_matrices(test_matrices, num_schedules)\n\n prediction_accuracy = [0, 0]\n percentage_accuracy_top1 = []\n percentage_accuracy_top3 = []\n\n for j in range(0, num_schedules):\n schedule_bounds = schedule_array[j]\n step = schedule_bounds[0]\n self.model = models[labels[j]]\n\n while step < schedule_bounds[1]:\n probability_matrix = np.zeros((20, 20))\n\n for m, counter in enumerate(range(step, step + 20)):\n phi_i = X[counter]\n phi_i_numpy = np.asarray(phi_i)\n\n # for each set of twenty\n for n, 
second_counter in enumerate(range(step, step + 20)):\n # fill entire array with diagnols set to zero\n if second_counter == counter: # same as m = n\n continue\n phi_j = X[second_counter]\n phi_j_numpy = np.asarray(phi_j)\n\n feature_input = phi_i_numpy - phi_j_numpy\n\n if torch.cuda.is_available():\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n\n # push through nets\n preference_prob = self.model.forward(feature_input)\n probability_matrix[m][n] = preference_prob[0].data.detach()[\n 0].item() # TODO: you can do a check if only this line leads to the same thing as the line below\n # probability_matrix[n][m] = preference_prob[0].data.detach()[1].item()\n\n # Set of twenty is completed\n column_vec = np.sum(probability_matrix, axis=1)\n\n # top 1\n choice = np.argmax(column_vec)\n\n # top 3\n _, top_three = torch.topk(torch.Tensor(column_vec), 3)\n\n # Then do training update loop\n truth = Y[step]\n\n # index top 1\n if choice == truth:\n prediction_accuracy[0] += 1\n\n # index top 3\n if truth in top_three:\n prediction_accuracy[1] += 1\n\n # add average loss to array\n step += 20\n\n # schedule finished\n print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)\n\n print('schedule num:', j)\n percentage_accuracy_top1.append(prediction_accuracy[0] / 20)\n percentage_accuracy_top3.append(prediction_accuracy[1] / 20)\n\n prediction_accuracy = [0, 0]\n save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3, 'HIFI_LIFI_gmm_nn_small_pairwise' + str(schedules_trained_on) + '.pkl')\n\n def save_trained_nets(self, name):\n \"\"\"\n saves the model\n :return:\n \"\"\"\n torch.save({'nn_state_dict': self.model.state_dict()},\n '/home/ghost/PycharmProjects/bayesian_prolo/saved_models/pairwise_saved_models/gmm_nn_' + name + '.tar')\n\n\n# can also be used for gmm\nclass AutoEncoderTrain:\n \"\"\"\n create and train the autoencoder\n \"\"\"\n\n def __init__(self):\n\n self.num_schedules = 75\n load_directory = '/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/datasets/' + str(\n self.num_schedules) + 'test_high_low_hetero_deadline_pairwise.pkl'\n self.data = pickle.load(open(load_directory, \"rb\"))\n self.X, self.Y, self.schedule_array = create_new_dataset(num_schedules=self.num_schedules, data=self.data)\n for i, each_element in enumerate(self.X):\n self.X[i] = each_element + list(range(20))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = Autoencoder().to(device)\n\n print(self.model.state_dict())\n self.opt = torch.optim.SGD(self.model.parameters(), lr=.0001)\n self.mean_embedding = None\n self.embedding_np = None\n self.matrices = None\n self.total_binary_embeddings = None\n self.states = None\n\n # noinspection PyArgumentList\n def compute_mean(self):\n \"\"\"\n computes the mean embedding by first computing all embeddings for every step of the schedule,\n adding them to a numpy array and computing the avg\n :return:\n \"\"\"\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n 
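The evaluation block above builds a 20x20 matrix of pairwise preference probabilities and ranks items by summing along axis=1; the variable is named column_vec, but axis=1 sums across columns for each row, giving one score per row. Stated on its own, the aggregation is a Borda-style score; a small self-contained sketch of the same idea:

import numpy as np

def rank_from_preferences(pref):
    """pref[m][n] ~ P(item m preferred over item n); diagonal left at zero."""
    scores = pref.sum(axis=1)                 # one score per item (row sums)
    top1 = int(np.argmax(scores))
    top3 = list(np.argsort(scores)[::-1][:3]) # highest three scores
    return top1, top3

pref = np.random.rand(20, 20)
np.fill_diagonal(pref, 0.0)
print(rank_from_preferences(pref))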
self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)\n\n def create_iterables(self):\n \"\"\"\n adds all possible state combinations\n :return:\n \"\"\"\n iterables = [[0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1]]\n self.states = []\n for t in itertools.product(*iterables):\n self.states.append(t)\n\n # noinspection PyArgumentList\n def round_each_encoding_and_create_array(self):\n \"\"\"\n rounds each encoding by comparing it to the mean, and then stacks these in an array\n :return:\n \"\"\"\n self.total_binary_embeddings = np.zeros((0))\n for counter, data_row in enumerate(self.X):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n\n embedding_copy = np.zeros((1, 11))\n\n for i, each_element in enumerate(self.mean_embedding):\n if each_element > prediction_embedding.data[0][i].item():\n embedding_copy[0][i] = 0\n else:\n embedding_copy[0][i] = 1\n\n if counter == 0:\n self.total_binary_embeddings = embedding_copy\n else:\n self.total_binary_embeddings = np.vstack((self.total_binary_embeddings, embedding_copy))\n\n # This should generate n schedules of binary data\n print('finished turning all elements of schedule into binary')\n\n def pass_in_embedding_out_state_ID(self, binary):\n \"\"\"\n pass in a binary embedding, and itll return the state id\n :param binary:\n :return:\n \"\"\"\n binary_as_tuple = tuple(binary)\n index = self.states.index(binary_as_tuple)\n return index\n\n def populate_a_matrix_per_schedule(self):\n \"\"\"\n creates matrices bases on the binary embeddings\n :return:\n \"\"\"\n self.matrices = []\n for i in range(self.num_schedules):\n m = np.zeros((2048, 20))\n self.matrices.append(m)\n for i, each_matrix in enumerate(self.matrices):\n # lets look at elements of schedule 1\n for j in range(self.schedule_array[i][0], self.schedule_array[i][1] + 1):\n binary_embedding = self.total_binary_embeddings[j]\n index = self.pass_in_embedding_out_state_ID(binary_embedding)\n # action taken at this instance\n action = self.Y[j]\n each_matrix[index][action] += 1\n total_sum = each_matrix.sum()\n self.matrices[i] = np.divide(each_matrix, total_sum)\n\n print('n matrices have been generated')\n\n\n def save_matrices(self):\n \"\"\"\n saves the matrices so these can be used to cluster in the gmm etc.\n :return:\n \"\"\"\n save_pickle('/home/ghost/PycharmProjects/bayesian_prolo/scheduling_env/', self.matrices, str(self.num_schedules) + 'test_matrices.pkl')\n return self.matrices\n\n\ndef main():\n \"\"\"\n entry point for file\n :return:\n \"\"\"\n for num_schedules in (3, 9, 15, 150, 1500):\n trainer = GmmNNTrain(num_schedules)\n trainer.train()\n trainer.evaluate_on_test_data(trainer.models, num_schedules)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n","sub_path":"scheduling_env/generate_results_of_hypothesis/pairwise/gmm_small_pairwise.py","file_name":"gmm_small_pairwise.py","file_ext":"py","file_size_in_byte":19093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604693721","text":"from 
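The pass_in_embedding_out_state_ID method above scans a list of all 2**11 state tuples with list.index on every lookup. Because itertools.product over eleven [0, 1] iterables enumerates tuples in lexicographic order, the index is exactly the embedding read as an 11-bit binary number (most significant bit first), so the scan can be replaced with integer arithmetic:

from itertools import product

states = list(product([0, 1], repeat=11))

def state_id(bits):
    # interpret the bit tuple as a base-2 number, most significant bit first
    out = 0
    for b in bits:
        out = (out << 1) | int(b)
    return out

# matches the ordering that list.index relies on in the class above
assert all(state_id(t) == i for i, t in enumerate(states))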
conftest import build_site_url\nfrom environs import Env\nfrom pages.ondemand.admin.base.header import Header\nfrom pages.ondemand.common.navigation_tabs.tabs import Tabs\nfrom utilities import Page\nfrom utilities.constants.ondemand import Admin\n\n\nsut_env = Env()\nAGENCY: str = sut_env.str('AGENCY')\n\n\nclass Base(Page):\n \"\"\"Base Page objects and methods for the OnDemand Admin application.\"\"\"\n\n URL_PATH = build_site_url(app='ondemand', path=f'/admin/{AGENCY}')\n\n @property\n def header(self: Page) -> Header:\n return Header(self)\n\n @property\n def navigation(self: Page) -> Tabs:\n return Tabs(\n self,\n tabs=[tab for tab in Admin.NAVIGATION_TABS],\n selector='header-navigation-container',\n )\n","sub_path":"pages/ondemand/admin/base/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"301036176","text":"import tensorflow as tf\nimport numpy as np\nimport argparse\nimport operator\nimport pickle\nimport time\n\nfrom getBatch_sk import *\nfrom utils import *\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-M1', '--model1', type=str, help=\"First model\")\n parser.add_argument('-M2', '--model2', type=str, help=\"Second model\")\n parser.add_argument('-D', '--dataset', type=str, default=\"test\", help=\"Dataset to evaluate. 'train' or 'step'\")\n parser.add_argument('-P', '--print', type=str, default=\"N\", help=\"Print confusion matrix. 'Y' or 'N'\")\n parser.add_argument('-TS', '--twoStream', type=bool, default=True, help=\"TwoStream test (True) or Normal test (False)\")\n cfg = parser.parse_args()\n\n # this is used for Data and checkpoints\n # if /var/scratch/ exists, this computer is DAS4\n main_folder = ''\n if(os.path.isdir('/var/scratch/delariva/')):\n main_folder = '/var/scratch/delariva/'\n\n\n\n\n\n\n\n #/********************************************\n # * **************************************** *\n # * **************** GRAPH ***************** *\n # * **************************************** *\n # ********************************************/\n\n start_runtime = time.time()\n tf.set_random_seed(42)\n\n graph1 = tf.Graph()\n sess1 = tf.Session(graph=graph1)\n tf.saved_model.loader.load(sess1, [tf.saved_model.tag_constants.SERVING], (main_folder+\"checkpoints/%s\" % cfg.model1))\n #print([n.name for n in graph.as_graph_def().node])\n images_placeholder1 = graph1.get_tensor_by_name(\"images_placeholder:0\")\n labels_placeholder1 = graph1.get_tensor_by_name(\"labels_placeholder:0\")\n isTraining1 = graph1.get_tensor_by_name(\"isTraining:0\")\n learningRate1 = graph1.get_tensor_by_name(\"learningRate:0\")\n eval_correct1 = graph1.get_tensor_by_name(\"Equal:0\")\n argmax_logits1 = graph1.get_tensor_by_name(\"dense_2/BiasAdd:0\")\n argmax_labels1 = graph1.get_tensor_by_name(\"ArgMax_1:0\")\n\n\n with open(main_folder+\"checkpoints/%s.cfg\" % cfg.model1, 'rb') as f:\n model1_cfg = pickle.load(f)\n if not hasattr(model1_cfg, 'HEAT_THRESHOLD'): model1_cfg.HEAT_THRESHOLD = 0\n if not main_folder in model1_cfg.dataDirectory: model1_cfg.dataDirectory = main_folder + model1_cfg.dataDirectory\n\n\n\n if cfg.twoStream:\n graph2 = tf.Graph()\n sess2 = tf.Session(graph=graph2)\n tf.saved_model.loader.load(sess2, [tf.saved_model.tag_constants.SERVING], (main_folder+\"checkpoints/%s\" % cfg.model2))\n #print([n.name for n in graph.as_graph_def().node])\n images_placeholder2 = 
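The two-stream script here restores each checkpoint into its own tf.Graph with its own tf.Session so the two models never collide on tensor names. A minimal sketch of that pattern using the TF 1.x SavedModel loader; the export directory below is a placeholder, and the explicit as_default block is a defensive choice rather than a claim about the original code:

import tensorflow as tf

def load_saved_model(export_dir):
    """Load a TF1 SavedModel into a private graph/session pair."""
    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    with graph.as_default():  # make sure the meta graph is imported into this graph
        tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    return graph, sess

graph, sess = load_saved_model("checkpoints/model1")  # placeholder path
x = graph.get_tensor_by_name("images_placeholder:0")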
graph2.get_tensor_by_name(\"images_placeholder:0\")\n labels_placeholder2 = graph2.get_tensor_by_name(\"labels_placeholder:0\")\n isTraining2 = graph2.get_tensor_by_name(\"isTraining:0\")\n learningRate2 = graph2.get_tensor_by_name(\"learningRate:0\")\n eval_correct2 = graph2.get_tensor_by_name(\"Equal:0\")\n argmax_logits2 = graph2.get_tensor_by_name(\"dense_2/BiasAdd:0\")\n argmax_labels2 = graph2.get_tensor_by_name(\"ArgMax_1:0\")\n\n with open(main_folder+\"checkpoints/%s.cfg\" % cfg.model2, 'rb') as f:\n model2_cfg = pickle.load(f)\n if not hasattr(model2_cfg, 'HEAT_THRESHOLD'): model2_cfg.HEAT_THRESHOLD = 0\n if not main_folder in model2_cfg.dataDirectory: model2_cfg.dataDirectory = main_folder + model2_cfg.dataDirectory\n\n\n\n\n\n\n\n #/********************************************\n # * **************************************** *\n # * ************* EVALUATION *************** *\n # * **************************************** *\n # ********************************************/\n\n true_count = 0 # Counts the number of correct predictions.\n # Restart configuration\n model1_cfg.curr_action = 0\n model1_cfg.file_iterator = 0\n model1_cfg.frame_iterator = 0\n model1_cfg.startDataAugment = 0\n # Restart configuration\n '''model2_cfg.curr_action = 0\n model2_cfg.file_iterator = 0\n model2_cfg.frame_iterator = 0\n model2_cfg.startDataAugment = 0\n test_file_dictionary = 0#'''\n # Configure depending on dataset to validate\n if cfg.dataset==\"test\":\n accuracy_per_file_dictionary = {k:0 for k in model1_cfg.testfiles}\n total_file_dictionary = {k:0 for k in model1_cfg.testfiles}\n model1_cfg.curr_file = model1_cfg.testfiles[0]\n Steps = model1_cfg.testSteps\n Expls = model1_cfg.testExpls\n elif cfg.dataset==\"train\":\n accuracy_per_file_dictionary = {k:0 for k in model1_cfg.trainfiles}\n total_file_dictionary = {k:0 for k in model1_cfg.trainfiles}\n model1_cfg.curr_file = model1_cfg.trainfiles[0]\n Steps = model1_cfg.totalSteps\n Expls = model1_cfg.totalSteps\n # Create confusion matrix\n confusion_matrix = np.zeros(shape=(model1_cfg.n_classes, model1_cfg.n_classes))\n for i in range(Steps):\n if model1_cfg.dataAugment == 1 and i == int(Steps/2):\n model1_cfg.startDataAugment = 1\n model1_cfg.curr_action = 0\n model1_cfg.file_iterator = 0\n model1_cfg.frame_iterator = 0\n if cfg.dataset==\"test\": model1_cfg.curr_file = model1_cfg.testfiles[model1_cfg.file_iterator]\n elif cfg.dataset==\"train\": model1_cfg.curr_file = model1_cfg.trainfiles[model1_cfg.file_iterator]\n print('Without dataAugment: %d/%d = %0.04f' % (true_count, i, true_count/i))\n temp_curr_file = model1_cfg.curr_file\n\n x1,y1,_,_ = getNextBatch3DCNNEquallyDistancedFrames(model1_cfg, 0, cfg.dataset)\n feed_dict = {\n images_placeholder1: x1,\n labels_placeholder1: y1,\n isTraining1: False,\n learningRate1: 0,\n }\n _, out_argmax_logits1, out_argmax_labels1 = sess1.run([eval_correct1, argmax_logits1, argmax_labels1], feed_dict=feed_dict)\n\n\n if cfg.twoStream:\n #x2,y2,_,_ = getNextBatch3DCNNEquallyDistancedFrames(model2_cfg, 0, cfg.dataset)\n feed_dict = {\n images_placeholder2: x1,\n labels_placeholder2: y1,\n isTraining2: False,\n learningRate2: 0,\n }\n _, out_argmax_logits2, out_argmax_labels2 = sess2.run([eval_correct2, argmax_logits2, argmax_labels2], feed_dict=feed_dict)\n\n\n # Apply softmax\n out_argmax_logits1 = softmax(out_argmax_logits1)\n out_argmax_logits2 = softmax(out_argmax_logits2)\n if cfg.twoStream:\n out_argmax_logits = (out_argmax_logits1 + out_argmax_logits2) / 2\n if out_argmax_labels1 != 
out_argmax_labels2:\n print('Labels do not coincide!')\n print('step: '+i)\n print('file: '+cfg.actions[cfg.curr_action]+' '+cfg.curr_file)\n exit()\n else:\n out_argmax_logits = out_argmax_logits1\n out_argmax_logits = np.argmax(out_argmax_logits, 1)\n output = np.equal(out_argmax_logits, out_argmax_labels1)\n # Calculate accuracy\n true_count += np.sum(output)\n accuracy_per_file_dictionary[temp_curr_file] += np.sum(output)\n total_file_dictionary[temp_curr_file] += 1\n\n # Fill confusion matrix\n for i,j in zip(out_argmax_labels1, out_argmax_logits):\n confusion_matrix[i, j] += 1\n\n\n\n # Print results and store confusion matrix\n precision = float(true_count) / Expls\n print(' Precision @ 1: %d/%d = %0.04f' % (true_count, Expls, precision))\n print(' %s\\n' % (str(sorted(accuracy_per_file_dictionary.items(), key=operator.itemgetter(1)))))\n if cfg.P == 'Y':\n if cfg.twoStream: confusionMatrixFileName = cfg.model1+\"-\"+cfg.model2+\".pkl\"\n else: confusionMatrixFileName = cfg.model1+\".pkl\"\n with open(confusionMatrixFileName, \"wb\") as f:\n pickle.dump(accuracy_confusion_matrix, f)\n\n\n\n\n duration = time.time() - start_runtime\n hours = duration/60/60\n mins = (hours-int(hours))*60\n print('Total runtime: %.2f sec (%d h %d min)' % (duration, hours, mins))","sub_path":"twoStream.py","file_name":"twoStream.py","file_ext":"py","file_size_in_byte":8186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"512556266","text":"####################################################################################################\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -#\n# Welcome! you are viewing the source code of the GCU Engineering Shop Activity Monitor #\n# Clearly you are here because something isn't working, or because you want to know how it works #\n# #\n# I've used a functional programming style over a strictly OO method, and have used #\n# intuitive naming instead of comments but as always, #\n# If it ain't broke, feature-creep! 
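In Ranker.predict in the record above, the k == 0 branch is immediately overwritten: the chain reads `if int(k) == 0: ...` then `if int(k) == n: ... else: ...`, so for k == 0 (with n > 0) the else branch recomputes indices with both bounds; the second test should be an elif. The whole bin assignment also collapses to np.digitize. A hedged equivalent sketch, assuming the fitted {rank: {'Min': lo, 'Max': hi}} layout shown in that class:

import numpy as np

def assign_bins(values, rank_dict):
    """rank_dict: {rank: {'Min': lo, 'Max': hi}}, as produced by the fitted ranker."""
    # interior bin edges: the Max of every bin except the last
    edges = [rank_dict[k]['Max'] for k in sorted(rank_dict, key=int)[:-1]]
    # digitize(..., right=False) gives edges[i-1] <= x < edges[i], matching Min <= x < Max
    return np.digitize(values, edges, right=False)

vals = np.array([0.1, 0.5, 0.9])
print(assign_bins(vals, {0: {'Min': 0.0, 'Max': 0.33},
                         1: {'Min': 0.33, 'Max': 0.66},
                         2: {'Min': 0.66, 'Max': 1.0}}))  # [0 1 2]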
#\n# #\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -#\n# #\n# Author: Daniel Hoven (github @Dhoven23/ShopMonitor_2.0), #\n# email: Daniel.Hoven@gcu.edu # #\n# #\n####################################################################################################\n\nfrom datetime import datetime, timedelta\nimport os\n\nimport weakref\nfrom tkinter import *\nfrom tkinter import ttk\nimport tkinter as tk\nimport sys\nimport Data.mongo_setup as mongo\nfrom Plotting.activity import plotins\nimport Service.admin_svc as asv\nimport Service.data_service as svc\nfrom Data.key import Key\nfrom Data import lookupID as LID\n\n\n\ndef isint(s): # universal check for integer\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\ndef popup_message(text, tab): # universally callable message-display\n def destroy():\n pop.destroy()\n button.destroy()\n\n message = text\n pop = Text(tab, height=7, width=55)\n pop.insert(END, text)\n pop.grid(column=1,row=2)\n button = Button(tab, text=\"Acknowledge\", command=destroy)\n button.after(5000, button.destroy) # destroy popup message after 5 seconds\n pop.after(15000, pop.destroy)\n button.grid(column=1,row=3)\n\n\ndef popup_create_student(StudentID, window, master): # add student to mongo\n def student_create(event: object = None):\n name = prompt.get()\n for n in name:\n if n.isalnum() or (n == ' '):\n pass\n else:\n Label(pop,text='Special Characters Not allowed!',fg='red').grid(row=5,column=0)\n return\n ID = StudentID\n\n if isint(name) == False:\n student = svc.create_student(ID, str(name))\n svc.log_into_account(StudentID)\n message = \"Hello \" + name\n popup_message(message, window)\n pop.destroy()\n\n\n\n def delete_entry(event=None):\n prompt.delete(0, END)\n\n def Capstone():\n def insert_capstoneID():\n value = int(get_cell_equals(str(prompt.get())))\n CapstoneID.insert(END, value)\n def capstone_student_create(event: object = None):\n name = prompt.get()\n ID = StudentID\n\n if isint(name) == False:\n C_Number = CapstoneID.get()\n print(str(C_Number), str(name))\n student = svc.create_capstone_student(ID, str(name), str(C_Number))\n svc.log_into_account_capstone(StudentID)\n pop.destroy()\n\n from Service.Reports.CapstoneProjects import get_cell_equals\n\n CapstoneID = Entry(pop, width=35,borderwidth=2)\n CapstoneID.grid(row=3,column=0)\n Label(pop,text=\"Capstone\\nnumber\").grid(row=3,column=1)\n insert_capstoneID()\n prompt.bind('',insert_capstoneID)\n Bo = Button(pop,text='Create new\\nCapstone Student',command=capstone_student_create)\n Bo.grid(row=4,column=0)\n\n\n pop = Toplevel()\n x = master.winfo_x()\n y = master.winfo_y()\n v = IntVar()\n pop.geometry(\"+%d+%d\" % (x + 130, y + 70))\n pop.minsize(80, 50)\n prompt = Entry(pop, width=35, borderwidth=2)\n capstone = Button(pop, text='Capstone', command=Capstone).grid(row=2, column=1)\n prompt.bind('', delete_entry)\n\n prompt.bind('', student_create)\n\n pop.wm_title(\"New Student\")\n warning = Label(pop, text='Confirm name below')\n warning.grid(row=0, column=0)\n prompt.grid(row=2, column=0)\n\n\ndef main_login_student_operation(window, master):\n def login(*args, **kwargs):\n StudentID = entry.get()\n entry.delete(0, END)\n\n if not (((len(StudentID) == 8) | (len(StudentID) == 6)) and isint(StudentID)):\n return\n else:\n message, loggedIn = svc.log_into_account(StudentID)\n if message == f\"Add name for student #{StudentID}\":\n popup_create_student(str(StudentID), window, master)\n else:\n if loggedIn:\n 
popup_message(message, window)\n else:\n student = svc.find_student_by_studentID(StudentID)\n popup_message(message, window)\n\n def delete_entry(*args):\n entry.delete(0, END)\n\n instruction = Label(window, text=\"\\nEnter Student ID\\n\", font='Helvetica 16 bold', fg='purple4',bg='grey86')\n instruction.grid(column=1,row=0)\n entry = Entry(window, width=25, borderwidth=2, font='Helvetica 20')\n Label(window,text=' ',font='Arial 16 bold',bg='grey86').grid(column=0,row=1)\n entry.bind('', login)\n entry.bind('', delete_entry)\n entry.grid(column=1,row=1)\n #print(f'{round(time.clock(),4)}: - - - - - Login Functions Written')\n\ndef admin_duties(admin, tabStructure, master): # admin operation\n def whos_in_the_shop(*args):\n messages = asv.whos_in_the_shop()\n text.delete('1.0', END)\n\n if messages:\n for message in messages:\n text.insert(END, f\"-> {message}\\n\")\n\n return None\n\n def logout_all_users(event: object = None):\n Are_you_sure()\n\n def edit_training(*args2):\n\n def training(*args0):\n Student = asv.edit_training_level(prompt.get(), train.get())\n if Student:\n pop.destroy()\n\n def delete_name_entry(*args1):\n prompt.delete(0, END)\n\n def delete_train_entry(*args):\n train.delete(0, END)\n\n pop = Toplevel()\n x = master.winfo_x()\n y = master.winfo_y()\n\n pop.geometry(\"+%d+%d\" % (x + 200, y + 100))\n pop.minsize(80, 30)\n prompt = Entry(pop, width=35, borderwidth=2)\n prompt.insert(0, \"Enter Student name: \")\n prompt.bind('', delete_name_entry)\n\n train = Entry(pop, width=35, borderwidth=2)\n train.insert(0, \"Enter Key Number: \")\n train.bind('', delete_train_entry)\n\n train.bind('', training)\n\n pop.wm_title(\"Training\")\n\n prompt.grid(row=2, column=0)\n train.grid(row=3, column=0)\n proceed = Button(pop, text=\"Cancel\", fg='red', command=pop.destroy)\n proceed.grid(row=1, column=0)\n\n def delete_entry(event=None):\n DateField.delete(0, END)\n\n def tools_past_due(event=None):\n message = asv.PastDueTools()\n text.delete('1.0', END)\n if message:\n for mess in message:\n text.insert(END,mess)\n\n\n def get_date(event=None):\n\n date = DateField.get()\n messages = asv.who_was_in_the_shop(date)\n text.delete('1.0', END)\n if messages:\n\n for message in messages:\n mess = message.split('|')\n mess1 = str(mess[0])\n text.insert(END, mess1[0:25] + (62 - len(mess[0])) * '.' + mess1[25:(len(mess[0]) + 1)] + mess[1])\n else:\n text.insert(END,\n f\"No record exists for {date}, make sure entry \\nhas format YYYY-MM-DD. 
ex: 2020-07-01\\n\")\n\n\n def plot_graphs(*args, **kwargs4):\n plotins()\n\n\n def Today():\n today=datetime.now().date()\n DateField.delete(0,END)\n DateField.insert(0,str(today))\n global date_memory\n date_memory = today\n get_date()\n\n def next_day():\n global date_memory\n step = timedelta(days=+1)\n date_memory = date_memory + step\n DateField.delete(0, END)\n DateField.insert(0, str(date_memory))\n get_date()\n\n\n def prev_day():\n global date_memory\n step = timedelta(days=-1)\n date_memory = date_memory + step\n DateField.delete(0, END)\n DateField.insert(0, str(date_memory))\n get_date()\n\n def add_capstone_id(event=None):\n\n def add_number():\n ID = str(E.get())\n student = svc.find_student_by_studentID(ID)\n\n student.capstoneID = str(E2.get())\n student.save()\n pop.destroy()\n pop = Toplevel()\n pop.minsize(300,120)\n Label(pop, text = 'Please Enter Student ID', font = 'Helvetica 14 bold', fg='Purple3').pack()\n E = Entry(pop, width=10, font = 'Helvetica 14')\n E.pack()\n Label(pop, text='Please Enter Capstone ID', font='Helvetica 14 bold', fg='Purple3').pack()\n E2 = Entry(pop, width=10, font='Helvetica 14')\n E2.pack()\n B = Button(pop, text='Submit!', font = 'helvetica 14 bold', fg='green', command=add_number).pack()\n\n\n\n text = Text(admin, height=20, width=62)\n DateField = Entry(admin, font='Arial 16 bold',width=10, borderwidth=4, relief = 'sunken')\n button1 = Button(admin, text=\"Who's In the Shop?\", width=15,font = \"Helvetica 12 bold\", borderwidth = 4,command=whos_in_the_shop)\n button2 = Button(admin, text=\"Signout All\", width=15,font = \"Helvetica 12 bold\", borderwidth = 4, command=logout_all_users)\n button3 = Button(admin, text=\"Blame\",width = 15, font = \"Helvetica 12 bold\", borderwidth = 4, command=tools_past_due)\n button4 = Button(admin, text=\"Edit Training\", width=15, font = \"Helvetica 12 bold\", borderwidth = 4, command=edit_training)\n button5 = Button(admin, text=\"Plot Graphs\", width=15, font = \"Helvetica 12 bold\", borderwidth = 4, command=plot_graphs)\n button6 = Button(admin, text=\"Add Capstone ID\", width=15, font = \"Helvetica 12 bold\", borderwidth = 4, command=add_capstone_id)\n button1.grid(column=0, row=1, columnspan=3)\n button2.grid(column=0, row=2, columnspan=3)\n button3.grid(column=0, row=3, columnspan=3)\n button4.grid(column=0, row=4, columnspan=3)\n button5.grid(column=0, row=5, columnspan=3)\n button6.grid(column=0, row=6, columnspan=3)\n Button(admin, text='<', bg='gray50',command=prev_day).grid(column=0,row=8,sticky=W+E)\n Button(admin, text='Today',command=Today).grid(column=1,row=8,sticky=W+E)\n Button(admin, text='>', bg='gray50',command=next_day).grid(column=2, row=8, sticky=W + E)\n text.grid(column=3, row=1, rowspan=10, columnspan=2)\n DateField.grid(column=1, row=7)\n DateField.bind('',get_date)\n #print(f'{round(time.clock(),4)}: - - - - - Admin functions Written')\n\ndef build_login_tab(tabStructure, master):\n login = ttk.Frame(tabStructure)\n\n tabStructure.add(login, text=\"login\")\n\n main_login_student_operation(login, master)\n\n\ndef Are_you_sure(): # simple yes/no for logout-all\n def do_yes():\n asv.logout_all_users()\n question.destroy()\n\n def do_no():\n question.destroy()\n\n question = Toplevel()\n question.wm_title(\"Confirm\")\n\n prompt = Label(question, text='Are You Sure?')\n yes = Button(question, text='YES', width=30, fg='green', command=do_yes)\n no = Button(question, text='NO', width=30, fg='red', command=do_no)\n prompt.pack()\n yes.pack()\n no.pack()\n 
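The get_date handler above only tells the user the expected YYYY-MM-DD layout after a lookup misses, and the prev/next buttons assume date_memory already holds a parsed date. A small validator, illustrative only, that parses the entry before querying the service would catch bad input up front:

from datetime import datetime

def parse_report_date(text):
    """Return a date for a YYYY-MM-DD string, or None if it doesn't parse."""
    try:
        return datetime.strptime(text.strip(), "%Y-%m-%d").date()
    except ValueError:
        return None

print(parse_report_date("2020-07-01"))   # 2020-07-01
print(parse_report_date("07/01/2020"))   # None -> show the format hint instead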
question.geometry('210x75')\n\n\ndef build_admin_tab(tabStructure, master):\n admin = ttk.Frame(tabStructure)\n\n tabStructure.add(admin, text=\"admin\")\n\n admin_duties(admin, tabStructure, master)\n\n\ndef checkout_Machine_Key(keyNumber, root):\n arg = BooleanVar()\n arg = True\n X = root.winfo_x()\n Y = root.winfo_y()\n\n def check_training(*args):\n StudentID = ID.get()\n student = svc.find_student_by_studentID(StudentID)\n\n for number in student.keys_trained:\n if int(number) == int(keyNumber):\n alert = Toplevel()\n alert.geometry(\"+%d+%d\" % (X+225, Y+75))\n alert.wm_title(\"Checkout Successful\")\n confirm = Label(alert, text=\"You're good to go!\", font='Helvetica 14 bold', fg='green')\n confirm.pack(anchor=CENTER)\n Button(alert, text='Confirm', command=alert.destroy).pack()\n checkout.destroy()\n arg = True\n return\n\n alert = Toplevel()\n alert.geometry(\"+%d+%d\" % (X+225, Y+75))\n alert.wm_title(\"Checkout Unsuccessful\")\n confirm = Label(alert, text=\"You are not cleared \\nto use this key\", font='Helvetica 14 bold', fg='red')\n confirm.pack(anchor=CENTER)\n Button(alert, text='Confirm', command=alert.destroy).pack()\n checkout.destroy()\n arg = False\n return\n\n checkout = Toplevel()\n checkout.geometry(\"+%d+%d\" % (X+150, Y+75))\n checkout.minsize(320,140)\n checkout.config(bg='purple1')\n Instruction = Label(checkout, text='Please Enter your StudentID', font='Arial 12 bold', fg='white',bg='purple1').pack(side=TOP)\n ID = Entry(checkout, width=25, borderwidth=2, font = 'Helvetica 14')\n ID.bind('', check_training)\n ID.pack()\n if arg == True:\n return 'green'\n else:\n return 'red'\n\n\n\nclass KeyButton:\n\n def __init__(self, master, x, y, number, root, name, color):\n def Onclick():\n\n if self.button[\"bg\"] == color:\n self.button[\"bg\"] = \"grey86\"\n key = Key.objects(keyNumber=number).first()\n result = checkout_Machine_Key(key.keyNumber, root)\n\n else:\n self.button[\"bg\"] = color\n pop = Toplevel()\n pop.geometry(\"300x140\")\n pop.minsize(320,140)\n Label(pop, text='Key Successfully Returned\\n'\n 'Please Ensure Machine is Clean', font='Arial 14 bold', fg='MediumPurple3').pack()\n Button(pop, text='Accept', font='Helvetica 16 bold', fg='green',command=pop.destroy).pack()\n\n\n\n if svc.key_exists(number):\n self.button = Button(master, text=str(number) + '\\n' + name, bg=color,width=16, height=2, command=Onclick)\n self.button.grid(column=x+1, row=y)\n\n\ndef build_keys_tab(tabStructure, root):\n\n keys = ttk.Frame(tabStructure)\n Key_Buttons_list_function(keys,root)\n\n tabStructure.add(keys, text=\"Keys\")\n\n\ndef Key_Buttons_list_function(keys, root):\n from Data.KeysList import RM131_keys, RM130_keys, RM132_keys\n\n keys.grid()\n key1 = KeyButton(keys,1,1,1,root,RM130_keys[1], 'MediumPurple1')\n key2 = KeyButton(keys,2,1,2,root,RM130_keys[2], 'MediumPurple1')\n key3 = KeyButton(keys,3,1,3,root,RM130_keys[3], 'MediumPurple1')\n key4 = KeyButton(keys,4,1,4,root,RM130_keys[4], 'MediumPurple1')\n key5 = KeyButton(keys,5,1,5,root,RM130_keys[5], 'MediumPurple1')\n key6 = KeyButton(keys,1,2,6,root,RM130_keys[6], 'MediumPurple1')\n key7 = KeyButton(keys,2,2,7,root,RM130_keys[7], 'MediumPurple1')\n key8 = KeyButton(keys,3,2,8,root,RM130_keys[8], 'MediumPurple1')\n key9 = KeyButton(keys,4,2,9,root,RM130_keys[9], 'MediumPurple1')\n key10 =KeyButton(keys,5,2,10,root,RM130_keys[10],'MediumPurple1')\n key11 =KeyButton(keys,1,3,11,root,RM130_keys[11],'MediumPurple1')\n key12 = KeyButton(keys, 2, 3, 12, root, RM132_keys[12], 'orange red')\n\n key13 = 
KeyButton(keys,3,3,13,root,RM132_keys[13],'orange red')\n key14 = KeyButton(keys,4,3,14,root,RM132_keys[14],'orange red')\n key15 = KeyButton(keys, 5, 3, 15, root, RM132_keys[15], 'orange red')\n key16 = KeyButton(keys, 1, 4, 16, root, RM132_keys[16], 'orange red')\n key17 = KeyButton(keys, 2, 4, 17, root, RM132_keys[17], 'orange red')\n key18 = KeyButton(keys, 3, 4, 18, root, RM132_keys[18], 'orange red')\n key19 = KeyButton(keys, 4, 4, 19, root, RM132_keys[19], 'orange red')\n key20 = KeyButton(keys, 5, 4, 20, root, RM132_keys[20], 'orange red')\n key21 = KeyButton(keys, 1, 5, 21, root, RM132_keys[21], 'orange red')\n\n key22 = KeyButton(keys, 2, 5, 22, root, RM131_keys[22], 'bisque')\n key23 = KeyButton(keys, 3, 5, 23, root, RM131_keys[23], 'bisque')\n key24 = KeyButton(keys, 4, 5, 24, root, RM131_keys[24], 'bisque')\n key25 = KeyButton(keys, 5, 5, 25, root, RM131_keys[25], 'bisque')\n key26 = KeyButton(keys, 1, 6, 26, root, RM131_keys[26], 'bisque')\n Label(keys, text=\" \", bg='grey86').grid(column=0,rowspan=4)\n\n\ndef Checkout_tool(x, y, toolname):\n\n def checkout():\n ID = e.get()\n ReturnDate = date.get()\n svc.Checkout_tool(toolname, ID, ReturnDate)\n pop.destroy()\n\n pop = Toplevel()\n pop.geometry(\"+%d+%d\" % (x+280, y+75))\n pop.minsize(350,240)\n pop.title(f'Checkout {toolname.name} {toolname.size}')\n i = Label(pop, text='Enter Student ID', font = 'Arial 14 bold')\n e = Entry(pop, font='Helvetica 12', width=25)\n space = Label(pop, text ='', font = 'Arial 10')\n i.pack()\n e.pack()\n space.pack()\n date_instruction = Label(pop, text='Return Date', font='Arial 14 bold')\n date_instruction.pack()\n date_instruction_line2 = Label(pop, text = \"Format as YYYY-MM-DD\")\n date_instruction_line2.pack()\n date = Entry(pop, font='Helvetica 12', width=25)\n date.pack()\n Label(pop, text=' ', font='Arial 10').pack()\n Button(pop,text='OK', width=20,font='Helvetica 12 bold',fg='green',command =checkout).pack()\n Button(pop, text='Cancel', width=20, font='Helvetica 12 bold', fg='red', command = lambda: pop.destroy()).pack()\n\n\ndef Return_Tool(x, y, toolname):\n def retern():\n ID = E.get()\n if isint(ID) and (len(ID)==8):\n print(toolname.name + toolname.size)\n svc.Return(toolname, ID)\n pop.destroy()\n\n pop = Toplevel()\n pop.geometry(\"+%d+%d\" % (x + 280, y + 75))\n pop.minsize(220,140)\n I = Label(pop, text='Confirm Student ID', font='Helvetica 14 bold')\n I.pack()\n E = Entry(pop, width=35, borderwidth=2, font='Helvetica 12')\n E.pack()\n Label(pop, text=' ', font='Arial 14').pack()\n B = Button(pop, width=20, text='Return Tool', font='Arial 14 bold', fg = 'green', command=retern).pack()\n\nclass ToolLabel:\n\n _instances = set()\n def __init__(self, master, message, n, root, col, returner):\n\n message = message.split(',')\n\n\n def Onclick1(*args):\n x = root.winfo_x()\n y = root.winfo_y()\n\n tool = svc.find_tool(message[0], message[1])\n self.button.destroy()\n Checkout_tool(x, y, tool)\n\n def Onclick2(*args):\n x = root.winfo_x()\n y = root.winfo_y()\n self.button.destroy()\n tool = svc.find_tool(message[0], message[1])\n Return_Tool(x, y, tool)\n\n\n\n if returner==False:\n self.button = ttk.Button(master, text=f\"{message[0]}\\n{message[1]}\", command=Onclick1, style='flat.TButton')\n else:\n self.button = ttk.Button(master, text=f\"{message[0]}\\n{message[1]}\", command=Onclick2, style='flat.TButton')\n\n\n self.button.grid(row=n, column=col,sticky=W + E)\n self._instances.add(weakref.ref(self))\n\n def clear(self,event=None):\n self.button.destroy()\n\n @classmethod 
# keep track of button objects so they can be destroyed\n def getinstances(cls):\n dead = set()\n for ref in cls._instances:\n obj = ref()\n if obj is not None:\n yield obj\n else:\n dead.add(ref)\n cls._instances -= dead\n\n\n\n\n\ndef tools_tab_functions(tools, root, tabStructure):\n def ActiveToolSearch(event=None):\n text = toolName.get()\n toolname = text.split(',')\n temp = toolname[0].split(' ')\n text = ''\n for t in temp:\n if text:\n text = text + '-' + t\n else:\n text = t\n name = text\n size = ''\n if len(toolname) > 1:\n size = toolname[1].strip()\n\n\n messages = svc.lookup_tool(name, size)\n n = IntVar()\n n = 2\n destroy()\n toolName.bind('')\n col = 0\n for message in messages:\n if n > 9:\n col = col + 1\n n = 2\n if col == 3:\n return\n toolLabel = ToolLabel(tools, message, n, root, col, False)\n n += 1\n\n def ActiveToolReturn(event=None):\n StudentID = return_ID.get()\n\n message = svc.FindCheckedOutTools(StudentID)\n n=2\n destroy()\n if message:\n for mess in message:\n toolLabel = ToolLabel(tools, mess, n, root, 4, True)\n n+=1\n\n\n\n def destroy():\n for model in ToolLabel.getinstances():\n model.clear()\n\n def add_tool(event=None):\n\n def insert_tool_in_DB():\n name = toolname.get()\n\n size = toolsize.get()\n full_tool_name = str(name) + ',' + str(size)\n tool = svc.Create_Tool(full_tool_name)\n if not tool.name:\n print(\"Oops, that didn't work\")\n else:\n pop.destroy()\n return\n\n\n pop = Toplevel()\n pop.minsize(200, 300)\n name_instruction = Label(pop, text =\"Enter tool name\", font = 'Arial 16 bold').pack()\n name_ins_second_line = Label(pop, text = \"seperate words with a -\\n i.e. 'metric-nut-driver'\").pack()\n toolname = Entry(pop, width=20, borderwidth=2, font = 'Arial 16')\n toolname.pack()\n Label(pop,text = \"\t\").pack()\n size_instruction = Label(pop, text = \"Enter tool size or number\", font = 'Arial 16 bold').pack()\n Label(pop, text = \"i.e. 
'3/4'\").pack()\n toolsize = Entry(pop, width = 10, borderwidth=2, font = 'Arial 16')\n toolsize.pack()\n dewit = Button(pop, text = \"Add Tool\", font = \"Helvetica 16 bold\", fg = 'green',\ncommand = insert_tool_in_DB)\n\n dewit.pack()\n\n\n\n instruction = Label(tools, text='Name of Tool\\n(For tool checkout)', font='Helvetica 14 bold').grid(row=0,columnspan=3, sticky=N+S)\n\n toolName = Entry(tools, width=20, borderwidth=2, font='Arial 20')\n toolName.bind('', ActiveToolSearch)\n\n toolName.grid(row=1, columnspan=3)\n return_instruction = Label(tools, text='Student_ID\\n(For tool return)', font='Helvetica 14 bold', bg='grey86')\n return_ID = Entry(tools, width=14, borderwidth=2, font='Arial 20')\n return_ID.bind('', ActiveToolReturn)\n return_instruction.grid(row=0,column=4, sticky=W+E)\n color=root.cget('bg')\n add_tool = Button(tools, text = \"Add\\ntool\", command = add_tool)\n add_tool.grid(row=1,column=3)\n return_ID.grid(row=1,column=4)\n #print(f'{round(time.clock(),4)}: - - - - - Tools tab functions built')\n\ndef buils_tools_tab(tabStructure, master):\n\n\n tools = ttk.Frame(tabStructure)\n\n tabStructure.add(tools, text=\"Tools\")\n tools_tab_functions(tools, master, tabStructure)\n\n\n\ndef build_all_the_tabs_admin(master):\n s = ttk.Style()\n s.configure('base.TNotebook', background='white')\n tabStructure = ttk.Notebook(master, style='base.TNotebook')\n\n\n build_login_tab(tabStructure, master)\n #print(f'{round(time.clock(),4)}: - - - - - Login Tab built')\n build_admin_tab(tabStructure, master)\n #print(f'{round(time.clock(),4)}: - - - - - Admin Tab built')\n build_keys_tab(tabStructure, master)\n #print(f'{round(time.clock(),4)}: - - - - - keys Tab built')\n buils_tools_tab(tabStructure, master)\n #print(f'{round(time.clock(),4)}: - - - - - Tools Tab built')\n\n tabStructure.pack(expand=1, fill='both')\n #print(f'{round(time.clock(),4)}: - - - - - Tabstructure built')\n\n\nclass app: # constructor for GUI\n def __init__(self, master):\n self.master = master\n\n def onExit():\n master.quit()\n\n\n master.title(\"Shop Activity Monitor\")\n master.minsize(720, 300)\n styles = ttk.Style(master)\n styles.theme_use('clam')\n styles.configure('flat.TButton', borderwidth=0,font='Helvetica 8')\n styles.configure('green.TButton', foreground='green', borderwidth=0)\n menubar = Menu(self.master)\n self.master.config(menu=menubar)\n\n fileMenu = Menu(menubar)\n fileMenu.add_command(label=\"Exit\", command=onExit)\n menubar.add_cascade(label=\"File\", menu=fileMenu)\n\n #fileMenu.add_command(label=\"Create Report\", command=generate)\n #fileMenu.add_command(label=\"Send last Report\", command=send_weekly_report)\n\n self.statusbar = Label(master, text=\"\", bd=1, relief=SUNKEN, anchor=W)\n self.statusbar.pack(side=BOTTOM, fill=X)\n build_all_the_tabs_admin(master)\n self.update()\n\n def update(self):\n date = 'Today is: ' + svc.print_day() + f\", time: \" \\\n f\"{datetime.now().hour}:{datetime.now().minute} \"\n\n self.statusbar.config(text=str(date))\n self.statusbar.after(1000, self.update)\n\n\ndef login():\n def check(*args):\n os.environ['USER'] = user_entry.get()\n os.environ['PASSWORD'] = password_entry.get()\n mongo.global_init(os.environ.get('USER'), os.environ.get('PASSWORD'))\n #print(f'{round(time.clock(),4)}: - - - - - Connected to Cloud')\n log.destroy()\n\n log = Tk()\n log.title(\"User Login\")\n log.geometry(\"280x140\")\n instruction = Label(log, text='Please enter your database credentials', font='Helvetica').grid(row=0, column=0,\n columnspan=2)\n user_entry = 
Entry(log, width=25, borderwidth=1)\n user_entry.grid(row=1, column=1)\n user_instruction = Label(log, text='Username')\n user_instruction.grid(row=1, column=0)\n password_entry = Entry(log, width=25, borderwidth=1)\n password_entry.grid(row=2, column=1)\n password_instruction = Label(log, text='Password')\n password_instruction.grid(row=2, column=0)\n attempt = Button(log, text='GO', font='Helvetica', width=15, command=check)\n attempt.grid(row=3, column=0, columnspan=2)\n #print(f'{round(time.clock(),4)}: - - - - - Network Login launched')\n log.mainloop()\n\n\ndef login_error():\n def ok():\n error.destroy()\n login()\n\n def cancel():\n error.quit()\n sys.exit()\n\n error = Tk()\n error.geometry(\"260x100\")\n Label(error, text='Invalid Login\\n Please try again', font='Helvetica', fg='red').pack()\n Button(error, text='Ok', width=15, command=ok, fg='green').pack()\n Button(error, text='Cancel', width=15, command=cancel, fg='red').pack()\n error.mainloop()\n\n\ndef main(): # run the app\n\n #print(f'{round(time.clock(),4)}: - - - - - Program execution Begin')\n login()\n #print(f'{round(time.clock(),4)}: - - - - - Cloud Login')\n\n while True:\n try:\n svc.print_day() # check that cloud connection is alive\n except:\n mongo.global_disconnect()\n login_error()\n\n else:\n break\n\n root = tk.Tk()\n #print(f'{round(time.clock(),4)}: - - - - - App window launch')\n app(root)\n root.mainloop()\n","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":27882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"542274618","text":"#!/usr/bin/python3\n\nfrom python_scripts.ranker.rank_helper import *\n\nclass Ranker(object):\n\n #This class is dedicated to ranking the various columns according to how well \n #they are performing in reference to each other.\n\n def __init__(self, rank_bin_size):\n self.rb_size = rank_bin_size\n self.rank_system = {}\n\n #Getter and setter methods\n def setRankBinSize(self, bin_size ):\n self.rb_size = bin_size \n\n def getRankBinSize(self):\n return self.rb_size\n\n #Rank the entire system \n def fit(self, df):\n\n import pandas as pd\n\n n = self.getRankBinSize()\n\n self.rank_system = { col : setRankSystemCol(n, df, col) for col in df.columns}\n \n #Return a construct ranking system\n def getRankSystem(self):\n\n return self.rank_system\n\n #Rank the new system, and return the newly ranked system\n def predict(self, df ):\n \n import pandas as pd\n import numpy as np\n \n rank_system = self.getRankSystem()\n\n df_rank = pd.DataFrame( index = df.index )\n\n if rank_system == {}:\n print ('Please set the ranking system')\n\n else:\n for col in df.columns:\n if col in rank_system.keys(): \n rank_dict = rank_system.get(col)\n \n df_rank[col] = 0\n \n n = len(rank_dict.keys()) - 1\n \n for k in rank_dict.keys():\n \n l_bound = rank_dict.get(k).get('Min')\n u_bound = rank_dict.get(k).get('Max')\n\n if int(k) == 0:\n indices = np.where( df[col] < u_bound )[0]\n\n if int(k) == n:\n indices = np.where( l_bound <= df[col] )[0]\n\n else:\n indices = np.where( ( df[col] < u_bound )\n &\n ( l_bound <= df[col] )\n )[0]\n df_rank.loc[indices, col] = int(k)\n \n else:\n df_rank[col] = df[col] \n \n return df_rank \n\n \n","sub_path":"work_samples/internship/code_base/proj1/ranker/ranker.py","file_name":"ranker.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"54040527","text":"#!/usr/bin/env python\nfrom 
__future__ import print_function, division, absolute_import\nimport types\n\nimport numpy as np\n\nimport os\nos.environ['ODIN'] = 'float32,gpu,theano'\n\nfrom blocks import algorithms\nfrom blocks import backend as K\nfrom blocks import nnet as N\nfrom blocks import fuel\nfrom blocks import training\nfrom blocks.roles import add_role, DEPLOYING, has_roles, TRAINING\nfrom blocks.graph import ComputationGraph\nfrom blocks import visual\nimport cPickle\n\nds = fuel.load_mnist()\nprint(ds)\n\nX = K.placeholder(shape=(None,) + ds['X_train'].shape[1:], name='X', for_training=True)\ny = K.placeholder(shape=ds['y_train'].shape, name='y', dtype='int32')\n\n\nops = N.Sequence([\n lambda x: K.dimshuffle(x, (0, 'x', 1, 2)),\n N.Conv2D(16, (3, 3), stride=(1, 1), pad='same', activation=N.activations.rectify),\n K.pool2d,\n N.Dropout(level=0.3),\n N.Conv2D(32, (3, 3), stride=(1, 1), pad='same', activation=N.activations.rectify),\n K.pool2d,\n N.Dropout(level=0.3),\n K.flatten,\n N.Dense(64, activation=N.activations.rectify),\n N.Dense(10, activation=N.activations.softmax)\n])\nops = cPickle.loads(cPickle.dumps(ops)) # test if the ops is pickle-able\n\ny_pred_train = ops(X)\nadd_role(X, DEPLOYING)\ny_pred_test = ops(X)\ncost_train = N.cost.categorical_crossentropy(y_pred_train, y)\ncost_test = N.cost.categorical_accuracy(y_pred_test, y)\n\ngraph = ComputationGraph(cost_train)\nalg = algorithms.GradientDescent(cost=cost_train, step_rule=algorithms.RMSProp(learning_rate=0.01)).initialize()\nf_train = alg.function\nf_test = K.function([X, y], cost_test)\n\ntask = training.MainLoop(dataset=ds, batch_size=128)\ntask.add_callback(\n training.ProgressMonitor(title='Results: %.2f'),\n training.History(),\n # training.EarlyStopGeneralizationLoss(5, 'valid', lambda x: 1 - np.mean(x)),\n training.EarlyStopPatience(0, 'valid', lambda x: 1 - np.mean(x)),\n training.CheckpointGraph('graph', 'path')\n)\ntask = cPickle.loads(cPickle.dumps(task))\ntask.set_task(f_train, ('X_train', 'y_train'), epoch=1, name='train')\ntask.add_subtask(f_test, ('X_valid', 'y_valid'), freq=0.3, name='valid')\ntask.add_subtask(f_test, ('X_test', 'y_test'), epoch=1, when=-1, name='test')\ntask.run()\n\nvalid = task.callback[1].get(task='valid', event='epoch_end')\nvalid = [np.mean(i) for i in valid]\nprint(valid)\nprint(visual.print_bar(valid, bincount=len(valid)))\n\n\ntry:\n test = np.mean(task.callback[1].get(task='test', event='epoch_end')[0])\n print('Test accuracy:', test)\nexcept:\n pass\n","sub_path":"examples/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"184667574","text":"#\n# Copyright (c) 2023 Airbyte, Inc., all rights reserved.\n#\n\nfrom http import HTTPStatus\nfrom unittest.mock import patch\n\nimport pytest\nimport requests\nfrom source_recharge.api import (\n Addresses,\n Charges,\n Collections,\n Customers,\n Discounts,\n Metafields,\n Onetimes,\n Orders,\n Products,\n RechargeStream,\n Shop,\n Subscriptions,\n)\n\n\n# config\n@pytest.fixture(name=\"config\")\ndef config():\n return {\n \"authenticator\": None,\n \"access_token\": \"access_token\",\n \"start_date\": \"2021-08-15T00:00:00Z\",\n }\n\n\nclass TestCommon:\n\n main = RechargeStream()\n\n @pytest.mark.parametrize(\n \"stream_cls, expected\",\n [\n (Addresses, \"id\"),\n (Charges, \"id\"),\n (Collections, \"id\"),\n (Customers, \"id\"),\n (Discounts, \"id\"),\n (Metafields, \"id\"),\n (Onetimes, \"id\"),\n (Orders, \"id\"),\n 
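The mnist script above pickles and immediately unpickles both ops and task ("test if the ops is pickle-able"); that round-trip is a cheap guard that a model definition can be checkpointed before training starts. The same check in isolation, written with Python 3's pickle rather than cPickle:

import pickle

def assert_picklable(obj):
    """Round-trip an object through pickle; raises if any attribute can't serialize."""
    clone = pickle.loads(pickle.dumps(obj))
    assert type(clone) is type(obj)
    return clone

assert_picklable({"layers": [16, 32, 10], "activation": "relu"})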
(Products, \"id\"),\n (Shop, [\"shop\", \"store\"]),\n (Subscriptions, \"id\"),\n ],\n )\n def test_primary_key(self, stream_cls, expected):\n assert expected == stream_cls.primary_key\n\n @pytest.mark.parametrize(\n \"stream_cls\",\n [\n (Addresses),\n (Charges),\n (Collections),\n (Customers),\n (Discounts),\n (Metafields),\n (Onetimes),\n (Orders),\n (Products),\n (Shop),\n (Subscriptions),\n ],\n )\n def test_url_base(self, stream_cls):\n expected = self.main.url_base\n result = stream_cls.url_base\n assert expected == result\n\n @pytest.mark.parametrize(\n \"stream_cls\",\n [\n (Addresses),\n (Charges),\n (Collections),\n (Customers),\n (Discounts),\n (Metafields),\n (Onetimes),\n (Orders),\n (Products),\n (Shop),\n (Subscriptions),\n ],\n )\n def test_limit(self, stream_cls):\n expected = self.main.limit\n result = stream_cls.limit\n assert expected == result\n\n @pytest.mark.parametrize(\n \"stream_cls\",\n [\n (Addresses),\n (Charges),\n (Collections),\n (Customers),\n (Discounts),\n (Metafields),\n (Onetimes),\n (Orders),\n (Products),\n (Shop),\n (Subscriptions),\n ],\n )\n def test_page_num(self, stream_cls):\n expected = self.main.page_num\n result = stream_cls.page_num\n assert expected == result\n\n @pytest.mark.parametrize(\n \"stream_cls, stream_type, expected\",\n [\n (Addresses, \"incremental\", \"addresses\"),\n (Charges, \"incremental\", \"charges\"),\n (Collections, \"full-refresh\", \"collections\"),\n (Customers, \"incremental\", \"customers\"),\n (Discounts, \"incremental\", \"discounts\"),\n (Metafields, \"full-refresh\", \"metafields\"),\n (Onetimes, \"incremental\", \"onetimes\"),\n (Orders, \"incremental\", \"orders\"),\n (Products, \"full-refresh\", \"products\"),\n (Shop, \"full-refresh\", None),\n (Subscriptions, \"incremental\", \"subscriptions\"),\n ],\n )\n def test_data_path(self, config, stream_cls, stream_type, expected):\n if stream_type == \"incremental\":\n result = stream_cls(start_date=config[\"start_date\"]).data_path\n else:\n result = stream_cls().data_path\n assert expected == result\n\n @pytest.mark.parametrize(\n \"stream_cls, stream_type, expected\",\n [\n (Addresses, \"incremental\", \"addresses\"),\n (Charges, \"incremental\", \"charges\"),\n (Collections, \"full-refresh\", \"collections\"),\n (Customers, \"incremental\", \"customers\"),\n (Discounts, \"incremental\", \"discounts\"),\n (Metafields, \"full-refresh\", \"metafields\"),\n (Onetimes, \"incremental\", \"onetimes\"),\n (Orders, \"incremental\", \"orders\"),\n (Products, \"full-refresh\", \"products\"),\n (Shop, \"full-refresh\", \"shop\"),\n (Subscriptions, \"incremental\", \"subscriptions\"),\n ],\n )\n def test_path(self, config, stream_cls, stream_type, expected):\n if stream_type == \"incremental\":\n result = stream_cls(start_date=config[\"start_date\"]).path()\n else:\n result = stream_cls().path()\n assert expected == result\n\n @pytest.mark.parametrize(\n (\"http_status\", \"headers\", \"should_retry\"),\n [\n (HTTPStatus.OK, {\"Content-Length\": 256}, True),\n (HTTPStatus.BAD_REQUEST, {}, False),\n (HTTPStatus.TOO_MANY_REQUESTS, {}, True),\n (HTTPStatus.INTERNAL_SERVER_ERROR, {}, True),\n (HTTPStatus.FORBIDDEN, {}, False),\n ],\n )\n def test_should_retry(self, http_status, headers, should_retry):\n response = requests.Response()\n response.status_code = http_status\n response._content = b\"\"\n response.headers = headers\n stream = RechargeStream()\n assert stream.should_retry(response) == should_retry\n\n\nclass TestFullRefreshStreams:\n def generate_records(self, 
stream_name, count):\n result = []\n for i in range(0, count):\n result.append({f\"record_{i}\": f\"test_{i}\"})\n return {stream_name: result}\n\n @pytest.mark.parametrize(\n \"stream_cls, rec_limit, expected\",\n [\n (Collections, 1, {\"page\": 2}),\n (Metafields, 2, {\"page\": 2}),\n (Products, 1, {\"page\": 2}),\n (Shop, 1, {\"page\": 2}),\n ],\n )\n def test_next_page_token(self, stream_cls, rec_limit, requests_mock, expected):\n stream = stream_cls()\n stream.limit = rec_limit\n url = f\"{stream.url_base}{stream.path()}\"\n requests_mock.get(url, json=self.generate_records(stream.name, rec_limit))\n response = requests.get(url)\n assert stream.next_page_token(response) == expected\n\n @pytest.mark.parametrize(\n \"stream_cls, next_page_token, stream_state, stream_slice, expected\",\n [\n (Collections, None, {}, {}, {\"limit\": 250}),\n (Metafields, {\"page\": 2}, {\"updated_at\": \"2030-01-01\"}, {}, {\"limit\": 250, \"page\": 2}),\n (Products, None, {}, {}, {\"limit\": 250}),\n (Shop, None, {}, {}, {\"limit\": 250}),\n ],\n )\n def test_request_params(self, stream_cls, next_page_token, stream_state, stream_slice, expected):\n stream = stream_cls()\n result = stream.request_params(stream_state, stream_slice, next_page_token)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"stream_cls, data, expected\",\n [\n (Collections, [{\"test\": 123}], [{\"test\": 123}]),\n (Metafields, [{\"test2\": 234}], [{\"test2\": 234}]),\n (Products, [{\"test3\": 345}], [{\"test3\": 345}]),\n (Shop, {\"test4\": 456}, [{\"test4\": 456}]),\n ],\n )\n def test_parse_response(self, stream_cls, data, requests_mock, expected):\n stream = stream_cls()\n url = f\"{stream.url_base}{stream.path()}\"\n data = {stream.data_path: data} if stream.data_path else data\n requests_mock.get(url, json=data)\n response = requests.get(url)\n assert list(stream.parse_response(response)) == expected\n\n @pytest.mark.parametrize(\n \"stream_cls, data, expected\",\n [\n (Collections, [{\"test\": 123}], [{\"test\": 123}]),\n (Metafields, [{\"test2\": 234}], [{\"test2\": 234}]),\n (Products, [{\"test3\": 345}], [{\"test3\": 345}]),\n (Shop, {\"test4\": 456}, [{\"test4\": 456}]),\n ],\n )\n def get_stream_data(self, stream_cls, data, requests_mock, expected):\n stream = stream_cls()\n url = f\"{stream.url_base}{stream.path()}\"\n data = {stream.data_path: data} if stream.data_path else data\n requests_mock.get(url, json=data)\n response = requests.get(url)\n assert list(stream.parse_response(response)) == expected\n\n @pytest.mark.parametrize(\"owner_resource, expected\", [({\"customer\": {\"id\": 123}}, {\"customer\": {\"id\": 123}})])\n def test_metafields_read_records(self, owner_resource, expected):\n with patch.object(Metafields, \"read_records\", return_value=owner_resource):\n result = Metafields().read_records(stream_slice={\"owner_resource\": owner_resource})\n assert result == expected\n\n\nclass TestIncrementalStreams:\n def generate_records(self, stream_name, count):\n result = []\n for i in range(0, count):\n result.append({f\"record_{i}\": f\"test_{i}\"})\n return {stream_name: result}\n\n @pytest.mark.parametrize(\n \"stream_cls, expected\",\n [\n (Addresses, \"updated_at\"),\n (Charges, \"updated_at\"),\n (Customers, \"updated_at\"),\n (Discounts, \"updated_at\"),\n (Onetimes, \"updated_at\"),\n (Orders, \"updated_at\"),\n (Subscriptions, \"updated_at\"),\n ],\n )\n def test_cursor_field(self, config, stream_cls, expected):\n stream = stream_cls(start_date=config[\"start_date\"])\n result = 
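The pagination tests above stub the endpoint with exactly `limit` records and expect next_page_token to answer {"page": 2}; in other words, a full page implies another request. The stream implementation itself is not shown in this record, but a hypothetical version consistent with those cases looks like this:

def next_page_token(records_on_page, limit, current_page):
    # hypothetical reconstruction -- the real source_recharge implementation is not shown
    if len(records_on_page) == limit:  # a full page implies more pages may exist
        return {"page": current_page + 1}
    return None

assert next_page_token([{"record_0": "test_0"}], limit=1, current_page=1) == {"page": 2}
assert next_page_token([], limit=1, current_page=1) is None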
stream.cursor_field\n assert result == expected\n\n @pytest.mark.parametrize(\n \"stream_cls, rec_limit, expected\",\n [\n (Addresses, 1, {\"page\": 2}),\n (Charges, 2, {\"page\": 2}),\n (Customers, 1, {\"page\": 2}),\n (Discounts, 1, {\"page\": 2}),\n (Onetimes, 1, {\"page\": 2}),\n (Orders, 1, {\"page\": 2}),\n (Subscriptions, 1, {\"page\": 2}),\n ],\n )\n def test_next_page_token(self, config, stream_cls, rec_limit, requests_mock, expected):\n stream = stream_cls(start_date=config[\"start_date\"])\n stream.limit = rec_limit\n url = f\"{stream.url_base}{stream.path()}\"\n requests_mock.get(url, json=self.generate_records(stream.name, rec_limit))\n response = requests.get(url)\n assert stream.next_page_token(response) == expected\n\n @pytest.mark.parametrize(\n \"stream_cls, next_page_token, stream_state, stream_slice, expected\",\n [\n (Addresses, None, {}, {}, {\"limit\": 250, \"updated_at_min\": \"2021-08-15 00:00:00\"}),\n (Charges, {\"page\": 2}, {\"updated_at\": \"2030-01-01\"}, {}, {\"limit\": 250, \"page\": 2, \"updated_at_min\": \"2030-01-01 00:00:00\"}),\n (Customers, None, {}, {}, {\"limit\": 250, \"updated_at_min\": \"2021-08-15 00:00:00\"}),\n (Discounts, None, {}, {}, {\"limit\": 250, \"updated_at_min\": \"2021-08-15 00:00:00\"}),\n (Onetimes, {\"page\": 2}, {\"updated_at\": \"2030-01-01\"}, {}, {\"limit\": 250, \"page\": 2, \"updated_at_min\": \"2030-01-01 00:00:00\"}),\n (Orders, None, {}, {}, {\"limit\": 250, \"updated_at_min\": \"2021-08-15 00:00:00\"}),\n (Subscriptions, None, {}, {}, {\"limit\": 250, \"updated_at_min\": \"2021-08-15 00:00:00\"}),\n ],\n )\n def test_request_params(self, config, stream_cls, next_page_token, stream_state, stream_slice, expected):\n stream = stream_cls(start_date=config[\"start_date\"])\n result = stream.request_params(stream_state, stream_slice, next_page_token)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"stream_cls, current_state, latest_record, expected\",\n [\n (Addresses, {}, {\"updated_at\": 2}, {\"updated_at\": 2}),\n (Charges, {\"updated_at\": 2}, {\"updated_at\": 3}, {\"updated_at\": 3}),\n (Customers, {\"updated_at\": 3}, {\"updated_at\": 4}, {\"updated_at\": 4}),\n (Discounts, {}, {\"updated_at\": 2}, {\"updated_at\": 2}),\n (Onetimes, {}, {\"updated_at\": 2}, {\"updated_at\": 2}),\n (Orders, {\"updated_at\": 5}, {\"updated_at\": 5}, {\"updated_at\": 5}),\n (Subscriptions, {\"updated_at\": 6}, {\"updated_at\": 7}, {\"updated_at\": 7}),\n ],\n )\n def test_get_updated_state(self, config, stream_cls, current_state, latest_record, expected):\n stream = stream_cls(start_date=config[\"start_date\"])\n result = stream.get_updated_state(current_state, latest_record)\n assert result == expected\n","sub_path":"dts/airbyte/airbyte-integrations/connectors/source-recharge/unit_tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":12253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"511269693","text":"\"\"\"\nGeneral functions.\n\"\"\"\nimport ctypes\nimport hashlib\nimport logging\nimport os\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nfrom configparser import ConfigParser\nfrom datetime import datetime\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP\n\nfrom .google_api import GMail\n\ntry:\n PermissionError\nexcept NameError:\n PermissionError = OSError # for Python 2.7\n FileExistsError = OSError\n\nlogger = 
logging.getLogger(__package__)\n\n_readers = []\n\n\ndef checksum(file, algorithm='sha256', chunk_size=65536, shake_length=256):\n \"\"\"Get the checksum of a file.\n\n A checksum is a sequence of numbers and letters that acts as a fingerprint\n for a file against which later comparisons can be made to detect errors or\n changes in the file. It can be used to verify the integrity of the data.\n\n Parameters\n ----------\n file : :term:`path-like ` or :term:`file ` object\n A file to get the checksum of.\n algorithm : :class:`str`, optional\n The hash algorithm to use to compute the checksum.\n See :mod:`hashlib` for more details.\n chunk_size : :class:`int`, optional\n The number of bytes to read at a time from the file. It is useful\n to tweak this parameter when reading a large file to improve performance.\n shake_length : :class:`int`, optional\n The digest length to use for the ``SHAKE`` algorithm. See\n :meth:`hashlib.shake.hexdigest` for more details.\n\n Returns\n -------\n :class:`str`\n The checksum containing only hexadecimal digits.\n \"\"\"\n def read(fp):\n # read in chunks in case the file size is too large\n # to load it into RAM all at once\n while True:\n data = fp.read(chunk_size)\n if not data:\n break\n hash_cls.update(data)\n\n hash_cls = hashlib.new(algorithm)\n\n try:\n with open(file, mode='rb') as f:\n read(f)\n except TypeError:\n if not hasattr(file, 'tell'):\n raise\n position = file.tell()\n read(file)\n file.seek(position)\n\n try:\n return hash_cls.hexdigest()\n except TypeError:\n return hash_cls.hexdigest(shake_length)\n\n\ndef copy(source, destination, overwrite=False, include_metadata=True):\n \"\"\"Copy a file.\n\n Parameters\n ----------\n source : :term:`path-like object`\n The path to a file to copy.\n destination : :term:`path-like object`\n A directory to copy the file to or a full path (i.e., includes the basename).\n If the directory does not exist then it, and all intermediate directories,\n will be created.\n overwrite : :class:`bool`, optional\n Whether to overwrite the `destination` file if it already exists.\n If `destination` already exists and `overwrite` is :data:`False` then a\n :exc:`FileExistsError` is raised.\n include_metadata : :class:`bool`, optional\n Whether to also copy information such as the file permissions,\n the latest access time and latest modification time with the file.\n\n Returns\n -------\n :class:`str`\n The path to where the file was copied.\n \"\"\"\n if os.path.isdir(destination) or is_dir_accessible(destination):\n destination = os.path.join(destination, os.path.basename(source))\n else:\n # TODO include the exist_ok kwarg to makedirs\n # when dropping support for Python 2.7\n try:\n os.makedirs(os.path.dirname(destination))\n except OSError:\n pass\n\n if not overwrite and (os.path.isfile(destination) or is_file_readable(destination)):\n raise FileExistsError('Will not overwrite {!r}'.format(destination))\n\n # TODO include the follow_symlinks kwarg to copyfile and copystat\n # (and to this \"copy\" function) when dropping support for Python 2.7\n shutil.copyfile(source, destination)\n if include_metadata:\n shutil.copystat(source, destination)\n\n return destination
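\n\n\n# Illustrative usage of checksum() and copy() (an added sketch, not part of the\n# module itself; the file names are hypothetical and the digest is abbreviated):\n#\n# >>> checksum('data.csv') # doctest: +SKIP\n# '...64 hexadecimal characters for SHA-256...'\n# >>> copy('data.csv', 'backup', overwrite=True) # doctest: +SKIP\n# 'backup/data.csv' (the platform's path separator applies)\n\n\ndef is_admin():\n \"\"\"Check if the current process is being run as an administrator.\n\n Returns\n -------\n :class:`bool`\n Whether the current process is being run as an administrator.\n \"\"\"\n try:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1\n except AttributeError:\n try:\n return os.geteuid() == 0\n except AttributeError:\n 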
return False\n\n\ndef is_dir_accessible(path, strict=False):\n \"\"\"Check if a directory exists and is accessible.\n\n An accessible directory is one that the user has\n permission to access.\n\n Parameters\n ----------\n path : :class:`str`\n The directory to check.\n strict : :class:`bool`, optional\n Whether to raise the exception (if one occurs).\n\n Returns\n -------\n :class:`bool`\n Whether the directory exists and is accessible.\n \"\"\"\n cwd = os.getcwd()\n try:\n os.chdir(path)\n except:\n if strict:\n raise\n return False\n else:\n os.chdir(cwd)\n return True\n\n\ndef is_file_readable(file, strict=False):\n \"\"\"Check if a file exists and is readable.\n\n Parameters\n ----------\n file : :class:`str`\n The file to check.\n strict : :class:`bool`, optional\n Whether to raise the exception (if one occurs).\n\n Returns\n -------\n :class:`bool`\n Whether the file exists and is readable.\n \"\"\"\n try:\n with open(file, mode='rb'):\n return True\n except:\n if strict:\n raise\n return False\n\n\ndef register(reader_class):\n \"\"\"Use as a decorator to register a :class:`~msl.io.base.Reader` subclass.\n\n See :ref:`io-create-reader` for an example of how to use the @register decorator.\n\n Parameters\n ----------\n reader_class : :class:`~msl.io.base.Reader`\n A :class:`~msl.io.base.Reader` subclass.\n\n Returns\n -------\n :class:`~msl.io.base.Reader`\n The :class:`~msl.io.base.Reader`.\n \"\"\"\n def append(cls):\n _readers.append(cls)\n logger.debug('registered %r', cls)\n return cls\n return append(reader_class)
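\n\n\n# Illustrative behaviour of the two accessibility checks (an added sketch; the\n# paths are hypothetical):\n#\n# >>> is_dir_accessible('/tmp') # doctest: +SKIP\n# True\n# >>> is_file_readable('does-not-exist.ini') # doctest: +SKIP\n# False\n\n\ndef search(folder, pattern=None, levels=0, regex_flags=0, exclude_folders=None,\n ignore_permission_error=True, ignore_hidden_folders=True, follow_symlinks=False):\n r\"\"\"Search for files starting from a root folder.\n\n Parameters\n ----------\n folder : :class:`str`\n The root folder to begin searching for files.\n pattern : :class:`str`, optional\n A regex string to use to filter the filenames. If :data:`None` then no\n filtering is applied and all files are yielded. Examples:\n\n * ``r'data'`` :math:`\\rightarrow` find all files with the word ``data``\n in the filename\n\n * ``r'\.png$'`` :math:`\\rightarrow` find all files with the extension ``.png``\n\n * ``r'\.jpe*g$'`` :math:`\\rightarrow` find all files with the extension\n ``.jpeg`` or ``.jpg``\n\n levels : :class:`int`, optional\n The number of sub-folder levels to recursively search for files.\n If :data:`None` then search all sub-folders.\n regex_flags : :class:`int`, optional\n The flags to use to compile regex strings.\n exclude_folders : :class:`str` or :class:`list` of :class:`str`, optional\n The pattern of folder names to exclude from the search. Can be a regex\n string. If :data:`None` then include all folders in the search. Examples:\n\n * ``r'bin'`` :math:`\\rightarrow` exclude all folders that contain the word ``bin``\n\n * ``r'^My'`` :math:`\\rightarrow` exclude all folders that start with the letters ``My``\n\n * ``[r'bin', r'^My']`` which is equivalent to ``r'(bin|^My)'`` :math:`\\rightarrow` exclude\n all folders that contain the word ``bin`` or start with the letters ``My``\n\n ignore_permission_error : :class:`bool`, optional\n Whether to ignore :exc:`PermissionError` exceptions when reading\n the items within a folder.\n ignore_hidden_folders : :class:`bool`, optional\n Whether to ignore hidden folders from the search. 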
A hidden folder\n starts with a ``.`` (a dot).\n follow_symlinks : :class:`bool`, optional\n Whether to search for files by following symbolic links.\n\n Yields\n ------\n :class:`str`\n The path to a file.\n \"\"\"\n if levels is not None and levels < 0:\n return\n\n if ignore_hidden_folders and os.path.basename(folder).startswith('.'):\n logger.debug('ignore hidden folder %r', folder)\n return\n\n if exclude_folders:\n if isinstance(exclude_folders, str):\n exclude_folders = [exclude_folders]\n\n if isinstance(exclude_folders[0], str):\n ex_compiled = [re.compile(ex, flags=regex_flags) for ex in exclude_folders]\n else: # the items should already be of type re.Pattern\n ex_compiled = exclude_folders\n\n basename = os.path.basename(folder)\n for exclude in ex_compiled:\n if exclude.search(basename):\n logger.debug('excluding folder %r', folder)\n return\n else:\n ex_compiled = None\n\n if ignore_permission_error:\n try:\n names = os.listdir(folder)\n except PermissionError:\n logger.debug('permission error %r', folder)\n return\n else:\n names = os.listdir(folder)\n\n if isinstance(pattern, str):\n regex = re.compile(pattern, flags=regex_flags) if pattern else None\n else: # the value should already be of type re.Pattern\n regex = pattern\n\n for name in names:\n path = folder + '/' + name\n if os.path.isfile(path) or is_file_readable(path):\n if regex is None or regex.search(name):\n yield path\n elif os.path.isdir(path) or (follow_symlinks and os.path.islink(path)):\n for item in search(path,\n pattern=regex,\n levels=None if levels is None else levels - 1,\n regex_flags=regex_flags,\n exclude_folders=ex_compiled,\n ignore_permission_error=ignore_permission_error,\n ignore_hidden_folders=ignore_hidden_folders,\n follow_symlinks=follow_symlinks):\n yield item
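\n\n\n# Illustrative usage of search() (an added sketch; the folder and pattern are\n# hypothetical):\n#\n# >>> for path in search('photos', pattern=r'\.jpe*g$', levels=None): # doctest: +SKIP\n# ... print(path)\n\n\ndef send_email(config, recipients, sender=None, subject=None, body=None):\n \"\"\"Send an email.\n\n Parameters\n ----------\n config\n A :term:`path-like object` or :term:`file-like object` of an INI-style\n configuration file that contains information on how to send an email.\n There are two ways to send an email -- Gmail API or SMTP server.\n\n An example INI file to use the Gmail API is the following (see\n :class:`~msl.io.google_api.GMail` for more details). Although all\n key-value pairs are optional, a ``[gmail]`` section must exist to use\n the Gmail API.\n\n .. code-block:: ini\n\n [gmail]\n account = work [default: None]\n credentials = path/to/client_secrets.json [default: None]\n scopes = [default: None]\n https://www.googleapis.com/auth/gmail.send\n https://www.googleapis.com/auth/gmail.metadata\n domain = @gmail.com [default: None]\n\n An example INI file for an SMTP server is the following. Only the `host`\n and `port` key-value pairs are required.\n\n .. code-block:: ini\n\n [smtp]\n host = hostname or IP address of the SMTP server\n port = port number to connect to on the SMTP server\n starttls = true|yes|1|on -or- false|no|0|off [default: false]\n username = the username to authenticate with [default: None]\n password = the password for username [default: None]\n domain = @company.com [default: None]\n\n .. warning::\n Since this information is specified in plain text in the configuration\n file, you should set the file permissions provided by your operating\n system to ensure that your authentication credentials are safe.\n\n recipients : :class:`str` or :class:`list` of :class:`str`\n The email address(es) of the recipient(s). 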
Can omit the ``@domain.com``\n part if a ``domain`` key is specified in the `config` file. Can be the\n value ``'me'`` if sending an email to yourself via Gmail.\n sender : :class:`str`, optional\n The email address of the sender. Can omit the ``@domain.com`` part\n if a ``domain`` key is specified in the `config` file. If not\n specified then it equals the value of the first `recipient` if using\n SMTP or the value ``'me'`` if using Gmail.\n subject : :class:`str`, optional\n The text to include in the subject field.\n body : :class:`str`, optional\n The text to include in the body of the email. The text can be\n enclosed in ``<html></html>`` tags to use HTML elements to format\n the message.\n \"\"\"\n cfg = _prepare_email(config, recipients, sender)\n if cfg['type'] == 'smtp':\n server = SMTP(host=cfg['host'], port=cfg['port'])\n if cfg['starttls']:\n server.ehlo()\n server.starttls()\n server.ehlo()\n if cfg['username'] and cfg['password']:\n server.login(cfg['username'], cfg['password'])\n msg = MIMEMultipart()\n msg['From'] = cfg['from']\n msg['To'] = ', '.join(cfg['to'])\n msg['Subject'] = subject or '(no subject)'\n text = body or ''\n subtype = 'html' if text.startswith('<html>') else 'plain'\n msg.attach(MIMEText(text, subtype))\n server.sendmail(cfg['from'], cfg['to'], msg.as_string())\n server.quit()\n else:\n with GMail(account=cfg['account'], credentials=cfg['credentials'],\n scopes=cfg['scopes']) as gmail:\n gmail.send(cfg['to'], sender=cfg['from'], subject=subject, body=body)
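\n\n\n# Illustrative usage of send_email() (an added sketch; 'mail.ini' and the\n# recipient are hypothetical):\n#\n# >>> send_email('mail.ini', 'a.person', subject='Hi', # doctest: +SKIP\n# ... body='<html><b>Hello!</b></html>')\n\n\ndef _prepare_email(config, recipients, sender):\n \"\"\"Load a configuration file to prepare for sending an email.\n\n Returns a dict.\n \"\"\"\n if hasattr(config, 'read'):\n contents = config.read()\n else:\n with open(config, mode='rt') as fp:\n contents = fp.read()\n\n if isinstance(contents, bytes):\n contents = contents.decode('utf-8')\n\n cp = ConfigParser()\n cp.read_string(contents)\n\n has_smtp = cp.has_section('smtp')\n has_gmail = cp.has_section('gmail')\n if has_smtp and has_gmail:\n raise ValueError(\"Cannot specify both a 'gmail' and 'smtp' section\")\n if not (has_smtp or has_gmail):\n raise ValueError(\"Must create either a 'gmail' or 'smtp' section\")\n\n section = cp['gmail'] if has_gmail else cp['smtp']\n\n domain = section.get('domain')\n if domain and not domain.startswith('@'):\n domain = '@' + domain\n\n if isinstance(recipients, str):\n recipients = [recipients]\n\n for i in range(len(recipients)):\n if domain and '@' not in recipients[i] and \\\n (has_smtp or (has_gmail and recipients[i] != 'me')):\n recipients[i] += domain\n\n if not sender:\n if has_gmail:\n sender = 'me'\n else:\n sender = recipients[0]\n elif domain and ('@' not in sender) and \\\n (has_smtp or (has_gmail and sender != 'me')):\n sender += domain\n\n cfg = {'type': section.name, 'to': recipients, 'from': sender}\n if has_smtp:\n host, port = section.get('host'), section.getint('port')\n if not (host and port):\n raise ValueError(\"Must specify the 'host' and 'port' of the SMTP server\")\n\n username, password = section.get('username'), section.get('password')\n if username and not password:\n raise ValueError(\"Must specify the 'password' since a \"\n \"'username' is specified\")\n elif password and not username:\n raise ValueError(\"Must specify the 'username' since a \"\n \"'password' is specified\")\n\n cfg.update({\n 'host': host,\n 'port': port,\n 'starttls': section.getboolean('starttls'),\n 'username': username,\n 'password': password,\n })\n else:\n scopes = section.get('scopes')\n cfg.update({\n 'account': 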
section.get('account'),\n 'credentials': section.get('credentials'),\n 'scopes': scopes.split() if scopes else None\n })\n return cfg\n\n\ndef get_basename(obj):\n \"\"\"Get the :func:`~os.path.basename` of a file.\n\n Parameters\n ----------\n obj : :term:`path-like ` or :term:`file-like `\n The object to get the :func:`~os.path.basename` of. If the object does not\n support the :func:`~os.path.basename` function then the\n :attr:`__name__ ` of the `obj` is returned.\n\n Returns\n -------\n :class:`str`\n The basename of `obj`.\n \"\"\"\n try:\n return os.path.basename(obj)\n except (TypeError, AttributeError):\n try:\n return os.path.basename(obj.name)\n except AttributeError:\n return obj.__class__.__name__\n\n\ndef git_head(directory):\n \"\"\"Get information about the ``HEAD`` of a repository.\n\n This function requires that `git `_ is installed\n and that it is available on ``PATH``.\n\n Parameters\n ----------\n directory : :class:`str`\n A directory that is under version control.\n\n Returns\n -------\n :class:`dict` or :data:`None`\n Information about the most recent commit on the current branch.\n If `directory` is not a directory that is under version control\n then returns :data:`None`.\n \"\"\"\n cmd = ['git', 'show', '-s', '--format=%H %ct', 'HEAD']\n try:\n out = subprocess.check_output(cmd, cwd=directory, stderr=subprocess.PIPE)\n except subprocess.CalledProcessError:\n return None\n\n sha, timestamp = out.split()\n return {\n 'hash': sha.decode('ascii'),\n 'datetime': datetime.fromtimestamp(int(timestamp))\n }\n\n\ndef remove_write_permissions(path):\n \"\"\"Remove all write permissions of a file.\n\n On Windows, this function will set the file attribute to be read only.\n\n On Linux and macOS, write permission is removed for the User,\n Group and Others. The read and execute permissions are preserved.\n\n Parameters\n ----------\n path : :term:`path-like object`\n The path to remove the write permissions of.\n \"\"\"\n current_permissions = stat.S_IMODE(os.lstat(path).st_mode)\n disable_writing = ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH\n os.chmod(path, current_permissions & disable_writing)
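\n\n\n# Illustrative usage of git_head() (an added sketch; the values shown are\n# placeholders, not real commit data):\n#\n# >>> git_head('.') # doctest: +SKIP\n# {'hash': '...40 hexadecimal characters...', 'datetime': datetime.datetime(...)}\n\n\ndef run_as_admin(args=None, executable=None, cwd=None, capture_stderr=False,\n blocking=True, show=False, **kwargs):\n \"\"\"Run a process as an administrator and return its output.\n\n Parameters\n ----------\n args : :class:`str` or :class:`list` of :class:`str`, optional\n A sequence of program arguments or else a single string. Providing a\n sequence of arguments is generally preferred, as it allows the module\n to take care of any required escaping and quoting of arguments\n (e.g., to permit spaces in file names).\n executable : :class:`str`, optional\n The executable to pass the `args` to.\n cwd : :class:`str`, optional\n The working directory for the elevated process.\n capture_stderr : :class:`bool`, optional\n Whether to send the stderr stream to stdout.\n blocking : :class:`bool`, optional\n Whether to wait for the process to finish before returning to the\n calling program.\n show : :class:`bool`, optional\n Whether to show the elevated console (Windows only). If\n :data:`True` then the stdout stream of the process is not captured.\n kwargs\n If the current process already has admin privileges or if the operating\n system is not Windows then all additional keyword arguments are passed\n to :func:`~subprocess.check_output`. 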
Otherwise, only a `timeout` keyword\n argument is used (Windows).\n\n Returns\n -------\n :class:`bytes`, :class:`int` or :class:`~subprocess.Popen`\n The returned object depends on whether the process is executed in blocking\n or non-blocking mode. If blocking then :class:`bytes` are returned (the\n stdout stream of the process). If non-blocking, then the returned object\n will either be the :class:`~subprocess.Popen` instance that is running the\n process (POSIX) or an :class:`int` which is the process ID (Windows).\n\n Examples\n --------\n .. invisible-code-block: pycon\n\n >>> SKIP_RUN_AS_ADMIN()\n\n Import the modules\n\n >>> import sys\n >>> from msl.io import run_as_admin\n\n Run a shell script\n\n >>> run_as_admin(['./script.sh', '--message', 'hello world'])\n\n Run a Python script\n\n >>> run_as_admin([sys.executable, 'script.py', '--verbose'], cwd='D:\\\\\\\\My Scripts')\n\n Create a service in the Windows registry and in the Service Control Manager database\n\n >>> run_as_admin(['sc', 'create', 'MyLogger', 'binPath=', 'C:\\\\\\\\logger.exe', 'start=', 'auto'])\n \"\"\"\n if not args and not executable:\n raise ValueError('Must specify the args and/or an executable')\n\n stderr = subprocess.STDOUT if capture_stderr else None\n process = subprocess.check_output if blocking else subprocess.Popen\n\n if is_admin():\n return process(args, executable=executable, cwd=cwd,\n stderr=stderr, **kwargs)\n\n if cwd is None:\n cwd = os.getcwd()\n\n if os.name != 'nt':\n if not args:\n command = ['sudo', executable]\n elif isinstance(args, str):\n exe = executable or ''\n command = 'sudo {} {}'.format(exe, args)\n else:\n exe = [executable] if executable else []\n command = ['sudo'] + exe + list(args)\n return process(command, cwd=cwd, stderr=stderr, **kwargs)\n\n # Windows is more complicated\n\n if args is None:\n args = ''\n\n if not isinstance(args, str):\n args = subprocess.list2cmdline(args)\n\n if executable is None:\n executable = ''\n else:\n executable = subprocess.list2cmdline([executable])\n\n # the 'runas' verb starts in C:\\WINDOWS\\system32\n cd = subprocess.list2cmdline(['cd', '/d', cwd, '&&'])\n\n # check if a Python environment needs to be activated\n activate = ''\n if executable == sys.executable or args.startswith(sys.executable):\n conda = os.getenv('CONDA_PREFIX') # conda\n venv = os.getenv('VIRTUAL_ENV') # venv\n if conda:\n env = os.getenv('CONDA_DEFAULT_ENV')\n assert env, 'CONDA_DEFAULT_ENV environment variable does not exist'\n if env == 'base':\n bat = os.path.join(conda, 'Scripts', 'activate.bat')\n else:\n bat = os.path.abspath(os.path.join(conda, os.pardir, os.pardir,\n 'Scripts', 'activate.bat'))\n assert os.path.isfile(bat), 'Cannot find {!r}'.format(bat)\n activate = subprocess.list2cmdline([bat, env, '&&'])\n elif venv:\n bat = os.path.join(venv, 'Scripts', 'activate.bat')\n assert os.path.isfile(bat), 'Cannot find {!r}'.format(bat)\n activate = subprocess.list2cmdline([bat, '&&'])\n\n # redirect stdout (stderr) to a file\n redirect = ''\n stdout_file = ''\n if not show:\n import uuid\n import tempfile\n stdout_file = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))\n r = ['>', stdout_file]\n if capture_stderr:\n r.append('2>&1')\n redirect = subprocess.list2cmdline(r)\n if re.search(r'\\d$', args):\n # this number is also considered as a file handle, so add a space\n redirect = ' ' + redirect\n\n # the string that is passed to cmd.exe\n params = '/S /C \"{cd} {activate} {executable} {args}\"{redirect}'.format(\n cd=cd, activate=activate, 
executable=executable, args=args, redirect=redirect)\n\n from ctypes.wintypes import DWORD, ULONG, HWND, LPCWSTR, INT, HINSTANCE, HKEY, HANDLE\n\n class ShellExecuteInfoW(ctypes.Structure):\n _fields_ = [\n ('cbSize', DWORD),\n ('fMask', ULONG),\n ('hwnd', HWND),\n ('lpVerb', LPCWSTR),\n ('lpFile', LPCWSTR),\n ('lpParameters', LPCWSTR),\n ('lpDirectory', LPCWSTR),\n ('nShow', INT),\n ('hInstApp', HINSTANCE),\n ('lpIDList', ctypes.c_void_p),\n ('lpClass', LPCWSTR),\n ('hkeyClass', HKEY),\n ('dwHotKey', DWORD),\n ('hIcon', HANDLE),\n ('hProcess', HANDLE)]\n\n sei = ShellExecuteInfoW()\n sei.fMask = 0x00000040 | 0x00008000 # SEE_MASK_NOCLOSEPROCESS | SEE_MASK_NO_CONSOLE\n sei.lpVerb = kwargs.get('verb', u'runas') # change the verb when running the tests\n sei.lpFile = u'cmd.exe'\n sei.lpParameters = params\n sei.lpDirectory = u'{}'.format(cwd) if cwd else None\n sei.nShow = int(show)\n sei.cbSize = ctypes.sizeof(sei)\n if not ctypes.windll.Shell32.ShellExecuteExW(ctypes.byref(sei)):\n raise ctypes.WinError()\n\n if not blocking:\n return sei.hProcess\n\n kernel32 = ctypes.windll.kernel32\n timeout = kwargs.get('timeout', -1) # INFINITE = -1\n milliseconds = int(timeout * 1e3) if timeout > 0 else timeout\n\n ret = kernel32.WaitForSingleObject(sei.hProcess, milliseconds)\n if ret == 0: # WAIT_OBJECT_0\n stdout = b''\n if stdout_file and os.path.isfile(stdout_file):\n with open(stdout_file, mode='rb') as fp:\n stdout = fp.read()\n os.remove(stdout_file)\n\n code = DWORD()\n if not kernel32.GetExitCodeProcess(sei.hProcess, ctypes.byref(code)):\n raise ctypes.WinError()\n\n if code.value != 0:\n msg = ctypes.FormatError(code.value)\n out_str = stdout.decode('utf-8', 'ignore').rstrip()\n if show:\n msg += '\\nSet show=False to capture the stdout stream.'\n else:\n if not capture_stderr:\n msg += '\\nSet capture_stderr=True to see if ' \\\n 'more information is available.'\n if out_str:\n msg += '\\n{}'.format(out_str)\n raise ctypes.WinError(code=code.value, descr=msg)\n\n kernel32.CloseHandle(sei.hProcess)\n return stdout\n\n if ret == 0xFFFFFFFF: # WAIT_FAILED\n raise ctypes.WinError()\n\n if ret == 0x00000080: # WAIT_ABANDONED\n msg = 'The specified object is a mutex object that was not ' \\\n 'released by the thread that owned the mutex object before ' \\\n 'the owning thread terminated. Ownership of the mutex ' \\\n 'object is granted to the calling thread and the mutex state ' \\\n 'is set to non-signaled. 
If the mutex was protecting persistent ' \\\n 'state information, you should check it for consistency.'\n elif ret == 0x00000102: # WAIT_TIMEOUT\n msg = \"The timeout interval elapsed after {} second(s) and the \" \\\n \"object's state is non-signaled.\".format(timeout)\n else:\n msg = 'Unknown return value 0x{:x}'.format(ret)\n\n raise WindowsError('WaitForSingleObject: ' + msg)\n","sub_path":"msl/io/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":27679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"431552428","text":"import kucoin.client as kuclinet\nfrom .api_creds import *\n\norder_ids=[]\n\ndef order(side, quantity, symbol):\n client = kuclinet.Client(KU_API_PUBLIC, KU_API_SECRET, KU_PASSPHRASE)\n fixed_symbol = eth_tick_fix(symbol)\n try:\n print('\\nSending order!')\n order = client.create_market_order(fixed_symbol, side, quantity)\n print(order)\n order_ids.append(order)\n except Exception as e:\n print('Failed to place order')\n print(e.__cause__)\n return (False, str(e.__cause__))\n return (True, order)\n\ndef eth_tick_fix(eth_ticker):\n if 'ETH' in eth_ticker:\n return 'ETH-USDT'\n return 'WRONG'\n\nif __name__=='__main__':\n order('buy', .01, 'ETH-USDT' )\n","sub_path":"botbu/chalicelib/test_order.py","file_name":"test_order.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"223075289","text":"import json\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom pandas import json_normalize\nimport seaborn as sns\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport folium\nfrom folium import plugins\nfrom folium.plugins import HeatMap\nfrom PIL import Image\nimport scipy.stats as stats\n\n\ndef make_ax_bar(loc_dict, col):\n fig, ax = plt.subplots()\n for df, loc in loc_dict.items():\n ax.bar(loc, all_df[all_df[\"location\"] == df][col].mean())\n plt.xticks(rotation=45)\n return ax\n\n\ndef make_ax_difficulty_bar(color_dict, col):\n fig, ax = plt.subplots()\n for color, code in color_dict.items():\n ax.bar(color, all_df[all_df[\"difficulty\"] == code[0]][col].mean(),\n color=code[1])\n plt.xticks(rotation=45)\n return ax\n\n\ndef load_trail_df_from_file(filename, location_name):\n with open(filename) as data_file:\n data = json.load(data_file)\n data_trails = data['trails']\n data_df = json_normalize(data_trails)\n data_df[\"location\"] = location_name\n return data_df\n\n\ndef update_map(color):\n return folium.CircleMarker([row['latitude'], row['longitude']],\n radius=15,\n popup=row['name'],\n fill_color=color, # divvy color\n ).add_to(m)\n\n\nif __name__ == \"__main__\":\n plt.rcParams.update({'font.size': 16})\n\n # Denver\n denver_file = '../data/denver.json'\n denver_df = load_trail_df_from_file(denver_file, \"denver\")\n\n # Park City\n park_city_file = '../data/parkcity.json'\n park_city_df = load_trail_df_from_file(park_city_file, \"park_city\")\n\n # Moab\n moab_file = '../data/moab.json'\n moab_df = load_trail_df_from_file(moab_file, \"moab\")\n\n # Sedona\n sedona_file = '../data/sedona.json'\n sedona_df = load_trail_df_from_file(sedona_file, \"sedona\")\n\n # Marin County\n marin_county_file = '../data/marincounty.json'\n marin_county_df = load_trail_df_from_file(marin_county_file,\n \"marin_county\")\n\n # Crested Butte\n crested_butte_file = '../data/crestedbutte.json'\n crested_butte_df = load_trail_df_from_file(crested_butte_file,\n 
\"crested_butte\")\n\n loc_dict = {\"denver\": \"Denver\",\n \"crested_butte\": \"Crested Butte\",\n \"marin_county\": \"Marin County\",\n \"sedona\": \"Sedona\",\n \"park_city\": \"Park City\",\n \"moab\": \"Moab\"}\n\n color_dict = {\"Green\": [\"green\", \"green\"],\n \"Green Blue\": [\"greenBlue\", \"#0d98ba\"],\n \"Blue\": [\"blue\", \"blue\"],\n \"Blue Black\": [\"blueBlack\", \"#003366\"],\n \"Black\": [\"black\", \"black\"]}\n\n # MTB_Trail_Data_EDA\n\n all_df = pd.concat([crested_butte_df, marin_county_df,\n denver_df, park_city_df, sedona_df, moab_df])\n\n\n cb_ids = set(crested_butte_df['id'])\n\n mc_ids = set(marin_county_df['id'])\n den_ids = set(denver_df['id'])\n pc_ids = set(park_city_df['id'])\n sed_ids = set(sedona_df['id'])\n moab_ids = set(moab_df['id'])\n\n for val in crested_butte_df['id']:\n print(f'mtbproject.com/trail/{val}')\n\n text = \" \".join(review for review in all_df.summary)\n print(\"There are {} words in the combination of all review.\"\n .format(len(text)))\n\n stopwords = set(STOPWORDS)\n stopwords.update([\"This ,\", \"An \"])\n\n bike_mask = np.array(Image.open(\"../images/wordcloud_bike.png\"))\n bike_mask\n\n bike_mask[bike_mask == 0] = 255\n\n # Create a word cloud image\n wc = WordCloud(background_color=\"white\", max_words=1000, mask=bike_mask,\n stopwords=stopwords, contour_width=3)\n\n # Generate a wordcloud\n wc.generate(text)\n\n # show\n # plt.figure(figsize=[20,10])\n # plt.imshow(wc, interpolation='bilinear')\n # plt.axis(\"off\")\n # plt.savefig(\"../images/wordcloud_bike_after.png\")\n # plt.show()\n\n # all_df.corr(method ='pearson')\n\n m = folium.Map(\n location=[38.8697, -106.9878],\n zoom_start=8,\n tiles='Stamen Terrain'\n )\n\n for index, row in crested_butte_df.iterrows():\n if row['difficulty'] == 'black':\n update_map(\"#000000\")\n elif row['difficulty'] == 'blue':\n update_map(\"#0000FF\")\n elif row['difficulty'] == 'green':\n update_map(\"#008000\")\n elif row['difficulty'] == 'blueBlack':\n update_map(\"#003366\")\n elif row['difficulty'] == 'greenBlue':\n update_map(\"#00DDDD\")\n elif row['difficulty'] == 'dblack':\n update_map(\"#000000\")\n\n # convert to (n, 2) nd-array format for heatmap\n stationArr = crested_butte_df[['latitude', 'longitude']].to_numpy()\n\n # plot heatmap\n m.add_children(plugins.HeatMap(stationArr, radius=15))\n # m\n m.save(\"../images/crested_butte_locations2.html\")\n\n # all_df.to_csv(\"../data/all_data.csv\")\n\n # fig, ax = plt.subplots()\n\n # Mean Ascent Per Trail\n ax = make_ax_bar(loc_dict, \"ascent\")\n ax.set_xlabel('Location')\n ax.set_ylabel('Mean Ascent Per Trail')\n ax.set_title('Mean Ascent Per Trail by Location')\n plt.tight_layout()\n # plt.savefig(\"../images/ascent_per_trail.png\")\n # plt.show()\n\n # Mean Length Per Trail Plot\n ax = make_ax_bar(loc_dict, \"length\")\n ax.set_xlabel('Location')\n ax.set_ylabel('Mean Length Per Trail')\n ax.set_title('Mean Length Per Trail by Location')\n plt.xticks(rotation=45)\n plt.tight_layout()\n # plt.savefig(\"../images/length_per_trail.png\")\n # plt.show()\n\n all_df[\"ascent_per_trail\"] = all_df[\"ascent\"] / all_df[\"length\"]\n all_df[\"descent_per_trail\"] = abs(all_df[\"descent\"] / all_df[\"length\"])\n\n # print(all_df[\"ascent_per_trail\"].mean())\n\n # Mean Ascent Per Mile by Location\n ax = make_ax_bar(loc_dict, \"ascent_per_trail\")\n ax.set_xlabel('Location')\n ax.set_ylabel('Mean Ascent Per Mile Per Trail')\n ax.set_title('Mean Ascent Per Mile by Location')\n plt.xticks(rotation=45)\n plt.tight_layout()\n # 
plt.savefig(\"../images/ascent_per_mile.png\")\n # plt.show()\n\n # Mean Descent Per Trail by Location\n ax = make_ax_bar(loc_dict, \"descent\")\n ax.set_xlabel('Location')\n ax.set_ylabel('Mean Descent Per Trail')\n ax.set_title('Mean Descent Per Trail by Location')\n plt.xticks(rotation=45)\n plt.tight_layout()\n # plt.savefig(\"../images/descent_per_trail.png\")\n # plt.show()\n\n # Mean Descent per Mile by Location\n ax = make_ax_bar(loc_dict, \"descent_per_trail\")\n ax.set_xlabel('Location')\n ax.set_ylabel('Mean Descent Per Mile Per Trail')\n ax.set_title('Mean Descent Per Mile by Location')\n plt.xticks(rotation=45)\n plt.tight_layout()\n # plt.savefig(\"../images/descent_per_mile.png\")\n # plt.show()\n\n # Difficulty\n # Ascent Per Mile by Difficulty\n ax = make_ax_difficulty_bar(color_dict, \"ascent_per_trail\")\n ax.bar(\"Double Black\", all_df[all_df[\"difficulty\"] == \"dblack\"]\n [\"ascent_per_trail\"].mean(),\n color=\"white\",\n hatch='*',\n edgecolor=\"black\")\n\n ax.set_xlabel('Difficulty')\n ax.set_ylabel('Mean Ascent Per Mile Per Trail')\n ax.set_title('Mean Ascent Per Mile by Difficulty')\n plt.xticks(rotation=45)\n plt.tight_layout()\n # plt.savefig(\"../images/apm_by_difficulty.png\")\n # plt.show()\n\n # Descent Per Mile by Difficulty\n ax = make_ax_difficulty_bar(color_dict, \"descent_per_trail\")\n ax.bar(\"Double Black\", all_df[all_df[\"difficulty\"] == \"dblack\"]\n [\"descent_per_trail\"].mean(),\n color=\"white\",\n hatch='*',\n edgecolor=\"black\")\n\n ax.set_xlabel('Difficulty')\n ax.set_ylabel('Descent/Mile (ft)')\n ax.set_title('Mean Descent/Mile by Difficulty')\n plt.xticks(rotation=45)\n plt.tight_layout()\n # plt.savefig(\"../images/dpm_by_difficulty.png\")\n # plt.show()\n\n # Stars by Difficulty\n ax = make_ax_difficulty_bar(color_dict, \"stars\")\n ax.bar(\"Double Black\", all_df[all_df[\"difficulty\"] == \"dblack\"]\n [\"stars\"].mean(),\n color=\"white\",\n hatch='*',\n edgecolor=\"black\")\n ax.set_xlabel('Difficulty')\n ax.set_ylabel('Mean Stars')\n ax.set_title('Mean Stars by Difficulty')\n plt.xticks(rotation=45)\n plt.tight_layout()\n # plt.savefig(\"../images/stars_by_difficulty.png\")\n # plt.show()\n\n print(all_df.columns)\n\n\n","sub_path":"src/data_pipeline.py","file_name":"data_pipeline.py","file_ext":"py","file_size_in_byte":8493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"487193409","text":"\"\"\" Test of SED-ML utilities\n\n:Author: Jonathan Karr \n:Date: 2020-03-20\n:Copyright: 2020, Center for Reproducible Biomedical Modeling\n:License: MIT\n\"\"\"\n\nfrom Biosimulations_utils.chart.data_model import Chart, ChartDataField, ChartDataFieldShape, ChartDataFieldType\nfrom Biosimulations_utils.data_model import OntologyTerm, RemoteFile\nfrom Biosimulations_utils.biomodel import read_biomodel\nfrom Biosimulations_utils.biomodel.data_model import Biomodel, BiomodelVariable, BiomodelFormat\nfrom Biosimulations_utils.simulation import write_simulation, read_simulation, sedml\nfrom Biosimulations_utils.simulation.core import SimulationIoError, SimulationIoWarning\nfrom Biosimulations_utils.simulation.data_model import SimulationFormat, TimecourseSimulation, SimulationResult\nfrom Biosimulations_utils.simulation.sedml import modify_xml_model_for_simulation\nfrom Biosimulations_utils.visualization.data_model import Visualization, VisualizationLayoutElement, VisualizationDataField\nimport json\nimport libsedml\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\n\nclass 
WriteSedMlTestCase(unittest.TestCase):\n def setUp(self):\n self.dirname = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.dirname)\n\n def test_gen_sedml(self):\n with open('tests/fixtures/simulation.json', 'rb') as file:\n sim = TimecourseSimulation.from_json(json.load(file))\n sim.model = Biomodel(\n id='sbml_model',\n name='SBML model',\n file=RemoteFile(\n name=os.path.join(self.dirname, 'model.sbml.xml'),\n type='application/sbml+xml',\n ),\n format=BiomodelFormat.sbml.value,\n variables=[\n BiomodelVariable(id='species_1', target=\"/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='species_1']\"),\n BiomodelVariable(id='species_2', target=\"/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='species_2']\"),\n ],\n )\n sim.model.format.version = 'L1V3'\n sim_filename = os.path.join(self.dirname, 'simulation.sedml')\n write_simulation(sim, sim_filename, SimulationFormat.sedml, level=1, version=3)\n\n sims_2, _ = read_simulation(\n sim_filename, SimulationFormat.sedml)\n self.assertEqual(len(sims_2), 1)\n sim_2 = sims_2[0]\n self.assertEqual(sim_2.id, sim.id)\n self.assertEqual(sim_2.format.version, 'L1V3')\n self.assertEqual(sim_2.format, sim.format)\n self.assertEqual(sim_2.name, sim.name)\n self.assertEqual(sim_2.model.id, sim.model.id)\n self.assertEqual(sim_2.model.name, sim.model.name)\n self.assertEqual(\n set(v.id for v in sim_2.model.variables),\n set(v.id for v in sim.model.variables))\n self.assertEqual(sim_2.model.file.name, sim.model.file.name)\n self.assertEqual(sim_2.algorithm.id, sim.algorithm.id)\n self.assertEqual(sim_2.algorithm, sim.algorithm)\n self.assertEqual(sim_2.created, sim.created)\n self.assertEqual(sim_2.updated, sim.updated)\n self.assertEqual(sim_2, sim)\n\n with self.assertRaisesRegex(NotImplementedError, 'not supported'):\n read_simulation(None, SimulationFormat.sessl)\n\n def test_gen_sedml_errors(self):\n # Other versions/levels of SED-ML are not supported\n sim = TimecourseSimulation(\n model=Biomodel(\n format=BiomodelFormat.sbml.value,\n ),\n format=SimulationFormat.sedml.value,\n )\n with self.assertRaisesRegex(ValueError, 'Format must be SED-ML'):\n write_simulation(sim, None, format=SimulationFormat.sedml, level=1, version=1)\n\n # other simulation experiments formats (e.g., SESSL) are not supported\n sim = TimecourseSimulation(\n model=Biomodel(\n format=BiomodelFormat.sbml.value,\n ),\n format=SimulationFormat.sessl.value,\n )\n with self.assertRaisesRegex(NotImplementedError, 'is not supported'):\n write_simulation(sim, None, SimulationFormat.sessl, level=1, version=3)\n with self.assertRaisesRegex(ValueError, 'Format must be SED-ML'):\n write_simulation(sim, None, SimulationFormat.sedml, level=1, version=3)\n\n def test__get_obj_annotation(self):\n reader = sedml.SedMlSimulationReader()\n\n doc = libsedml.SedDocument()\n self.assertEqual(reader._get_obj_annotation(doc), [])\n\n doc.setAnnotation('')\n self.assertEqual(reader._get_obj_annotation(doc), [])\n\n doc.setAnnotation('')\n self.assertEqual(reader._get_obj_annotation(doc), [])\n\n doc.setAnnotation(\n ''\n ''\n '')\n self.assertEqual(reader._get_obj_annotation(doc), [])\n\n def test__call_sedml_error(self):\n doc = libsedml.SedDocument()\n with self.assertRaisesRegex(ValueError, 'libsedml error:'):\n sedml.SedMlSimulationWriter._call_libsedml_method(doc, doc, 'setAnnotation', '= 0:\n return 'Es positivo'\n else:\n return 'Es negativo'\n\nprint(signoNumero(miNumero))\n\n\n## 8. 
Read two numbers from the keyboard and display one of the two \n# following messages depending on the numbers that were read: \n#\n# a) the first number is greater than the second ; \n# b) the first number is less than the second ; \n# c) the two numbers are equal .\n\nnumeroA = input('Introduzca un número')\nnumeroB = input('Introduzca un segundo número')\n\n\ndef relacionOrden(numeroA, numeroB):\n\n if float(numeroA) > float(numeroB):\n return 'El primer número ' + str(numeroA) + ' es mayor que el segundo ' + str(numeroB)\n\n elif float(numeroA) < float(numeroB):\n return 'El primer número ' + str(numeroA) + ' es menor que el segundo ' + str(numeroB)\n\n else:\n return 'Los dos números son iguales ' + str(numeroA)\n\n\nprint(relacionOrden(numeroA, numeroB))\n\n\n## 9. Read two integers from the keyboard and display the sum of the two numbers \n# only if both of them are positive. \n# If the two numbers are not both positive, display a message \n# saying so. \n#\n# The output must have the following format: \n# 'La suma de los dos números es: XX' or 'No se calcula la suma porque alguno de \n# los números o los dos no son positivos'.\n\nnumeroA = int(input('Introduzca un número entero'))\nnumeroB = int(input('Introduzca otro número entero'))\n\n\ndef suma(numeroA, numeroB):\n\n if numeroA < 0 or numeroB < 0:\n return 'No se calcula la suma porque alguno de los números o los dos no son positivos'\n else:\n return 'La suma de los dos números es: ' + str(numeroA + numeroB)\n \n\nprint(suma(numeroA, numeroB))\n\n\n## 10. Variant of the previous exercise.\n# Display something different for each of the situations that can occur, \n# using the following messages:\n#\n# a. 'No se calcula la suma porque el primer número es negativo'\n# b. 'No se calcula la suma porque el segundo número es negativo'\n# c. 'No se calcula la suma porque los dos números son negativos'\n\nnumeroA = int(input('Introduzca un número entero'))\nnumeroB = int(input('Introduzca otro número entero'))\n\n\ndef suma(numeroA, numeroB):\n\n if numeroA < 0 and numeroB < 0:\n return 'No se calcula la suma porque los dos números son negativos'\n\n elif numeroA < 0:\n return 'No se calcula la suma porque el primer número es negativo'\n\n elif numeroB < 0:\n return 'No se calcula la suma porque el segundo número es negativo'\n\n else:\n return 'La suma de los dos números es: ' + str(numeroA + numeroB)\n \n\nprint(suma(numeroA, numeroB))\n\n\n## 11. Read three integer values from the keyboard. \n#\n# Determine whether the sum of two of them equals the third one. 
\n#\n# The output of the program has to have the format: \n# 'Números introducidos: \t (tab character)'\n#\n# and one of the four following output lines:\n#\n# 'Se cumple que N1 = N2 + N3'\n# 'Se cumple que N2 = N1 + N3'\n# 'Se cumple que N3 = N1 + N2'\n# 'Los números no se relacionan por suma y resultado'\n\nnumeroA = int(input('Introduzca el primer número entero'))\nnumeroB = int(input('Introduzca el segundo número entero'))\nnumeroC = int(input('Introduzca el tercer número entero'))\n\nprint('Números introducidos: ' + str(numeroA) + ' ' + str(numeroB) + ' ' + str(numeroC) + '\\t')\n\n\ndef relacionSumaResultado(N1, N2, N3):\n\n if N1 == N2 + N3:\n return 'Se cumple que ' + str(N1) + ' ' + '= ' + str(N2) + ' ' + '+ ' + str(N3)\n\n elif N2 == N1 + N3:\n return 'Se cumple que ' + str(N2) + ' ' + '= ' + str(N1) + ' ' + '+ ' + str(N3)\n\n elif N3 == N1 + N2:\n return 'Se cumple que ' + str(N3) + ' ' + '= ' + str(N1) + ' ' + '+ ' + str(N2)\n else:\n return 'Los números no se relacionan por suma y resultado'\n\n\nprint(relacionSumaResultado(numeroA, numeroB, numeroC))\n\n\n## 12. Read two numbers from the keyboard. \n# Compute and display the sum only if:\n#\n# a. both numbers are even \n# b. the first one is less than fifty \n# c. and the second one is within the closed interval 100-500. \n# If the conditions are not met, an error message has to be displayed \n# instead of the sum.\n\nnumeroA = float(input('Introduzca el primer número'))\nnumeroB = float(input('Introduzca el segundo número'))\n\n\ndef sonPares(numeroA, numeroB):\n\n if numeroA % 2 == 0 and numeroB % 2 == 0:\n return True\n else:\n return False\n\n\ndef mayorQueCincuenta(numero):\n\n if numero > 50:\n return True\n else:\n return False\n\n\ndef entreCienYQuinientos(numero):\n\n if numero >= 100 and numero <= 500:\n return True\n else:\n return False\n\n\ndef sumaEnCondiciones(numeroA, numeroB):\n\n if sonPares(numeroA, numeroB) and mayorQueCincuenta(numeroA) and (entreCienYQuinientos(numeroB)):\n\n return numeroA + numeroB\n\n elif not sonPares(numeroA, numeroB):\n return 'Almenos uno de los números no es par'\n\n elif not mayorQueCincuenta(numeroA):\n return 'El primer número no es mayor que 50'\n\n elif not entreCienYQuinientos(numeroB):\n return 'El segundo número no está entre 100 y 500'\n\n\nprint(sumaEnCondiciones(numeroA, numeroB))\n\n\n## 13. Compute the final amount of a sale, considering that a discount is applied \n# to the gross value according to the following table:\n\n# a. Values <=20 imply a 0% discount\n# b. Values >20 and <=100 imply a 5% discount\n# c. 
Values >100 imply a 10% discount\n\n\ndef descuento(valorBruto):\n\n if valorBruto <= 20:\n return 0\n \n elif valorBruto > 20 and valorBruto <= 100:\n return (valorBruto * 5) / 100\n \n else:\n return (valorBruto * 10) / 100\n\n\ndef importeFinal(valorBruto):\n return valorBruto - descuento(valorBruto)\n\n\n\n\n## TEST CASES\nif __name__ == '__main__':\n \n # Exercise 1\n if isinstance(numeroEscrito, int):\n pass\n else:\n print('Los datos proporcionados son incorrectos')\n\n assert dobleNumeroEntero(2) == 4\n assert dobleNumeroEntero(8) == 16\n assert dobleNumeroEntero(50) == 100\n assert dobleNumeroEntero(3560) == 7120\n\n assert tripleNumeroEntero(2) == 6\n assert tripleNumeroEntero(8) == 24\n assert tripleNumeroEntero(50) == 150\n assert tripleNumeroEntero(3560) == 10680\n\n\n # Exercise 2\n if isinstance(miRadio, int) or isinstance(miRadio, float):\n pass\n else:\n print('Los datos proporcionados son incorrectos')\n \n assert longitudCircumferencia(2) == 12.56\n assert longitudCircumferencia(3.6) == 22.61\n assert longitudCircumferencia(11) == 69.08\n assert longitudCircumferencia(45.0) == 282.6\n\n assert areaCirculo(2) == 12.56\n assert areaCirculo(3.6) == 40.69\n assert areaCirculo(11) == 379.94\n assert areaCirculo(45.0) == 6358.5\n\n\n # Exercise 3\n if isinstance(miRadio, int) or isinstance(miRadio, float):\n pass\n else:\n print('Los datos proporcionados son incorrectos')\n \n assert longitudCircumferencia(2) == 12.56\n assert longitudCircumferencia(3.6) == 22.61\n assert longitudCircumferencia(11) == 69.08\n assert longitudCircumferencia(45.0) == 282.6\n\n assert areaCirculo(2) == 12.56\n assert areaCirculo(3.6) == 40.69\n assert areaCirculo(11) == 379.94\n assert areaCirculo(45.0) == 6358.5\n\n\n # Exercise 4\n assert areaRectangulo(2, 4) == 8\n assert areaRectangulo(2.0, 4.0) == 8.0\n assert areaRectangulo(5, 12) == 60\n assert areaRectangulo(15.4, 50.2) == 773.08\n\n assert perimetroRectangulo(2, 4) == 12\n assert perimetroRectangulo(2.0, 4.0) == 12.0\n assert perimetroRectangulo(5, 12) == 34\n assert perimetroRectangulo(15.4, 50.2) == 131.2\n\n\n # Exercise 5\n assert sueldoAPercibir(1000) == 800\n assert sueldoAPercibir(1200.0) == 960.0\n assert sueldoAPercibir(560) == 448\n assert sueldoAPercibir(978.5) == 782.8\n\n\n # Exercise 7\n assert signoNumero(8) == 'Es positivo'\n assert signoNumero(5.5) == 'Es positivo'\n assert signoNumero(0) == 'Es positivo'\n assert signoNumero(-5) == 'Es negativo'\n assert signoNumero(-12.8) == 'Es negativo'\n\n\n # Exercise 8\n assert relacionOrden(6, 7) == 'El primer número 6 es menor que el segundo 7'\n assert relacionOrden(10.5, 10.0) == 'El primer número 10.5 es mayor que el segundo 10.0'\n assert relacionOrden(37, 37) == 'Los dos números son iguales 37'\n assert relacionOrden(120.0, 120) == 'Los dos números son iguales 120.0'\n\n\n # Exercise 9\n assert suma(2, 4) == 'La suma de los dos números es: 6'\n assert suma(20, 35) == 'La suma de los dos números es: 55'\n assert suma(-8, 16) == 'No se calcula la suma porque alguno de los números o los dos no son positivos'\n assert suma(-47, -1) == 'No se calcula la suma porque alguno de los números o los dos no son positivos'\n\n\n # Exercise 10\n assert suma(2, 4) == 'La suma de los dos números es: 6'\n assert suma(-20, 35) == 'No se calcula la suma porque el primer número es negativo'\n assert suma(8, -16) == 'No se calcula la suma porque el segundo número es negativo'\n assert suma(-47, -1) == 'No se calcula la suma porque los dos números son negativos'\n\n\n # Exercise 
11\n assert relacionSumaResultado(2, 3, 5) == 'Se cumple que 5 = 2 + 3'\n assert relacionSumaResultado(4, 12, 8) == 'Se cumple que 12 = 4 + 8'\n assert relacionSumaResultado(25, 11, 14) == 'Se cumple que 25 = 11 + 14'\n assert relacionSumaResultado(6, 2, 3) == 'Los números no se relacionan por suma y resultado'\n\n\n # Exercise 12\n assert sumaEnCondiciones(2, 3) == 'Almenos uno de los números no es par'\n assert sumaEnCondiciones(6, 12) == 'El primer número no es mayor que 50'\n assert sumaEnCondiciones(54, 90) == 'El segundo número no está entre 100 y 500'\n assert sumaEnCondiciones(60, 300) == 360\n\n\n # Exercise 13\n assert importeFinal(19) == 19\n assert importeFinal(50) == 47.5\n assert importeFinal(780) == 702.0\n","sub_path":"ejerciciosIniciacion_1-13.py","file_name":"ejerciciosIniciacion_1-13.py","file_ext":"py","file_size_in_byte":12493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"366755304","text":"import sqlite3\nimport random\n\nfrom sqlite3 import Error\n\ncreate_table_players_q = \"\"\" CREATE TABLE IF NOT EXISTS players (\n id integer PRIMARY KEY,\n room text ); \"\"\"\ncreate_table_words_q = \"\"\" CREATE TABLE IF NOT EXISTS words (\n word text,\n author integer,\n room text,\n used integer ); \"\"\"\nadd_word_q = \"\"\" INSERT INTO words(word, author, room, used) VALUES(\"{}\", {}, \"{}\", 0);\"\"\"\nget_word_q = \"\"\" SELECT word FROM words WHERE room=\"{}\" AND used=0; \"\"\"\nmark_word_used_q = \"\"\" UPDATE words\n SET used=1\n WHERE (word=\"{}\" AND room=\"{}\");\"\"\"\n\nfind_unused_word_q = \"\"\" SELECT word FROM words WHERE word=\"{}\" AND used=0 AND room=\"{}\"; \"\"\"\nnum_words_in_hat_q = \"\"\" SELECT COUNT(word) as num FROM words WHERE used=0 AND room=\"{}\"; \"\"\"\nadd_player_q = \"\"\" INSERT INTO players(id, room) VALUES({}, \"{}\");\"\"\"\nfind_player_room_q = \"\"\" SELECT room FROM players WHERE id={};\"\"\"\nremove_player_room_q = \"\"\" DELETE FROM players\n WHERE (id={});\"\"\"\nroom_count_q = \"\"\" SELECT COUNT(id) FROM players WHERE room=\"{}\";\"\"\"\n\n\ndef create_connection(db_file):\n \"\"\" create a database connection to a SQLite database \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n return conn\n\n\ndef execute_sql(db_file, sql):\n conn = create_connection(db_file)\n try:\n c = conn.cursor()\n c.execute(sql)\n conn.commit()\n conn.close()\n return True\n except Error as e:\n print(e)\n return False\n\n\ndef execute_sql_select(db_file, sql):\n conn = create_connection(db_file)\n try:\n c = conn.cursor()\n c.execute(sql)\n ans = c.fetchall()\n c.close()\n return ans\n except Error as e:\n print(e)\n\n\ndef check_word(word):\n def check_rus(word):\n for c in word:\n if not (('а' <= c <= 'я') or c == '-' or c == 'ё'):\n return False\n return True\n\n def check_en(word):\n for c in word:\n if not (('a' <= c <= 'z') or c == '-'):\n return False\n return True\n\n if not len(word):\n return False\n\n return check_rus(word) or check_en(word)
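\n\n\n# Illustrative behaviour of check_word (an added sketch): a word must consist\n# entirely of Cyrillic letters or entirely of Latin letters, optionally with\n# hyphens, e.g. check_word('шляпа') -> True, check_word('hat-trick') -> True\n# and check_word('word42') -> False because digits are rejected.\nclass Hat:\n def __init__(self, db_file):\n self.db_file = db_file\n execute_sql(self.db_file, create_table_words_q)\n\n def add_word(self, word, player_id, room):\n word = word.lower()\n if not check_word(word):\n return False\n else:\n query = find_unused_word_q.format(word, room)\n res = execute_sql_select(self.db_file, query)\n if res:\n return False\n query = add_word_q.format(word, player_id, room)\n return execute_sql(self.db_file, query)\n\n def get_word(self, room):\n words = 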
execute_sql_select(self.db_file, get_word_q.format(room))\n if not words:\n return None\n word = random.choice(words)[0]\n if execute_sql(self.db_file, mark_word_used_q.format(word, room)):\n return word\n else:\n return \"Failed to take the word out of the hat\"\n\n def remove_word(self, word, room):\n query = find_unused_word_q.format(word, room)\n res = execute_sql_select(self.db_file, query)\n if not res:\n return False\n status = execute_sql(self.db_file, mark_word_used_q.format(word, room))\n return status\n\n def words_in_hat(self, room):\n words_num = execute_sql_select(self.db_file, num_words_in_hat_q.format(room))\n if not words_num:\n return \"\"\n return words_num[0][0]\n\n\nclass HatWrapper:\n def __init__(self, room, hat):\n self.room = room\n self.hat = hat\n\n def get_word(self):\n word = self.hat.get_word(self.room)\n if not word:\n return None\n return word\n\n def add_word(self, word, player):\n return self.hat.add_word(word, player, self.room)\n\n\nclass Game:\n def __init__(self, db_file):\n self.db_file = db_file\n execute_sql(self.db_file, create_table_players_q)\n\n def add_player(self, player_id, room):\n execute_sql(self.db_file, add_player_q.format(player_id, room))\n\n def leave_room(self, player_id):\n execute_sql(self.db_file, remove_player_room_q.format(player_id))\n\n def room_for_player(self, player_id):\n rooms = execute_sql_select(self.db_file, find_player_room_q.format(player_id))\n if rooms:\n return rooms[0][0]\n else:\n return None\n\n def room_size(self, room):\n rooms = execute_sql_select(self.db_file, room_count_q.format(room))\n if rooms:\n return rooms[0][0]\n else:\n return None\n\n\ndef start_game(db_file):\n hat = Hat(db_file)\n game = Game(db_file)\n return hat, game\n\n\nif __name__ == '__main__':\n hat, game = start_game(\"test165.db\")\n assert game.room_for_player(1) is None\n game.add_player(1, \"room1\")\n game.add_player(3, \"room2\")\n game.add_player(2, \"room1\")\n game.add_player(4, \"room2\")\n assert game.room_size(\"room1\") == 2\n assert game.room_size(\"room2\") == 2\n assert game.room_for_player(2) == \"room1\"\n assert hat.get_word(\"room1\") is None\n assert hat.add_word(\"первое\", 1, \"room1\")\n assert hat.add_word(\"второе\", 1, \"room1\")\n hat.words_in_hat(\"room1\")\n assert hat.add_word(\"треТье\", 1, \"room1\")\n assert not hat.remove_word(\"кусь\", \"room1\")\n assert hat.add_word(\"чеТвертое\", 1, \"room1\")\n assert hat.add_word(\"пятое\", 1, \"room1\")\n assert hat.remove_word(\"пятое\", \"room1\")\n assert not hat.add_word(\"djfkjsd\", 1, \"room1\")\n assert hat.get_word(\"room1\")\n assert hat.get_word(\"room1\")\n assert hat.get_word(\"room1\")\n assert hat.get_word(\"room1\")\n assert not hat.get_word(\"room1\")\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"208086599","text":"##\n## Example 1:\n## ==========\n\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\nfig = plt.figure()\nax = fig.gca(projection = '3d')\nX, Y, Z = axes3d.get_test_data(0.05)\ncset = ax.contourf(X, Y, Z, cmap = cm.coolwarm)\nax.clabel(cset, fontsize = 9, inline = 1)\n\nplt.title('Filled Contour Plots 3D example 1')\nplt.savefig('Filled_Contour_Plots_3D_example1.png')\nplt.show()\n\n##\n## Example 2:\n## ==========\n\nfig = plt.figure()\nax = fig.gca(projection = '3d')\nX, Y, Z = axes3d.get_test_data(0.05)\nax.plot_surface(X, Y, Z, rstride = 8, 
cstride = 8, alpha = 0.3)\ncset = ax.contourf(X, Y, Z, zdir = 'z', offset = -100, cmap = cm.coolwarm)\ncset = ax.contourf(X, Y, Z, zdir = 'x', offset = -40, cmap = cm.coolwarm)\ncset = ax.contourf(X, Y, Z, zdir = 'y', offset = 40, cmap = cm.coolwarm)\n\nax.set_xlabel('X')\nax.set_xlim(-40, 40)\nax.set_ylabel('Y')\nax.set_ylim(-40, 40)\nax.set_zlabel('Z')\nax.set_zlim(-100, 100)\n\nplt.title('Filled Countour Plots 3D example 2')\nplt.savefig('Filled_Countour_Plots_3D_example2.png')\nplt.show()\n\n\n# source : https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html","sub_path":"Graphics/Pyplot/Filled_Countour_Plots_3D.py","file_name":"Filled_Countour_Plots_3D.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"309782085","text":"from math import *\nfrom numpy import *\nfrom random import uniform\n\n\ndef get_initial_theta():\n l1_in = 3\n l1_out = 2\n epsilon1 = sqrt(6) / sqrt(l1_in + l1_out)\n\n l2_in = 3\n l2_out = 1\n epsilon2 = sqrt(6) / sqrt(l2_in + l2_out)\n\n theta1 = zeros((2, 3))\n theta2 = zeros((1, 3))\n\n for i in range(theta1.shape[0]):\n for j in range(theta1.shape[1]):\n theta1[i, j] = uniform(-epsilon1, epsilon1)\n\n for i in range(theta2.shape[0]):\n for j in range(theta2.shape[1]):\n theta2[i, j] = uniform(-epsilon2, epsilon2)\n\n return theta1, theta2\n","sub_path":"initial_theta.py","file_name":"initial_theta.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"59805104","text":"# Lambda or Anonymous Functions in Python\n\"\"\"\ndef add(a,b):\n return a+b\nthis function calculates sum of two numbers as arguments\nthis function's shortcut form is\n\"\"\"\na = int(input(\"Enter A Number\"))\nb = int(input(\"Enter A Number\"))\nsum = lambda a, b: a+b\n# This is a lambda function(a function sum is created which take two arguments (a & b) and return a+b\nprint(\"Sum =\", sum(a, b))\n\na = [[1, 14], [8, 12], [0, 5]] # This is a list of lists i.e. 
a list inside a list\n\n\ndef a_1stindex(n):\n    return n[1]\n\n\na.sort(key = a_1stindex) # key takes a function as its value\n# Sorting is performed using a_1stindex as the key and then the sorted list is displayed as a\nprint(\"List of lists sorted in order of its second element is\", a)\n\n# Similar code using a lambda function (here sorting by the first element) is\na = [[1, 14], [8, 12], [0, 5]]\na.sort(key=lambda n: n[0]) # Refer first lambda function to understand working of lambda functions\n# Sorting is performed using lambda n: n[0] and then the sorted list is printed as a\nprint(\"List of lists sorted in order of its first element is\", a)\n\n# Now we will study the sort function (an inbuilt Python function)\nl = [13, 2, 8, 101, 32, 11, 75, 23, 69]\nl.sort()\nprint(l)\n\n# To print the list in descending order (using the reverse argument of the sort method)\nl1 = [13, 2, 8, 101, 32, 11, 75, 23, 69]\nl1.sort(reverse=True)\nprint(l1)\n\n# sort() has another parameter called key which takes a function and sorts by the values that the function returns\n# For example:-\nsl = ['aaa', 'bb', 'cccc', 'dd']\nsl.sort(key=len)\n# Sorting is performed on the length of each element and the sorted list is displayed as sl\n\n# In the above code len is passed as a function so the list will be sorted on the basis of length rather than ASCII values\nprint(sl)\n\n\n\n\n","sub_path":"Lambda Functions In Python.py","file_name":"Lambda Functions In Python.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"588555195","text":"#!/usr/bin/env python\n\n# Copyright (c) 2016. Mount Sinai School of Medicine\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom __future__ import (\n    print_function,\n    division,\n    absolute_import,\n)\nimport argparse\n\nfrom mhcflurry.common import (\n    parse_int_list,\n    split_uppercase_sequences,\n    split_allele_names,\n)\nfrom mhcflurry.predict import predict\nfrom six import string_types\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n    \"--mhc\",\n    default=\"HLA-A*02:01\",\n    type=split_allele_names,\n    help=\"Comma separated list of MHC alleles\")\n\nparser.add_argument(\n    \"--sequence\",\n    required=True,\n    type=split_uppercase_sequences,\n    help=\"Comma separated list of protein sequences\")\n\nparser.add_argument(\n    \"--fasta-file\",\n    help=\"FASTA file of protein sequences to chop up into peptides\")\n\nparser.add_argument(\n    \"--peptide-lengths\",\n    default=[9],\n    type=parse_int_list,\n    help=\"Comma separated list of peptide length, e.g. 
8,9,10,11\")\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n if len(args.peptide_lengths) == 0:\n raise ValueError(\"Must specify at least one peptide length\")\n\n long_sequences = args.sequence\n if isinstance(long_sequences, string_types):\n long_sequences = [long_sequences]\n\n peptides = []\n for peptide_length in args.peptide_lengths:\n for long_sequence in long_sequences:\n total_length = len(long_sequence)\n for i in range(total_length - peptide_length):\n peptides.append(long_sequence[i:i + peptide_length])\n print(\"Running predictor over %d sub-sequences\" % len(peptides))\n df = predict(alleles=args.mhc, peptides=peptides)\n print(df.to_csv(sep=\"\\t\", index=False), end=\"\")\n","sub_path":"script/mhcflurry-predict-class1.py","file_name":"mhcflurry-predict-class1.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"238999894","text":"#!/usr/bin/env python3\n\nimport telebot\nfrom optparse import OptionParser\n\n\ndef message_for_study_comments():\n if options.website == '':\n msg = f'Внимание!\\n\\n' \\\n f'Пользователь: {options.name}\\n' \\\n f'К обучению: {options.subject}\\n\\n' \\\n f'Оставил коментарий: {options.message}\\n\\n' \\\n f'E-mail пользователя: {options.email}\\n' \\\n f'Cайт пользователя: У пользователя отсутствует сайт'\n else:\n msg = f'Внимание!\\n\\n' \\\n f'Пользователь: {options.name}\\n' \\\n f'К обучению: {options.subject}\\n\\n' \\\n f'Оставил коментарий: {options.message}\\n\\n' \\\n f'E-mail пользователя: {options.email}\\n' \\\n f'Cайт пользователя: {options.website}'\n return msg\n\n\ndef message_for_news_comments():\n if options.website == '':\n msg = f'Внимание!\\n\\n' \\\n f'Пользователь: {options.name}\\n' \\\n f'К новости: {options.subject}\\n\\n' \\\n f'Оставил коментарий: {options.message}\\n\\n' \\\n f'E-mail пользователя: {options.email}\\n' \\\n f'Cайт пользователя: У пользователя отсутствует сайт'\n else:\n msg = f'Внимание!\\n\\n' \\\n f'Пользователь: {options.name}\\n' \\\n f'К новости: {options.subject}\\n\\n' \\\n f'Оставил коментарий: {options.message}\\n\\n' \\\n f'E-mail пользователя: {options.email}\\n' \\\n f'Cайт пользователя: {options.website}'\n return msg\n\n\ndef message_for_question():\n msg = f'Внимание!\\n\\n' \\\n f'Пользователь: {options.name}\\n' \\\n f'К теме: {options.subject}\\n\\n' \\\n f'Написал сообщение: {options.message}\\n\\n' \\\n f'E-mail пользователя: {options.email}'\n return msg\n\n\nparser = OptionParser()\nparser.add_option('-n', '--name', dest='name',\n help='Name', metavar='NAME')\nparser.add_option('-e', '--email', dest='email',\n help='Email', metavar='EMAIL')\nparser.add_option('-s', '--subject', dest='subject',\n help='Subject', metavar='SUBJECT')\nparser.add_option('-m', '--message', dest='message',\n help='Message', metavar='MESSAGE')\nparser.add_option('-w', '--website', dest='website',\n help='Web-Site', metavar='WEBSITE')\nparser.add_option('-o', '--option', dest='option',\n help='comments or question', metavar='OPTION')\n\n(options, args) = parser.parse_args()\n\nwith open('bot_engine/api.txt', 'r') as apifile:\n bot = telebot.TeleBot(apifile.read())\n\nwith open('bot_engine/chat_id.txt', 'r') as chat_id:\n if options.option == 'news_comments':\n bot.send_message(chat_id=chat_id.read(), text=message_for_news_comments(), parse_mode='html')\n elif options.option == 'question':\n bot.send_message(chat_id=chat_id.read(), text=message_for_question(), parse_mode='html')\n elif 
options.option == 'study_comments':\n bot.send_message(chat_id=chat_id.read(), text=message_for_study_comments(), parse_mode='html')\n","sub_path":"bot_engine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"385231214","text":"import os\nimport yaml\nimport glob\nimport pickle\nimport operator\nimport requests\nimport shutil\nfrom collections import OrderedDict\nfrom typing import (Any, List, Optional)\n\nfrom gtmcore.gitlib import get_git_interface\nfrom gtmcore.logging import LMLogger\nfrom gtmcore.configuration import Configuration\n\nlogger = LMLogger.get_logger()\n\n\ndef repo_url_to_name(url: str) -> str:\n \"\"\"Method to generate a directory name from the repo URL for local storage\n\n Assumes URL of the form whatever/namespace/repo(.git)(@branch). SSH URLs will not work.\n\n Args:\n url(str): repository URL\n\n Returns:\n str\n \"\"\"\n if \"@\" in url:\n url, branch = url.split(\"@\")\n if url.endswith('.git'):\n url = url[:-4]\n _, namespace, repo = url.rsplit(\"/\", 2)\n return \"{}_{}\".format(namespace, repo)\n\n\nclass RepositoryManager(object):\n \"\"\"Class to manage local copies of Base Repositories\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Constructor\n \"\"\"\n self.config = Configuration()\n self.local_repo_directory = os.path.expanduser(os.path.join(self.config.app_workdir,\n \".labmanager\", \"environment_repositories\"))\n self.git = get_git_interface(self.config.config['git'])\n\n @staticmethod\n def _internet_is_available() -> bool:\n \"\"\"Private method to check if the user can get to GitHub, since that is where the component repos are\n\n Returns:\n None\n \"\"\"\n # Create the directory to clone into\n try:\n requests.head('https://github.com', timeout=15)\n except requests.exceptions.ConnectionError:\n return False\n\n return True\n\n def _update_repo(self, location: str, branch: str) -> None:\n \"\"\"Private method to update a repository\n\n Args:\n location: the directory containing the repository\n branch: any refspec acceptable to git (default is all configured remote branches, which defaults to all\n branches with a regular clone, or just the initial branch with a single-branch clone)\n\n Returns:\n None\n \"\"\"\n # Set the gitlib to point to that directory\n self.git.set_working_directory(location)\n\n # Fetch the requested branch\n self.git.remote_set_branches([branch])\n self.git.fetch(refspec=branch)\n self.git.checkout(branch)\n # We do a reset instead of a merge because we don't want to retain local changes\n # If the branch was freshly checked out above, this is redundant, but it's just called once at service start-up\n self.git.reset(f'origin/{branch}')\n\n def update_repositories(self) -> bool:\n \"\"\"Method to update all repositories in the LabManager configuration file\n\n If the repositories do not exist, they are cloned\n\n Returns:\n bool: flag indicting if repos updated successfully\n \"\"\"\n if self._internet_is_available():\n # Get repo Urls\n repo_urls = self.config.config[\"environment\"][\"repo_url\"]\n\n for repo_url in repo_urls:\n repo_dir_name = repo_url_to_name(repo_url)\n full_repo_dir = os.path.join(self.local_repo_directory, repo_dir_name)\n\n # Get branch if encoded in URL\n branch = 'master'\n if \"@\" in repo_url:\n repo_url, branch = repo_url.split(\"@\")\n\n # Check if repo exists locally\n if not os.path.exists(full_repo_dir):\n # Create the directory to clone into\n 
os.makedirs(full_repo_dir)\n\n # Clone the repo\n self.git.clone(repo_url, full_repo_dir, branch, single_branch=True)\n else:\n # Need to update\n self._update_repo(full_repo_dir, branch)\n\n for existing_dir in [n for n in os.listdir(self.local_repo_directory)\n if os.path.isdir(os.path.join(self.local_repo_directory, n))]:\n if existing_dir not in [repo_url_to_name(r) for r in repo_urls]:\n # We need to remove old component repos because they may be out of date\n # and crash any further processing.\n logger.warning(f\"Removing old LabManager index repository {existing_dir}\")\n shutil.rmtree(os.path.join(self.local_repo_directory, existing_dir))\n return True\n else:\n return False\n\n def index_repository(self, repo_name: str) -> OrderedDict:\n \"\"\"Method to 'index' a base image directory in a single environment component repository\n\n Currently, the `index` is simply an ordered dictionary of all of the base image components in the repo\n The dictionary contains the contents of the YAML files for every version of the component and is structured:\n\n {\n \"\": {\n \"info\": { repo info stored in repo config.yaml }\n \"\": {\n \"\": { YAML contents }, ...\n }, ...\n }\n }\n\n Args:\n repo_name: The name of the repo cloned locally\n\n Returns:\n OrderedDict\n \"\"\"\n # Get full path to repo\n repo_dir = os.path.join(self.local_repo_directory, repo_name)\n\n # Get all base image YAML files\n # E.g., repo/*/*.yaml\n yaml_files = glob.glob(os.path.join(repo_dir, \"*\", \"*.yaml\"))\n\n data: OrderedDict[str, Any] = OrderedDict()\n data[repo_name] = OrderedDict()\n\n # Read YAML files and write data to dictionary\n for yf in yaml_files:\n with open(yf, 'rt', encoding='utf8') as yf_file:\n yaml_data = yaml.safe_load(yf_file)\n _, component_name, _ = yf.rsplit(os.path.sep, 2)\n\n # Save the COMPONENT repository to aid in accessing components via API\n # Will pack this info into the `component` field for use in mutations to access the component\n yaml_data[\"repository\"] = repo_name\n\n if component_name not in data[repo_name]:\n data[repo_name][component_name] = OrderedDict()\n\n revision = yaml_data['revision']\n data[repo_name][component_name][revision] = yaml_data\n\n return data\n\n @staticmethod\n def build_base_list_index(index_data: OrderedDict) -> List:\n \"\"\"Method to convert the structured index of all versions into a flat list with only the latest version\n\n Returns:\n list\n \"\"\"\n base_list = []\n repos = list(index_data.keys())\n for repo in repos:\n if repo == 'info':\n # ignore the repository info section\n continue\n\n bases = list(index_data[repo].keys())\n\n for base in bases:\n # Sort based on the revision\n revs = list(index_data[repo][base].items())\n revs = sorted(revs, reverse=True, key=operator.itemgetter(0))\n base_list.append(revs[0][1])\n\n return sorted(base_list, key=lambda n: n['id'])\n\n def index_repositories(self) -> None:\n \"\"\"Method to index repos using a naive approach\n\n Stores index data in a pickled dictionaries in /.labmanager/environment_repositories/.index/\n\n Returns:\n None\n \"\"\"\n # Get all local repos\n repo_urls = self.config.config[\"environment\"][\"repo_url\"]\n repo_names = [repo_url_to_name(x) for x in repo_urls]\n\n base_image_all_repo_data: OrderedDict = OrderedDict()\n for repo_name in repo_names:\n # Index Base Images\n base_image_all_repo_data.update(self.index_repository(repo_name))\n\n # Generate list index\n base_image_list_repo_data = self.build_base_list_index(base_image_all_repo_data)\n\n # Write files\n with 
open(os.path.join(self.local_repo_directory, \"base_index.pickle\"), 'wb') as fh:\n pickle.dump(base_image_all_repo_data, fh)\n with open(os.path.join(self.local_repo_directory, \"base_list_index.pickle\"), 'wb') as fh:\n pickle.dump(base_image_list_repo_data, fh)\n","sub_path":"packages/gtmcore/gtmcore/environment/repositorymanager.py","file_name":"repositorymanager.py","file_ext":"py","file_size_in_byte":8375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"270879928","text":"# -*- coding:utf-8 -*-\n\n# Stdlib imports\n\n# Core Django imports\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.sites.models import Site\n\n# Third-party app imports\nfrom imagekit.models import ProcessedImageField\nfrom imagekit.processors import ResizeToFill\nfrom solo.models import SingletonModel\n\n\n# Imports from your apps\nfrom core.models import TimeStampedModel\n\n\nclass General(models.Model):\n \"\"\"\n Stores values for ``mezzanine.conf`` that can be edited via the admin.\n \"\"\"\n site = models.ForeignKey(\"sites.Site\", editable=False)\n name = models.CharField(max_length=50)\n value = models.CharField(max_length=100)\n\n class Meta:\n verbose_name = _(\"Setting\")\n verbose_name_plural = _(\"Settings\")\n\n def __str__(self):\n return \"%s: %s\" % (self.name, self.value)\n\n def save(self, update_site=False, *args, **kwargs):\n \"\"\"\n Set the site to the current site when the record is first\n created, or the ``update_site`` argument is explicitly set\n to ``True``.\n \"\"\"\n if update_site or not self.id:\n self.site_id = Site.objects.get(pk=1).id\n super(General, self).save(*args, **kwargs)\n\n\nclass Company(TimeStampedModel, SingletonModel):\n u\"\"\"\n Configurações da empresa\n \"\"\"\n\n logo = ProcessedImageField(\n verbose_name=_(u'Logomarca'),\n upload_to='company_logo',\n processors=[ResizeToFill(200, 50)],\n format='PNG',\n options={'quality': 90},\n help_text=_(\n u'Faça upload de imagem com o tamanho 200x50 em .png'\n )\n )\n u\"\"\"\n atributo para setar a logomarca dos sites\n utilizado no header e no footer do template\n \"\"\"\n\n name = models.CharField(\n verbose_name=_(u'Nome da empresa'),\n max_length=100,\n help_text=_(\n u'Exemplo: BonitoWay'\n )\n )\n u\"\"\"\n Atributo para setar o nome da empresa\n \"\"\"\n\n short_description_name = models.CharField(\n verbose_name=_(u'Breve descrição do nome da empresa'),\n max_length=100,\n help_text=_(\n u'Frase de complemento do logo\\n'\n u'Turismo e eventos'\n )\n )\n u\"\"\"\n Atributo para setar uma descrição ou frase impactante com nome da empresa\n \"\"\"\n\n url = models.URLField(\n verbose_name=_(u'Site da empresa'),\n max_length=100,\n help_text=_(\n u'Insira a url da empresa com http://www.domain.com'\n )\n )\n u\"\"\"\n Atributo para setar o site da empresa\n \"\"\"\n\n address = models.CharField(\n verbose_name=_(u'Endereço'),\n max_length=100,\n help_text=_(\n u'Insira o endereço da sua empresa'\n )\n )\n u\"\"\"\n Atributo para setar o logradouro da empresa\n \"\"\"\n\n number = models.CharField(\n verbose_name=_(u'Nº'),\n max_length=100,\n help_text=_(\n u'Insira o número do logradouro'\n )\n )\n u\"\"\"\n Atributo para setar o número do endereço\n \"\"\"\n\n neighborhood = models.CharField(\n verbose_name=_(u'Bairro'),\n max_length=50,\n help_text=_(\n u'Insira o bairro da empresa'\n )\n )\n u\"\"\"\n Atributo para setar o bairro do endereço\n \"\"\"\n\n city = models.CharField(\n 
verbose_name=_(u'Cidade'),\n        max_length=50,\n        help_text=_(\n            u'Insira a cidade que localiza a empresa'\n        )\n    )\n    u\"\"\"\n    Atributo para setar a cidade da empresa\n    \"\"\"\n\n    state = models.CharField(\n        verbose_name=_(u'Estado'),\n        max_length=50,\n        help_text=_(\n            u'Insira o estado que localiza a empresa'\n        )\n    )\n    u\"\"\"\n    Atributo para setar o estado da empresa\n    \"\"\"\n\n    zip = models.CharField(\n        verbose_name=_(u'CEP'),\n        max_length=12,\n        help_text=_(\n            u'Insira o código postal do endereço'\n        )\n    )\n    u\"\"\"\n    Atributo para setar o cep da empresa\n    \"\"\"\n\n    latitude = models.CharField(\n        verbose_name=_(u'Latitude'),\n        max_length=50,\n        blank=True,\n        help_text=_(\n            u'Insira a latitude usado no google maps'\n        )\n    )\n    u\"\"\"\n    Atributo para setar o latitude da empresa\n    \"\"\"\n\n    longitude = models.CharField(\n        verbose_name=_(u'Longitude'),\n        max_length=50,\n        blank=True,\n        help_text=_(\n            u'Insira a longitude usado no google maps'\n        )\n    )\n    u\"\"\"\n    Atributo para setar a longitude da empresa\n    \"\"\"\n\n    email = models.EmailField(\n        verbose_name=_(u'Email'),\n        help_text=_(\n            u\"Preencha esse campo para definir o email principal da empresa\"\n        )\n    )\n    u\"\"\"\n    Atributo para setar o email da empresa\n    \"\"\"\n\n    phones = models.CharField(\n        verbose_name=_(u'Telefones'),\n        max_length=255,\n        help_text=_(\n            u'Dica: Separe cada telefone por \",\" ex: 31 9999 8888,'\n            u'31 8888 7777 ...'\n        )\n    )\n    u\"\"\"\n    Atributo para setar os telefones da empresa\n    \"\"\"\n\n    phone_hotline = models.CharField(\n        verbose_name=_(u'Telefone de Emergência'),\n        max_length=50,\n        blank=True,\n        help_text=_(\n            u'Insira um telefone de plantão para atendimento'\n        )\n    )\n    u\"\"\"\n    Atributo para setar o telefone emergencial da empresa\n    \"\"\"\n\n    class Meta:\n        verbose_name = _(u\"Empresa\")\n        verbose_name_plural = _(u\"Empresa\")\n\n    def __unicode__(self):\n        return u\"%s\" % self.name\n\n    def get_phones(self):\n        \"\"\"\n        Lista telefones cadastrados\n        \"\"\"\n        telefones = self.phones.split(',')\n        telefones = [i.strip() for i in telefones]\n        return telefones\n","sub_path":"configurations/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"634478087","text":"def generator(space):\n\ti = 0; html = \"\"\"\n\t\t\n\t\"\"\"\n\twhile i < len(space):\n\t\ti+=1\n\t\tline = '
' + space[i-1][0] + '
 \\n'\n\t\thtml += line\n\tdoc = open(\"dinamic.html\", \"w\")\n\tdoc.write(html)\n\tdoc.close()\n","sub_path":"Scripts/aqwa/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"548447533","text":"# -*- coding: UTF-8 -*-\n\nimport json\nfrom sanic import Sanic\nfrom sanic import response\nimport logging\n\nlogger = logging.getLogger(__name__)\n\napp = Sanic()\n\n\n@app.route(\"/health\", methods=['GET'])\nasync def health(request):\n    result = {'status': 'UP'}\n    return response.json(result)\n\n@app.route('/', methods=['GET'])\nasync def get_name(request):\n    logger.info(request)\n    return response.json({\"name\": \"sanic_service\"})\n\n\n# Define the POST handler\n@app.route('/responds/', methods=['POST'])\nasync def post_handler(request):\n    request_json = request.body\n    input_json = json.loads(request_json.decode('utf8'))\n    logger.info(input_json)\n    return response.text(\"连接服务成功\")\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"localhost\", port=8400)\n","sub_path":"springboot-python/python/sanic_server.py","file_name":"sanic_server.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"397796384","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd #pandas library for data\nimport matplotlib.pyplot as plt # matplotlib library for visualization (but not used here (see LLD document))\nimport numpy as np # numpy library for manipulation \ndatatrain=pd.read_csv(r\"C:\\Users\\welcome\\Desktop\\Train.csv\") #datasets..\ndatatest=pd.read_csv(r\"C:\\Users\\welcome\\Desktop\\Test.csv\")\ndatatest_filled=pd.read_excel(r\"C:\\Users\\welcome\\Desktop\\dummy_test.xlsx\")\nop=datatest.iloc[:,[0,6,4,5,10,1,2,7]].values # op dataset used to evaluate results w.r.t. the given input\nx1=datatrain.iloc[:,[4,5,10]].values # training dataset independent variables\ny1=datatrain.iloc[:,-1].values # training dataset dependent variable\nx2=datatest.iloc[:,[4,5,10]].values # testing dataset \n\nfrom sklearn.preprocessing import LabelEncoder # data preprocessing\nb=LabelEncoder() # LabelEncoder encodes the strings into labels (numeric)\nx1[:,0]=b.fit_transform([i for i in x1[:,0]])\nx2[:,0]=b.transform([i for i in x2[:,0]])\nc=LabelEncoder()\nx1[:,-1]=c.fit_transform([i for i in x1[:,-1]])\nx2[:,-1]=c.transform([i for i in x2[:,-1]]) # transform (not fit_transform) so test labels match the training encoding\n\n\nfrom sklearn.linear_model import LinearRegression # Algorithm part\nfrom sklearn.preprocessing import PolynomialFeatures\nre=PolynomialFeatures(degree=7) \nRepoly=LinearRegression().fit(re.fit_transform(x1),y1)\nwhile(True):\n    print(\"ENTER Id's (Item_identifier & Outlet_identifier) here \",end=\" \")\n    a=list(map(str,input().split(' '))) #input\n    for i in op:\n        if (i[0]==a[0] and i[1]==a[1]): # true if both IDs match\n            print(\"details:- \",i[2],\",\",\"Rs.\"+str(i[3]),\",\",i[4],\",\",str(i[5])+\"g/Kg.\",\",\",i[6],\",\",i[7])\n            print(\"Predicted Sale Rs.\"+str(round(Repoly.predict(re.transform([[b.transform([i[2]])[0],i[3],c.transform([i[4]])[0]]]))[0])),\"\\n\\n\\n\") #predicted part\n            break\n\n\n# datatest[\"Predicted_item_outlet_sale\"]=Repoly.predict(re.transform(x2)) # prediction of test dataset goes to PredictedTest.csv file\n# datatest.to_csv(\"PredictedTest.csv\",index=False) 
\n\n","sub_path":"finalproject.py","file_name":"finalproject.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"170880950","text":"#!/usr/bin/env python3 -tt\n\"\"\"\nFile: triangle-numbers.py\n\nThe nth term of the sequence of triangle numbers\n is given by 1 + 2 + ... + n = n(n+1) / 2. For\n example, the first ten triangle numbers are: \n\n1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...\n\nBy converting each letter in a word to a number \ncorresponding to its alphabetical position and\nadding these values we form a word value. For\nexample, the word value for SKY is:\n19 + 11 + 25 = 55 and 55 is a triangle number.\n If the word value is a triangle number then \nwe shall call the word a triangle word.\n\nUsing either /usr/share/dict/words or \nhttp://stanfordpython.com/words, a 2.5M text\nfile containing over 200 thousand English words,\nwhich are triangle words? As a sanity check, we\nfound 16303 distinct triangle words.\n\nHint: you can use ord(ch) to get the integer\nASCII value of a character\n\"\"\"\n\ntriangle_numbers = [x*(x+1)//2 for x in range(40)]\n\n# Note: assumes character is in [a-zA-Z]\ndef get_alphabetical_order(char):\n if 'a' <= char <= 'z':\n return ord(char) - ord('a') + 1\n return ord(char) - ord('A') + 1\n\ndef get_word_sum(word):\n return sum(get_alphabetical_order(char) for char in word)\n\nwith open('/usr/share/dict/words', 'r') as f:\n lines = f.readlines()\n\ndistinct_words = {word.lower() for word in lines}\n\n\nprint(len([word for word in distinct_words\n if (get_word_sum(word.strip()) in triangle_numbers)]))\n","sub_path":"cs41/lab2/triangle-numbers.py","file_name":"triangle-numbers.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"484037236","text":"\"\"\"Base class for ResourceSync capabilities with lists of resources including \nsupport for both sitemaps and sitemapindexes.\n\nExtends ListBase to add support for sitemapindexes.\n\"\"\"\n\nimport collections\nimport os\nfrom datetime import datetime\nimport re\nimport sys\nfrom urllib import URLopener\n\nfrom list_base import ListBase\nfrom resource import Resource\nfrom sitemap import Sitemap\nfrom mapper import Mapper, MapperError\nfrom url_authority import UrlAuthority\nfrom utils import compute_md5_for_file\n\nclass ListBaseIndexError(Exception):\n \"\"\"Exception for problems with sitemapindexes\"\"\"\n pass\n\nclass ListBaseWithIndex(ListBase):\n\n def __init__(self, resources=None, md=None, ln=None, allow_multifile=None, mapper=None):\n super(ListBaseWithIndex, self).__init__(resources=resources, md=md, ln=ln)\n # specific to lists with indexes\n self.resources_class=dict\n self.max_sitemap_entries=50000\n self.mapper = mapper\n self.allow_multifile = (True if (allow_multifile is None) else allow_multifile)\n self.check_url_authority = False\n self.content_length = 0\n self.num_files = 0 # Number of files read\n self.bytes_read = 0 # Aggregate of content_length values\n\n ##### General sitemap methods that also handle sitemapindexes #####\n\n def write(self, basename='/tmp/sitemap.xml', **kwargs):\n \"\"\"Write one or a set of sitemap files to disk\n\n resources is a ResourceContainer that may be an ResourceList or\n a ChangeList. 
This may be a generator so data is read as needed\n and length is determined at the end.\n\n basename is used as the name of the single sitemap file or the \n sitemapindex for a set of sitemap files.\n\n Uses self.max_sitemap_entries to determine whether the resource_list can \n be written as one sitemap. If there are more entries and \n self.allow_multifile is set true then a set of sitemap files, \n with an sitemapindex, will be written.\n \"\"\"\n # Access resources through iterator only\n resources_iter = iter(self.resources)\n ( chunk, next ) = self.get_resources_chunk(resources_iter)\n s = Sitemap(**kwargs)\n if (next is not None):\n # Have more than self.max_sitemap_entries => sitemapindex\n if (not self.allow_multifile):\n raise ListBaseIndexError(\"Too many entries for a single sitemap but multifile disabled\")\n # Work out how to name the sitemaps, attempt to add %05d before \".xml$\", else append\n sitemap_prefix = basename\n sitemap_suffix = '.xml'\n if (basename[-4:] == '.xml'):\n sitemap_prefix = basename[:-4]\n # Use iterator over all resources and count off sets of\n # max_sitemap_entries to go into each sitemap, store the\n # names of the sitemaps as we go\n sitemaps=ListBase()\n while (len(chunk)>0):\n file = sitemap_prefix + ( \"%05d\" % (len(sitemaps)) ) + sitemap_suffix\n self.logger.info(\"Writing sitemap %s...\" % (file))\n f = open(file, 'w')\n s.resources_as_xml(chunk, fh=f)\n f.close()\n # Record information about this sitemap for index\n r = Resource( uri = self.mapper.dst_to_src(file),\n path = file,\n timestamp = os.stat(file).st_mtime,\n md5 = compute_md5_for_file(file) )\n sitemaps.add(r)\n # Get next chunk\n ( chunk, next ) = self.get_resources_chunk(resources_iter,next)\n self.logger.info(\"Wrote %d sitemaps\" % (len(sitemaps)))\n f = open(basename, 'w')\n self.logger.info(\"Writing sitemapindex %s...\" % (basename))\n s.resources_as_xml(resources=sitemaps,sitemapindex=True,fh=f)\n f.close()\n self.logger.info(\"Wrote sitemapindex %s\" % (basename))\n else:\n f = open(basename, 'w')\n self.logger.info(\"Writing sitemap %s...\" % (basename))\n s.resources_as_xml(chunk, fh=f)\n f.close()\n self.logger.info(\"Wrote sitemap %s\" % (basename))\n\n\n def read(self, uri=None, resources=None, capability=None, index_only=False):\n \"\"\"Read sitemap from a URI including handling sitemapindexes\n\n If index_only is True then individual sitemaps references in a sitemapindex\n will not be read. This will result in no resources being returned and is\n useful only to read the metadata and links listed in the sitemapindex.\n\n Includes the subtlety that if the input URI is a local file and is a \n sitemapindex which contains URIs for the individual sitemaps, then these\n are mapped to the filesystem also.\n \"\"\"\n try:\n fh = URLopener().open(uri)\n self.num_files += 1\n except IOError as e:\n raise IOError(\"Failed to load sitemap/sitemapindex from %s (%s)\" % (uri,str(e)))\n # Get the Content-Length if we can (works fine for local files)\n try:\n self.content_length = int(fh.info()['Content-Length'])\n self.bytes_read += self.content_length\n self.logger.debug( \"Read %d bytes from %s\" % (self.content_length,uri) )\n except KeyError:\n # If we don't get a length then c'est la vie\n self.logger.debug( \"Read ????? bytes from %s\" % (uri) )\n pass\n self.logger.info( \"Read sitemap/sitemapindex from %s\" % (uri) )\n s = Sitemap()\n s.parse_xml(fh=fh,resources=self,capability='resourcelist')\n # what did we read? 
sitemap or sitemapindex?\n if (s.parsed_index):\n # sitemapindex\n if (not self.allow_multifile):\n raise ListBaseIndexError(\"Got sitemapindex from %s but support for sitemapindex disabled\" % (uri))\n self.logger.info( \"Parsed as sitemapindex, %d sitemaps\" % (len(self.resources)) )\n sitemapindex_is_file = self.is_file_uri(uri)\n if (index_only):\n # don't read the component sitemaps\n self.sitemapindex = True\n return\n # now loop over all entries to read each sitemap and add to resources\n sitemaps = self.resources\n self.resources = self.resources_class()\n self.logger.info( \"Now reading %d sitemaps\" % len(sitemaps.uris()) )\n for sitemap_uri in sorted(sitemaps.uris()):\n self.read_component_sitemap(uri,sitemap_uri,s,sitemapindex_is_file)\n else:\n # sitemap\n self.logger.info( \"Parsed as sitemap, %d resources\" % (len(self.resources)) )\n\n\n def read_component_sitemap(self, sitemapindex_uri, sitemap_uri, sitemap, sitemapindex_is_file):\n \"\"\"Read a component sitemap of a Resource List with index\n \"\"\"\n if (sitemapindex_is_file):\n if (not self.is_file_uri(sitemap_uri)):\n # Attempt to map URI to local file\n remote_uri = sitemap_uri\n sitemap_uri = self.mapper.src_to_dst(remote_uri)\n self.logger.info(\"Mapped %s to local file %s\" % (remote_uri, sitemap_uri))\n else:\n # The individual sitemaps should be at a URL (scheme/server/path)\n # that the sitemapindex URL can speak authoritatively about\n if (self.check_url_authority and\n not UrlAuthority(sitemapindex_uri).has_authority_over(sitemap_uri)):\n raise ListBaseIndexError(\"The sitemapindex (%s) refers to sitemap at a location it does not have authority over (%s)\" % (sitemapindex_uri,sitemap_uri))\n try:\n fh = URLopener().open(sitemap_uri)\n self.num_files += 1\n except IOError as e:\n raise ListBaseIndexError(\"Failed to load sitemap from %s listed in sitemap index %s (%s)\" % (sitemap_uri,sitemapindex_uri,str(e)))\n # Get the Content-Length if we can (works fine for local files)\n try:\n self.content_length = int(fh.info()['Content-Length'])\n self.bytes_read += self.content_length\n except KeyError:\n # If we don't get a length then c'est la vie\n pass\n self.logger.info( \"Reading sitemap from %s (%d bytes)\" % (sitemap_uri,self.content_length) )\n sitemap.parse_xml( fh=fh, resources=self.resources, sitemapindex=False )\n\n\n def index_as_xml(self,**kwargs):\n \"\"\"Return XML serialization of this list taken to be sitemapindex entries\n\n \"\"\"\n self.default_capability_and_modified()\n s = Sitemap(**kwargs)\n return s.resources_as_xml(self,sitemapindex=True)\n\n\n ##### Utility ##### \n\n def get_resources_chunk(self, resource_iter, first=None):\n \"\"\"Return next chunk of resources from resource_iter, and next item\n \n If first parameter is specified then this will be prepended to\n the list.\n\n The chunk will contain self.max_sitemap_entries if the iterator \n returns that many. next will have the value of the next value from\n the iterator, providing indication of whether more is available. 
\n        Use this as first when asking for the following chunk.\n        \"\"\"\n        chunk = ListBase()\n        chunk.capability_name = self.capability_name\n        chunk.capability_md = self.capability_md\n        chunk.default_capability_and_modified()\n        if (first is not None):\n            chunk.add(first)\n        for r in resource_iter:\n            chunk.add(r)\n            if (len(chunk)>=self.max_sitemap_entries):\n                break\n        # Get next to see whether there are more resources\n        try:\n            next = resource_iter.next()\n        except StopIteration:\n            next = None\n        return(chunk,next)\n    \n    def is_file_uri(self, uri):\n        \"\"\"Return true if uri looks like a local file URI, false otherwise\n        \n        Test is to see whether have either an explicit file: URI or whether\n        there is no scheme name.\n        \"\"\"\n        return(re.match('file:',uri) or not re.match('\\w{3,4}:',uri))\n","sub_path":"resync/list_base_with_index.py","file_name":"list_base_with_index.py","file_ext":"py","file_size_in_byte":10338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"565296012","text":"#!/usr/bin/env python3\n\n##!/home/[username]/miniconda3/bin/python\n\nimport numpy as np\nimport json\nimport os\nfrom joblib import Parallel, delayed\nimport sys\nfrom process import tweet_text_cleaner, time_split, get_users\nimport time\n\n# label\nTYPE = sys.argv[1]\nLABEL = sys.argv[2]\n\n# number of threads\nn_threads = int(sys.argv[3])\n\n# directories\ndir_0 = TYPE+'/'+LABEL+'/'\ndir_1 = TYPE+'-processed/'\ntry:\n    os.mkdir(dir_1)\nexcept:\n    pass\n\ndef organizer(json_object,main_storage,user_storage):\n    try:\n        # load json object\n        json_file_i = json.loads(json_object)\n\n        # clean texts\n        txts, hts, ums = tweet_text_cleaner(json_file_i['text'])\n        for ums_i in ums:\n            ums_i = ums_i.replace('@','')\n            try:\n                test = user_storage[ums_i]\n            except:\n                user_storage[ums_i] = {}\n        if hts != []:\n            hts = ','.join(hts)\n        else:\n            hts = '*'\n        if ums != []:\n            ums = ','.join(ums)\n        else:\n            ums = '*'\n\n        # split time\n        created_at, Z = time_split(json_file_i['datetime'],by='second')\n\n        # check urls\n        try:\n            medias = json_file_i['medias']\n            mds_list = ','.join(medias)\n        except:\n            mds_list = '*'\n\n        # numerical data\n        nfv = int(json_file_i['nbr_favorite'])\n        nrt = int(json_file_i['nbr_retweet'])\n        nrp = int(json_file_i['nbr_reply'])\n\n        # store user screen names\n        usn = json_file_i['usernameTweet']\n        usn = usn.replace('@','')\n        try:\n            test = user_storage[usn]\n        except:\n            user_storage[usn] = {}\n\n        # check if it exists and update the latest\n        try:\n            test = main_storage[int(json_file_i['ID'])]\n            nfv_prev = test['favorite_count']\n            nrt_prev = test['retweet_count']\n            nrp_prev = test['reply_count']\n\n            if nfv_prev != nfv:\n                main_storage[int(json_file_i['ID'])]['favorite_count'] = nfv\n            if nrt_prev != nrt:\n                main_storage[int(json_file_i['ID'])]['retweet_count'] = nrt\n            if nrp_prev != nrp:\n                main_storage[int(json_file_i['ID'])]['reply_count'] = nrp\n        except:\n            # populate user\n            user_storage[usn]['user_id'] = json_file_i['user_id']\n\n            # populate tweet\n            main_storage[int(json_file_i['ID'])] = {}\n            main_storage[int(json_file_i['ID'])]['user_screen_name'] = json_file_i['usernameTweet']\n            main_storage[int(json_file_i['ID'])]['user_id'] = json_file_i['user_id']\n            main_storage[int(json_file_i['ID'])]['text'] = txts\n            main_storage[int(json_file_i['ID'])]['hashtags'] = hts\n            main_storage[int(json_file_i['ID'])]['usermentions'] = ums\n            main_storage[int(json_file_i['ID'])]['favorite_count'] = nfv\n            main_storage[int(json_file_i['ID'])]['retweet_count'] = nrt\n            
main_storage[int(json_file_i['ID'])]['reply_count'] = nrp\n main_storage[int(json_file_i['ID'])]['year'] = created_at[0]\n main_storage[int(json_file_i['ID'])]['month'] = created_at[1]\n main_storage[int(json_file_i['ID'])]['day'] = created_at[2]\n main_storage[int(json_file_i['ID'])]['hour'] = created_at[3]\n main_storage[int(json_file_i['ID'])]['minute'] = created_at[4]\n main_storage[int(json_file_i['ID'])]['second'] = created_at[5]\n main_storage[int(json_file_i['ID'])]['is_reply'] = json_file_i['is_reply']\n main_storage[int(json_file_i['ID'])]['is_retweet'] = json_file_i['is_retweet']\n main_storage[int(json_file_i['ID'])]['urls'] = mds_list\n except:\n pass\n\ndef user_assign_patch(k,user,user_storage):\n try:\n patch = '['+LABEL[0].upper()+'-USN'+str(k)+']'\n user_storage[user]['patch'] = patch\n except:\n pass\n\ndef user_patcher(k,tweet_storage,user_storage):\n try:\n # patch user screen name\n usn = tweet_storage[k]['user_screen_name']\n usn_patch = user_storage[usn]['patch']\n tweet_storage[k]['user_screen_name'] = usn_patch\n\n # patch user mentions\n ums = tweet_storage[k]['usermentions']\n if ums != '*':\n ums = ums.split(',')\n ums_patched = []\n for ums_i in ums:\n ums_i = ums_i.replace('@','')\n ums_i_patch = user_storage[ums_i]['patch']\n ums_patched.append(ums_i_patch)\n tweet_storage[k]['usermentions'] = ','.join(ums_patched)\n\n # patch users on texts\n text = tweet_storage[k]['text']\n usns = get_users(text)\n text_patched = text\n if usns != []:\n for usns_i in usns:\n usns_i_noat = usns_i.replace('@','')\n usns_i_patch = user_storage[usns_i_noat]['patch']\n text_patched = text_patched.replace(usns_i,usns_i_patch)\n tweet_storage[k]['text'] = text_patched\n except:\n pass\n\ndef user_patch_switcher(k,user_storage,user_storage_key):\n try:\n patch_k = user_storage[k]['patch']\n user_storage_key[patch_k] = k\n user_storage_key[k] = patch_k\n except:\n pass\n\nprint('organizing '+TYPE+'-'+LABEL+'...')\nstart = time.time()\ntweets_storage = {}\nusers_storage_info = {}\nwith open(dir_0+LABEL+'.json','r',encoding='utf-8') as in_file:\n Parallel(n_jobs=n_threads,backend='threading')(delayed(organizer)(i,tweets_storage,users_storage_info) for i in in_file)\n\nusers = users_storage_info.keys()\nParallel(n_jobs=n_threads,backend='threading')(delayed(user_assign_patch)(i,j,users_storage_info) for i,j in enumerate(users))\n\ntweets = tweets_storage.keys()\nParallel(n_jobs=n_threads,backend='threading')(delayed(user_patcher)(i,tweets_storage,users_storage_info) for i in tweets)\n\nusers_storage_key= {}\nParallel(n_jobs=n_threads,backend='threading')(delayed(user_patch_switcher)(i,users_storage_info,users_storage_key) for i in users)\nusers_storage = {'key':users_storage_key,'information':users_storage_info}\n\nnp.save(dir_1+TYPE+'-'+LABEL+'-tweets.npy',tweets_storage)\nnp.save(dir_1+TYPE+'-'+LABEL+'-users.npy',users_storage)\nend = time.time()\nprint('time elapsed: '+str(end-start)+' seconds')\n","sub_path":"organize-tweets.py","file_name":"organize-tweets.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"449764836","text":"#!/usr/bin/env python\n\"\"\"Simple functions of combinatorics.\"\"\"\n\n# Logging facility\nfrom logging import getLogger\n\n__author__ = \"Bin Gao\"\n__copyright__ = \"Copyright 2007, 2008, 2009, 2010\"\n__credits__ = [\"Radovan Bast\", \"Jun Jiang\", \"Kai Liu\", \"Yi Luo\",\n \"Kenneth Ruud\", \"Andreas J. 
Thorvaldsen\"]\n__license__ = \"LGPLv3\"\n__version__ = \"0.3.0\"\n__maintainer__ = \"Bin Gao\"\n__email__ = \"bin.gao@uit.no\"\n__status__ = \"Development\"\n\n# Set up a specific logger with our desired output level\nlogger = getLogger(\"Combinatorics\")\n\ndef factorial(x):\n \"\"\"Simple factorial, for the version of Python < 2.6\"\"\"\n if x >= 1:\n return x*factorial(x-1)\n return 1\n\ndef double_factorial(x):\n \"\"\"Simple double factorial.\"\"\"\n if x >= 1:\n return x*double_factorial(x-2)\n return 1\n","sub_path":"BioNanoLEGO/Combinatorics.py","file_name":"Combinatorics.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"566255092","text":"def get_dimension(self, folder_path):\r\n \r\n dir_list = os.listdir(self.folder_path)\r\n for scan_dir in dir_list:\r\n file_path = self.folder_path + '\\\\' + scan_dir\r\n if os.path.isdir(file_path):\r\n method_path = self.find_fid(file_path)\r\n\r\n with open(method_path, mode='rb') as file: \r\n method_r = file.read()\r\n f=method_r.find(b'PVM_Matrix=( 2 )\\n')\r\n dimension_locked=method_r[f+17:f+24]\r\n arr=np.zeros(2)\r\n arr[0]=(str(dimension_locked)[2:5])\r\n arr[0]=int(arr[0])\r\n arr[1]=(str(dimension_locked)[6:9])\r\n arr[1]=int(arr[1])\r\n return arr","sub_path":"get_dimension.py","file_name":"get_dimension.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"443114978","text":"import komand\nfrom komand.exceptions import PluginException\nfrom .schema import ApproveFileLocallyInput, ApproveFileLocallyOutput\n\n# Custom imports below\nimport json\nimport requests\n\n\nclass ApproveFileLocally(komand.Action):\n def __init__(self):\n super(self.__class__, self).__init__(\n name=\"approve_file_locally\",\n description=\"Approve file locally\",\n input=ApproveFileLocallyInput(),\n output=ApproveFileLocallyOutput(),\n )\n\n def run(self, params={}):\n file_id = params.get(\"file_id\")\n url = self.connection.host + \"/api/bit9platform/v1/fileInstance/%d\" % file_id\n\n self.logger.info(\"Getting file instance info...\")\n file_info_request = self.connection.session.get(url, verify=self.connection.verify)\n\n try:\n file_info_request.raise_for_status()\n except requests.exceptions.RequestException as e:\n self.logger.info(f\"Call to Carbon Black raised exception: {e}\")\n raise PluginException(\n cause=\"Call to Carbon Black failed!\",\n assistance=\"The connection may not be configured properly or an invalid\" \" file ID was found.\",\n )\n\n file_instance_object = file_info_request.json()\n file_instance_object[\"localState\"] = 2\n\n self.logger.info(\"Approving local file...\")\n\n r = self.connection.session.put(url, json.dumps(file_instance_object), verify=self.connection.verify)\n\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n self.logger.info(f\"Request content: {r.text}\")\n self.logger.info(f\"Call to Carbon Black raised exception: {e}\")\n raise PluginException(\n cause=\"Call to Carbon Black failed\",\n assistance=\"The connection may not be configured properly, please\" \"check your connection settings.\",\n )\n\n result = komand.helper.clean(r.json())\n\n return {\"file_instance\": result}\n\n def test(self):\n url = (\n self.connection.host + \"/api/bit9platform/v1/approvalRequest?limit=-1\"\n ) # -1 returns just the count (lightweight call)\n\n request = self.connection.session.get(url=url, 
verify=self.connection.verify)\n\n try:\n request.raise_for_status()\n except:\n raise Exception(\"Run: HTTPError: %s\" % request.text)\n\n return {}\n","sub_path":"plugins/carbon_black_protection/komand_carbon_black_protection/actions/approve_file_locally/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"8199400","text":"from django.shortcuts import render, redirect\nfrom anuncios.models import Anuncios, Categoria, Estado\nfrom clientes.views import get_cliente_logado\nfrom anuncios.forms import CadastrarAnuncioForm, AtualizarAnuncioForm, BuscarAnuncioForm\nfrom django.views.generic.base import View\nfrom clientes.models import Cliente\nfrom django.core.exceptions import PermissionDenied\nfrom clientes.views import anuncios_vendedor\nfrom django.contrib.auth.decorators import login_required, permission_required\n\n# Create your views here.\n\ndef anuncios(request):\n\tif not request.user.is_authenticated():\n\t\treturn render(request, 'anuncios.html', {\"anuncios\": Anuncios.objects.all()})\n\telse:\n\t\treturn render(request, 'anuncios.html', {'cliente_logado': get_cliente_logado(request),\"anuncios\": Anuncios.objects.all()})\n\ndef categorias(request):\n\tif not request.user.is_authenticated():\n\t\treturn render(request, 'categorias.html', {'categoria': Categoria.objects.all()} )\n\telse:\n\t\treturn render(request, 'categorias.html', {'categoria': Categoria.objects.all(), 'cliente_logado': get_cliente_logado(request)} )\n\t\t\n\ndef estados(request):\n\tif not request.user.is_authenticated():\n\t\treturn render(request, 'estados.html', {'uf': Estado.objects.all()} )\n\n\telse:\n\t\treturn render(request, 'estados.html', {'uf': Estado.objects.all(), 'cliente_logado': get_cliente_logado(request)} )\n\ndef exibir(request, anuncios_id):\n\tanuncios = Anuncios.objects.get(id=anuncios_id)\n\t#vend = Cliente.objects.get(nome=Anuncios.cliente_id)\n\tif not request.user.is_authenticated():\n\t\treturn render(request, 'exibir-anuncios.html', {'anuncios' : anuncios})\n\n\telse:\n\t\treturn render(request, 'exibir-anuncios.html', {'anuncios' : anuncios, \n\t\t\t'cliente_logado': get_cliente_logado(request)})\n\ndef anuncio_estado(request, estado_id):\n\testado_ida = Estado.objects.get(id=estado_id)\n\ta_estado = Anuncios.objects.all()\n\tif not request.user.is_authenticated():\n\t\treturn render(request, 'anuncios-estado.html', {'es': estado_ida,'anu' : a_estado})\n\n\telse:\n\t\treturn render(request, 'anuncios-estado.html', {'es': estado_ida,'anu' : a_estado,\n\t\t\t 'cliente_logado': get_cliente_logado(request)})\n\ndef anuncio_categoria(request, categoria_id):\n\tcategoria_ida = Categoria.objects.get(id=categoria_id)\n\ta_categoria = Anuncios.objects.all()\n\tif not request.user.is_authenticated():\n\t\treturn render(request, 'anuncios-categoria.html', {'ca': categoria_ida ,'anu' : a_categoria})\n\telse:\n\t\treturn render(request, 'anuncios-categoria.html', {'ca': categoria_ida ,'anu' : a_categoria, 'cliente_logado': get_cliente_logado(request)})\n\t\n\t\t\n\n@login_required\ndef excluir(request, anuncios_id):\n\tanunc = Anuncios.objects.get(id=anuncios_id)\n\tcliente_log = get_cliente_logado(request)\n\tclie = Cliente.objects.get(id=anunc.cliente_id.id)\t\n\n\t\n\tif clie == cliente_log:\n\t\tanunc.delete()\n\t\treturn render(request, 'sucesso.html', {'cliente_logado' : get_cliente_logado(request)})\n\telse:\n\t\traise PermissionDenied\nclass 
CadastrarAnuncioView(View):\n\n\ttemplate_name = 'cadastrar-anuncio.html'\n\t\n\tdef get(self, request):\n\t\tif not request.user.is_authenticated():\n\t\t\treturn redirect('login')\n\t\telse:\n\t\t\treturn render(request, self.template_name, {'est': Estado.objects.all() ,\n\t\t\t\t'cat': Categoria.objects.all() ,'cliente_logado' : get_cliente_logado(request)})\n\n\tdef post(self, request):\n\t\tform = CadastrarAnuncioForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tdados_form = form.data\n\t\t\tcliente = get_cliente_logado(request)\t\n\t\t\tcategoria_id = Categoria.objects.get(tipo=dados_form['categoria_id'])\n\t\t\testado_id = Estado.objects.get(nome=dados_form['estado_id'])\t\n\t\t\tanunc = Anuncios(titulo=dados_form['titulo'],\n\t\t\t\t\t\t\tdescricao= dados_form['descricao'], #\n\t\t\t\t\t\t\tpreco= dados_form['preco'],#\n\t\t\t\t\t\t\tcategoria_id= categoria_id,\n\t\t\t\t\t\t\testado_id=estado_id,\n\t\t\t\t\t\t\tcliente_id = cliente)\n\t\t\tanunc.save()\n\t\t\treturn render(request, 'sucesso.html', {'cliente_logado': get_cliente_logado(request)})\n\n\t\treturn render(request, self.template_name, {'formulario' : form ,'est': Estado.objects.all() ,\n\t\t\t'cat': Categoria.objects.all() ,'cliente_logado' : get_cliente_logado(request) }) \n\nclass AtualizarAnuncioView(View):\n\n\ttemplate_name = 'atualizar-anuncios.html'\n\t\n\n\tdef get(self, request, anuncios_id):\n\t\tanunciante = Anuncios.objects.get(id=anuncios_id)\n\t\tif not request.user.is_authenticated():\n\t\t\treturn redirect('login')\n\t\telse:\n\t\t\tif anunciante.cliente_id == get_cliente_logado(request):\n\t\t\t\treturn render(request, self.template_name, { 'est': Estado.objects.all() ,\n\t\t\t\t'cat': Categoria.objects.all() ,'anuncio': Anuncios.objects.get(id=anuncios_id) ,\n\t\t\t\t\t\t'cliente_logado' : get_cliente_logado(request)})\n\t\t\telse:\n\t\t\t\traise PermissionDenied\n\t\t\n\t\t\t\n\t\t\n\t\t\n\tdef post(self, request, anuncios_id):\n\t\tform = AtualizarAnuncioForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tdados_form = form.data\n\t\t\t\n\t\t\tan = Anuncios.objects.get(id=anuncios_id)\n\t\t\tan.titulo = dados_form['titulo']\n\t\t\tan.descricao = dados_form['descricao']\n\t\t\tan.preco = dados_form['preco']\n\t\t\tan.categoria_id = Categoria.objects.get(tipo=dados_form['categoria_id'])\n\t\t\tan.estado_id = Estado.objects.get(nome=dados_form['estado_id'])\n\t\t\tan.cliente_id = get_cliente_logado(request)\n\t\t\t#cliente.usuario = cliente.usuario\n\t\t\tan.save()\n\t\t\treturn render(request, 'sucesso.html', {'cliente_logado': get_cliente_logado(request)})\n\n\t\treturn render(request, self.template_name, {'anuncio': Anuncios.objects.get(id=anuncios_id), 'formulario' : form , 'cliente_logado' : get_cliente_logado(request) }) \n\nclass BuscarAnuncioView(View):\n\n\ttemplate_name = 'buscar-anuncios.html'\n\t\n\t\n\n\tdef post(self, request):\n\t\tform = BuscarAnuncioForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tdados_form = form.data\n\t\t\tan = Anuncios.objects.filter(titulo__icontains= dados_form['busca'])\n\t\t\t\n\t\t\tif not request.user.is_authenticated():\n\t\t\t\treturn render(request, 'buscar-anuncios.html', {'resultado': an})\n\t\t\telse:\n\t\t\t\treturn render(request, 'buscar-anuncios.html', {'resultado': an, \n\t\t\t\t\t'cliente_logado': get_cliente_logado(request)})\n\n\t\t#return render(request, self.template_name, {'formulario' : form , 'cliente_logado' : get_cliente_logado(request) }) 
","sub_path":"anuncios/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"318271215","text":"import copy\nfrom api.domain.scene import Scene\n\nRESOURCE_DEF = {\n 'login': {\n \"errorCode\": None,\n \"error\": \"\",\n \"data\": \"2fd976601eef1ebd632b545a8fef11a3\",\n \"api_version\": \"1.4.1\"\n },\n 'logout': {\n \"errorCode\": None,\n \"error\": \"\",\n \"data\": True,\n \"api_version\": \"1.4.1\"\n },\n 'idLookup': {\n \"errorCode\": None,\n \"error\": \"\",\n \"data\": {\n \"LC08_L1TP_156063_20170207_20170216_01_T1\": \"LC81560632017038LGN00\",\n \"LE07_L1TP_028028_20130510_20160908_01_T1\": \"LE70280282013130EDC00\",\n \"LT05_L1TP_032028_20120425_20160830_01_T1\": \"LT50320282012116EDC00\",\n \"INVALID_ID\": None\n }\n },\n 'downloadoptions': {\n \"errorCode\": None,\n \"error\": \"\",\n \"data\": [{'entityId': 'LC81560632017038LGN00', 'downloadOptions': [{'downloadCode': \"STANDARD\", 'available': True}]},\n {'entityId': 'LE70280282013130EDC00', 'downloadOptions': [{'downloadCode': \"STANDARD\", 'available': True}]},\n {'entityId': 'LT50320282012116EDC00', 'downloadOptions': [{'downloadCode': \"STANDARD\", 'available': True}]}]\n },\n 'download': {\n \"errorCode\": None,\n \"error\": \"\",\n \"data\": [\n {\"entityId\": \"LC81560632017038LGN00\",\n \"product\": \"STANDARD\",\n \"url\": \"http://invalid.com/path/to/downloads/l1/2014/013/029/LC81560632017038LGN00.tar.gz?iid=LC81560632017038LGN00&did=63173803&ver=\"},\n {\"entityId\": \"LE70280282013130EDC00\",\n \"product\": \"STANDARD\",\n \"url\": \"http://invalid.com/path/to/downloads/l1/2014/013/029/LE70280282013130EDC00.tar.gz?iid=LE70280282013130EDC00&did=63173803&ver=\"},\n {\"entityId\": \"LT50320282012116EDC00\",\n \"product\": \"STANDARD\",\n \"url\": \"http://invalid.com/path/to/downloads/l1/2014/013/029/LT50320282012116EDC00.tar.gz?iid=LT50320282012116EDC00&did=63173803&ver=\"}\n ],\n \"api_version\": \"1.4.1\"\n },\n 'userContext': {\n \"errorCode\": None,\n \"error\": \"\",\n \"data\": True\n },\n 'clearUserContext': {\n \"errorCode\": None,\n \"error\": \"\",\n \"data\": True\n }\n}\n\n\nclass RequestsSpoof(object):\n def __init__(self, *args, **kwargs):\n self.url = args[0]\n self.resource = self.url.split('/')[-1]\n\n self.ok = True\n self.data = RESOURCE_DEF.get(self.resource)\n self.content = str(self.data)\n\n def __repr__(self):\n message = ('REQUEST SPOOF'\n '\\n\\tURL: {}'\n '\\n\\tRESOURCE: {}'\n '\\n\\tDATA:{}').format(self.url, self.resource, self.data)\n return message\n\n def json(self):\n return self.data\n\n def raise_for_status(self):\n pass\n\n\nclass BadRequestSpoofError(RequestsSpoof):\n def __init__(self, *args, **kwargs):\n super(BadRequestSpoofError, self).__init__(*args, **kwargs)\n self.data = copy.deepcopy(self.data)\n\n if 'data' in self.data:\n self.data['data'] = None\n if 'errorCode' in self.data:\n self.data['errorCode'] = 'UNKNOWN'\n if 'error' in self.data:\n self.data['error'] = 'A fake server error occurred'\n\n\nclass BadRequestSpoofNegative(RequestsSpoof):\n def __init__(self, *args, **kwargs):\n super(BadRequestSpoofNegative, self).__init__(*args, **kwargs)\n self.data = copy.deepcopy(self.data)\n\n if 'data' in self.data and isinstance(self.data.get('data'), bool):\n self.data['data'] = not(self.data.get('data'))\n\n\nclass CachedRequestPreventionSpoof(object):\n def __init__(self, *args, **kwargs):\n raise RuntimeError('Should only require Cached 
values!')\n\n\n# ----------------------------------------+\n# Validation API testing |\ndef get_cache_values(self, product_list):\n response = {i: 'LC08_L1TP_025027_20160521_20170223_01_T1' for i in product_list}\n return response\n\n\n# ----------------------------------------+\n# Production API testing |\ndef get_download_urls(token, contactid, product_list, usage):\n response = {'LC08_L1TP_025027_20160521_20170223_01_T1': 'http://one_time_use.tar.gz' for i in product_list}\n return response\n\ndef get_cached_convert(token, product_list):\n response = {i: 'LC08_L1TP_025027_20160521_20170223_01_T1' for i in product_list}\n return response\n\ndef get_cached_session():\n return '2fd976601eef1ebd632b545a8fef11a3'\n\ndef check_valid_landsat(token, prod_name_list):\n _scenes = Scene.where({\"status\":\"submitted\", \"sensor_type\":\"landsat\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: True}\n\ndef check_valid_modis(token, prod_name_list):\n _scenes = Scene.where({\"status\":\"submitted\", \"sensor_type\":\"modis\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: True}\n\ndef check_valid_modis_unavailable(token, prod_name_list):\n _scenes = Scene.where({\"status\":\"submitted\", \"sensor_type\":\"modis\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: False}\n\n\ndef check_valid_viirs(token, prod_name_list):\n _scenes = Scene.where({\"status\":\"submitted\", \"sensor_type\":\"viirs\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: True}\n\ndef check_valid_sentinel(token, prod_name_list):\n _scenes = Scene.where({\"status\": \"submitted\", \"sensor_type\":\"sentinel\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: True}\n\ndef check_valid_modis_false(token, prod_name_list):\n _scenes = Scene.where({\"status\":\"submitted\", \"sensor_type\":\"modis\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: False}\n\ndef check_valid_viirs_false(token, prod_name_list):\n _scenes = Scene.where({\"status\":\"submitted\", \"sensor_type\":\"viirs\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: False}\n\ndef check_valid_sentinel_false(token, prod_name_list):\n _scenes = Scene.where({\"status\": \"submitted\", \"sensor_type\": \"sentinel\"})\n _names = [s.name for s in _scenes]\n return {_names[0]: False}\n\ndef get_user_name(token, contactid, ipaddr):\n return 'klmsith@usgs.gov'\n\ndef get_order_status(token, tramid):\n response = None\n if tramid == sample_tram_order_ids()[0]:\n response = {'units': [{'orderingId':sample_scene_names()[0], 'statusCode': 'R'}]}\n elif tramid == sample_tram_order_ids()[1]:\n response = {'units': [{'orderingId':sample_scene_names()[1], 'statusCode': 'C'}]}\n elif tramid == sample_tram_order_ids()[2]:\n response = {'units': [{'orderingId':sample_scene_names()[2], 'statusCode': 'R'}]}\n else:\n response = {'units': [{'orderingId': sample_scene_names()[0], 'statusCode': 'C'}]}\n return response\n\ndef update_order_status(token, ee_order_id, ee_unit_id, something):\n return True, True, True\n\n\ndef update_order_status_fail(token, ee_order_id, ee_unit_id, something):\n raise Exception('lta comms failed')\n\ndef sample_tram_order_ids():\n return '0611512239617', '0611512239618', '0611512239619'\n\ndef sample_scene_names():\n return 'LC81370432014073LGN00', 'LC81390422014071LGN00', 'LC81370422014073LGN00'\n\ndef get_available_orders_partial(token, contactid, partial=False):\n units = [{u'datasetName': None,\n u'displayId': None,\n u'entityId': None,\n u'orderingId': 
u'LT05_L1GS_125061_19871229_20170210_01_T2',\n u'productCode': u'SR05',\n u'productDescription': u'LANDSAT TM COLLECTIONS LAND SURFACE REFLECTANCE ON-DEMAND',\n u'statusCode': None,\n u'statusText': None,\n u'unitNumber': 1}]\n\n ret = [{u'contactId': contactid,\n u'orderNumber': u'0101905173361',\n u'statusCode': u'Q',\n u'statusText': u'Queued for Processing',\n u'units': units}]\n\n if partial:\n units.append({u'datasetName': None,\n u'displayId': None,\n u'entityId': None,\n u'orderingId': u'LT05_L1TP_025027_20110913_20160830_01_T1',\n u'productCode': u'SR05',\n u'productDescription': u'LANDSAT TM COLLECTIONS LAND SURFACE REFLECTANCE ON-DEMAND',\n u'statusCode': None,\n u'statusText': None,\n u'unitNumber': 2})\n\n ret = [{u'contactId': contactid,\n u'orderNumber': u'0101905173361',\n u'statusCode': u'Q',\n u'statusText': u'Queued for Processing',\n u'units': units}]\n\n return ret\n","sub_path":"api/external/mocks/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":8577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"350105145","text":"#!/usr/bin/env python\n# coding:utf-8\n\"\"\"\n Purpose: Soft SVM\n Created: 11/06/2017\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\n\nstyle.use('bmh')\n\n\ndef main():\n X = pd.read_csv('data_set_3_X.csv').values\n print(\"X shape: \", X.shape)\n\n X1 = X[:, 0].reshape(-1, 1)\n X2 = X[:, 1].reshape(-1, 1)\n X3 = X[:, 2].reshape(-1, 1)\n X_prime = np.ones((X.shape[0], 1), dtype=np.float64)\n X_prime = np.concatenate((X_prime, X1, X2, X3), axis=1)\n X_prime = np.concatenate((X_prime, np.square(X1), np.square(X2), np.square(X3)), axis=1)\n X_prime = np.concatenate((X_prime, np.multiply(X1, X2), np.multiply(X1, X3), np.multiply(X2, X3)), axis=1)\n X = X_prime\n print(\"X shape: \", X.shape)\n\n y = pd.read_csv('data_set_3_Y.csv').values\n print(\"y shape: \", y.shape)\n x_plt = list(range(500, 4501, 500))\n y_plt = []\n\n for m_prime in x_plt:\n print(\"m_prime: \", m_prime)\n accuracies = []\n for _ in range(10): # cross validation\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=m_prime / 10 ** 5)\n svm = SVC(C=1, kernel='linear')\n svm.fit(X_train, y_train.ravel())\n y_pred = svm.predict(X_test)\n accuracies.append(accuracy_score(y_test, y_pred))\n y_plt.append(sum(accuracies) / len(accuracies))\n\n y_plt = [(1 - i) * 100 for i in y_plt]\n plt.plot(x_plt, y_plt)\n plt.ylim(ymin=0)\n plt.xlabel(\"Number of examples m'\")\n plt.ylabel(\"Error rate (%)\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Électif Machine Learning/data_set_3_kernel_svm/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"430963729","text":"from enum import IntEnum\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\nimport concurrent.futures.thread\nimport time\nimport threading\nfrom typing import Dict, List, Any, Optional\n\nfrom commlib.endpoints import TransportType\nfrom commlib.utils import gen_random_id\nfrom commlib.logger import Logger\nfrom commlib.bridges import TopicBridge, RPCBridge\nfrom commlib.msg import HeartbeatMessage, RPCMessage, DataClass, DataField\n\n\nclass NodePort:\n 
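# NOTE on this module (commlib/node.py) as it continues below:
#  * HeartbeatThread.run() polls self._stop_event.isSet(); that camelCase
#    alias is deprecated and removed in recent Python 3 releases, while the
#    stopped() helper in the same class already uses the portable is_set().
#  * Node._stop_rpc_callback() annotates and instantiates
#    NodeStartMessage.Request/Response even though the stop RPC is
#    registered with msg_type=NodeStopMessage; the two Response classes
#    carry identical fields, so it works, but the matching type is
#    presumably what was intended.
#  * The ThreadPoolExecutor/ProcessPoolExecutor, TopicBridge/RPCBridge and
#    Dict/List typing imports above appear unused in this revision.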
\"\"\"NodePort.\n \"\"\"\n\n def __init__(self):\n pass\n\n\nclass NodeInputPort(NodePort):\n \"\"\"NodeInputPort.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n args:\n kwargs:\n \"\"\"\n super().__init__(*args, **kwargs)\n\n\nclass NodeOutputPort(NodePort):\n \"\"\"NodeOutputPort.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n args:\n kwargs:\n \"\"\"\n super().__init__(*args, **kwargs)\n\n\nclass NodePortType(IntEnum):\n \"\"\"NodePortType.\n \"\"\"\n\n Input = 1\n Output = 2\n\n\nclass NodeExecutorType(IntEnum):\n \"\"\"NodeExecutorType.\n \"\"\"\n\n ProcessExecutor = 1\n ThreadExecutor = 2\n\n\nclass NodeState(IntEnum):\n \"\"\"NodeState.\n \"\"\"\n\n IDLE = 1\n RUNNING = 2\n STOPPED = 4\n EXITED = 3\n\n\nclass HeartbeatThread(threading.Thread):\n \"\"\"HeartbeatThread.\n \"\"\"\n\n def __init__(self, pub_instance=None,\n interval: int = 10,\n logger: Logger = None,\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n pub_instance:\n interval (int): interval\n logger (Logger): logger\n args:\n kwargs:\n \"\"\"\n super().__init__(*args, **kwargs)\n self._stop_event = threading.Event()\n self._rate_secs = interval\n self._heartbeat_pub = pub_instance\n if logger is None:\n logger = Logger(self.__class__.__name__)\n self.logger = logger\n self.daemon = True\n\n def run(self):\n \"\"\"run.\n \"\"\"\n try:\n msg = HeartbeatMessage(ts=self.get_ts())\n while not self._stop_event.isSet():\n self.logger.info(\n f'Sending heartbeat message - {self._heartbeat_pub._topic}')\n if self._heartbeat_pub._msg_type == None:\n self._heartbeat_pub.publish(msg.as_dict())\n else:\n self._heartbeat_pub.publish(msg)\n # Wait for n seconds or until stop event is raised\n self._stop_event.wait(self._rate_secs)\n msg.ts = self.get_ts()\n except Exception as exc:\n self.logger.info(f'Exception in Heartbeat-Thread: {exc}')\n finally:\n self.logger.info('Heartbeat Thread Ended')\n\n def force_join(self, timeout: float = None):\n \"\"\"force_join.\n Sudo stop the thread!\n\n Args:\n timeout (float): timeout\n \"\"\"\n self._stop_event.set()\n threading.Thread.join(self, timeout)\n\n def stop(self):\n \"\"\"stop.\n \"\"\"\n self._stop_event.set()\n\n def stopped(self):\n \"\"\"stopped.\n \"\"\"\n return self._stop_event.is_set()\n\n def get_ts(self):\n \"\"\"get_ts.\n \"\"\"\n timestamp = (time.time() + 0.5) * 1000000\n return int(timestamp)\n\n\n\nclass NodeStartMessage(RPCMessage):\n @DataClass\n class Request(RPCMessage.Request):\n pass\n\n @DataClass\n class Response(RPCMessage.Response):\n status: int = DataField(default=0)\n error: str = DataField(default='')\n\n\nclass NodeStopMessage(RPCMessage):\n @DataClass\n class Request(RPCMessage.Request):\n pass\n\n @DataClass\n class Response(RPCMessage.Response):\n status: int = DataField(default=0)\n error: str = DataField(default='')\n\n\nclass Node:\n \"\"\"Node.\n \"\"\"\n\n def __init__(self,\n node_name: str = '',\n transport_type: TransportType = TransportType.REDIS,\n ## DEPRECATED - Used only for backward compatibility\n transport_connection_params: Any = None,\n connection_params: Any = None,\n remote_logger: bool = False,\n remote_logger_uri: str = '',\n debug: bool = False,\n heartbeat_thread: bool = True,\n heartbeat_uri: Optional[str] = None,\n device_id: Optional[str] = None,\n has_start_rpc: bool = False,\n has_stop_rpc: bool = False):\n \"\"\"__init__.\n\n Args:\n node_name (str): node_name\n transport_type (TransportType): transport_type\n transport_connection_params:\n 
connection_params:\n max_workers (int): max_workers\n remote_logger (bool): remote_logger\n remote_logger_uri (str): remote_logger_uri\n debug (bool): debug\n device_id (str): device_id\n \"\"\"\n if node_name == '' or node_name is None:\n node_name = gen_random_id()\n node_name = node_name.replace('-', '_')\n self._node_name = node_name\n self._debug = debug\n self._heartbeat_thread = heartbeat_thread\n self._heartbeat_uri = heartbeat_uri\n self._hb_thread = None\n self.state = NodeState.IDLE\n self._device_id = device_id\n if device_id is None:\n self._namespace = f'{self._node_name}'\n else:\n self._namespace = f'thing.{device_id}.{self._node_name}'\n\n self._publishers = []\n self._subscribers = []\n self._rpc_services = []\n self._rpc_clients = []\n self._action_services = []\n self._action_clients = []\n self._event_emitters = []\n\n if transport_type == TransportType.REDIS:\n import commlib.transports.redis as comm\n elif transport_type == TransportType.AMQP:\n import commlib.transports.amqp as comm\n elif transport_type == TransportType.MQTT:\n import commlib.transports.mqtt as comm\n else:\n raise ValueError('Transport type is not supported!')\n self._commlib = comm\n\n if transport_connection_params is None:\n if transport_type == TransportType.REDIS:\n from commlib.transports.redis import \\\n UnixSocketConnectionParameters as ConnParams\n elif transport_type == TransportType.AMQP:\n from commlib.transports.amqp import \\\n ConnectionParameters as ConnParams\n elif transport_type == TransportType.MQTT:\n from commlib.transports.mqtt import \\\n ConnectionParameters as ConnParams\n transport_connection_params = ConnParams()\n self._conn_params = transport_connection_params\n if connection_params is not None:\n self._conn_params = connection_params\n\n self._logger = Logger(self._node_name, debug=debug)\n if has_start_rpc:\n self.init_start_service()\n if has_stop_rpc:\n self.init_stop_service()\n self._logger.info(f'Created Node <{self._node_name}>')\n\n def init_heartbeat_thread(self, topic: str = None) -> None:\n \"\"\"init_heartbeat_thread.\n\n Args:\n topic (str): topic\n \"\"\"\n if topic is None:\n topic = f'{self._namespace}.heartbeat'\n self._hb_thread = HeartbeatThread(\n self.create_publisher(topic=topic, msg_type=HeartbeatMessage),\n logger=self._logger\n )\n self._hb_thread.start()\n self._logger.info(\n f'Started Heartbeat Publisher <{topic}> in background')\n\n def init_stop_service(self, uri: str = None) -> None:\n if uri is None:\n uri = f'{self._namespace}.stop'\n stop_rpc = self.create_rpc(rpc_name=uri,\n msg_type=NodeStopMessage,\n on_request=self._stop_rpc_callback)\n stop_rpc.run()\n self._stop_rpc = stop_rpc\n\n def _stop_rpc_callback(self, msg: NodeStartMessage.Request) -> None:\n resp = NodeStartMessage.Response()\n if self.state == NodeState.RUNNING:\n self.state = NodeState.STOPPED\n self.stop()\n else:\n resp.status = 1\n resp.error = 'Cannot make the transition from current state!'\n return resp\n\n def init_start_service(self, uri: str = None) -> None:\n if uri is None:\n uri = f'{self._namespace}.start'\n start_rpc = self.create_rpc(rpc_name=uri,\n msg_type=NodeStartMessage,\n on_request=self._start_rpc_callback)\n start_rpc.run()\n self._start_rpc = start_rpc\n\n def _start_rpc_callback(self, msg: NodeStartMessage.Request) -> None:\n resp = NodeStartMessage.Response()\n if self.state == NodeState.STOPPED:\n self.run()\n else:\n resp.status = 1\n resp.error = 'Cannot make the transition from current state!'\n return resp\n\n @property\n def 
input_ports(self) -> dict:\n return {\n 'subscriber': self._subscribers,\n 'rpc_service': self._rpc_services,\n 'action_service': self._action_services\n }\n\n @property\n def output_ports(self):\n return {\n 'publisher': self._publishers,\n 'rpc_client': self._rpc_clients,\n 'action_client': self._action_clients\n }\n\n @property\n def ports(self):\n return {\n 'input': self.input_ports,\n 'output': self.output_ports\n }\n\n def get_logger(self):\n return self._logger\n\n def run(self) -> None:\n \"\"\"run.\n Starts Services, Subscribers and ActionServices.\n Also starts the heartbeat thread (if enabled).\n\n Args:\n\n Returns:\n None:\n \"\"\"\n for s in self._subscribers:\n s.run()\n for r in self._rpc_services:\n r.run()\n for r in self._action_services:\n r.run()\n if self._heartbeat_thread:\n self.init_heartbeat_thread(self._heartbeat_uri)\n self.state = NodeState.RUNNING\n\n def run_forever(self, sleep_rate: float = 0.001) -> None:\n \"\"\"run_forever.\n Starts Services, Subscribers and ActionServices and blocks\n the main thread from exiting.\n Also starts the heartbeat thread (if enabled).\n\n Args:\n sleep_rate (float): Rate to sleep between wait-state iterations.\n \"\"\"\n if self.state != NodeState.RUNNING:\n self.run()\n while self.state != NodeState.EXITED:\n time.sleep(sleep_rate)\n self.stop()\n\n def stop(self):\n for s in self._subscribers:\n s.stop()\n for r in self._rpc_services:\n r.stop()\n for r in self._action_services:\n r.stop()\n\n def create_publisher(self, *args, **kwargs):\n \"\"\"Creates a new Publisher Endpoint.\n \"\"\"\n pub = self._commlib.Publisher(conn_params=self._conn_params,\n logger = self._logger,\n *args, **kwargs)\n self._publishers.append(pub)\n return pub\n\n def create_subscriber(self, *args, **kwargs):\n \"\"\"Creates a new Publisher Endpoint.\n \"\"\"\n sub = self._commlib.Subscriber(conn_params=self._conn_params,\n logger = self._logger,\n *args, **kwargs)\n self._subscribers.append(sub)\n return sub\n\n def create_rpc(self, *args, **kwargs):\n \"\"\"Creates a new Publisher Endpoint.\n \"\"\"\n rpc = self._commlib.RPCService(conn_params=self._conn_params,\n logger = self._logger,\n *args, **kwargs)\n self._rpc_services.append(rpc)\n return rpc\n\n def create_rpc_client(self, *args, **kwargs):\n \"\"\"Creates a new Publisher Endpoint.\n \"\"\"\n client = self._commlib.RPCClient(conn_params=self._conn_params,\n logger = self._logger,\n *args, **kwargs)\n self._rpc_clients.append(client)\n return client\n\n def create_action(self, *args, **kwargs):\n \"\"\"Creates a new ActionService Endpoint.\n \"\"\"\n action = self._commlib.ActionService(conn_params=self._conn_params,\n logger = self._logger,\n *args, **kwargs)\n self._action_services.append(action)\n return action\n\n def create_action_client(self, *args, **kwargs):\n \"\"\"Creates a new ActionClient Endpoint.\n \"\"\"\n aclient = self._commlib.ActionClient(conn_params=self._conn_params,\n logger = self._logger,\n *args, **kwargs)\n self._action_clients.append(aclient)\n return aclient\n\n def create_event_emitter(self, *args, **kwargs):\n \"\"\"Creates a new EventEmitter Endpoint.\n \"\"\"\n em = self._commlib.EventEmitter(conn_params=self._conn_params,\n logger = self._logger,\n *args, **kwargs)\n self._event_emitters.append(em)\n return em\n","sub_path":"commlib/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":13618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"35699184","text":"import gtk\nimport sys\nfrom 
gettext import gettext as _\n\nfrom ros_control import ROSControl\n\nVELOCITY = 0.5\n\nclass UICmd:\n \"\"\"UI Class for robot control\"\"\"\n\n def __init__(self, topic):\n self.client = ROSControl(topic)\n\n window = gtk.Window()\n window.set_title(_('ROS UI Cmd'))\n window.resize(200, 200)\n window.connect('delete-event', self.stop)\n\n window.connect('key-press-event', self.key_press_callback)\n window.connect('key-release-event', self.key_release_callback)\n\n button_forward = gtk.Button(_('Forward'))\n button_forward.connect('pressed', self.command_forward)\n button_forward.connect('released', self.command_stop)\n\n button_backward = gtk.Button(_('Backward'))\n button_backward.connect('pressed', self.command_backward)\n button_backward.connect('released', self.command_stop)\n\n button_left = gtk.Button(_('Left'))\n button_left.connect('pressed', self.command_left)\n button_left.connect('released', self.command_stop)\n\n button_right = gtk.Button(_('Right'))\n button_right.connect('pressed', self.command_right)\n button_right.connect('released', self.command_stop)\n\n buttons_hbox = gtk.HBox()\n buttons_hbox.add(button_left)\n buttons_hbox.add(button_right)\n buttons_vbox = gtk.VBox()\n buttons_vbox.add(button_forward)\n buttons_vbox.add(buttons_hbox)\n buttons_vbox.add(button_backward)\n\n window.add(buttons_vbox)\n window.show_all()\n\n self.key_commands = {\\\n 65362: self.command_forward,\n 65364: self.command_backward,\n 65361: self.command_left,\n 65363: self.command_right,\n }\n\n def key_press_callback(self, widget, event):\n keyval = event.keyval\n if keyval in self.key_commands.keys():\n self.key_commands[keyval]()\n\n def key_release_callback(self, widget, event):\n self.command_stop()\n\n def command_forward(self, *args):\n self.client.set_vel(VELOCITY, 0)\n def command_backward(self, *args):\n self.client.set_vel(-VELOCITY, 0)\n def command_left(self, *args):\n self.client.set_vel(0, -2 * VELOCITY)\n def command_right(self, *args):\n self.client.set_vel(0, 2 * VELOCITY)\n def command_stop(self, *args):\n self.client.set_vel(0, 0)\n\n def event_loop(self, *args):\n while gtk.events_pending():\n gtk.main_iteration(block=False)\n return True\n\n def start(self):\n self.client.main_loop = self.event_loop\n self.client.start(timeout=0.01)\n\n def stop(self, *args):\n sys.exit(0)\n\n\nif __name__ == '__main__':\n UICmd('pioneer2dx').start()\n","sub_path":"pioneer/ui_cmd.py","file_name":"ui_cmd.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"180686831","text":"import socket\nimport struct\nfrom ipaddress import ip_address\n\n# from config import mc_timeout, RED, GREEN, RESET\nfrom muffin.shared import RED, RESET, _get_cfg\n\ncfg = _get_cfg()\n\ndef mcast(ip, port, iface, feed, group, q, result):\n \"\"\"connect multicast group connectivity for each solarflare iface\"\"\"\n count = q.get()\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # check if IP is multicast\n if not ip_address(ip).is_multicast:\n print(f\"{group} {feed} {RED} This is not a valid Multicast IP: {ip}{RESET}\")\n return 1\n\n try:\n sock.bind((ip, port))\n except socket.error:\n print(f\"{group} {feed} {RED} Invalid Group {ip}:{port} on {iface} {RESET}\")\n return 1\n\n if iface is \"\":\n mreq = struct.pack(\"4sl\", socket.inet_aton(ip), socket.INADDR_ANY)\n else:\n mreq = socket.inet_aton(ip) + 
socket.inet_aton(iface)\n\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n sock.settimeout(cfg['mc_timeout'])\n\n i = 0\n while i < 1:\n try:\n data, addr = sock.recvfrom(port)\n except socket.timeout:\n result[count] = {\n \"name\": f\"[mc] {group} {feed}\",\n \"status\": \"Timed out\",\n \"ip\": f\"{ip}:{port}\",\n \"iface\": f\"{iface}\",\n }\n break\n else:\n result[count] = {\n \"name\": f\"[mc] {group} {feed}\",\n \"status\": \"Received\",\n \"ip\": f\"{ip}:{port}\",\n \"iface\": f\"{iface}\",\n }\n i += 1\n\n q.task_done()\n return result\n","sub_path":"muffin/mcast.py","file_name":"mcast.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"371320575","text":"from django.contrib import admin\nfrom polymorphic.admin import PolymorphicParentModelAdmin, PolymorphicChildModelAdmin\n\nfrom .models import Payment, DirectBankTransfer, Mutation, Checkout, Checkin\n\n\n@admin.register(DirectBankTransfer)\nclass DirectBankTransferAdmin(PolymorphicChildModelAdmin):\n base_model = Payment\n\n\n@admin.register(Payment)\nclass PaymentAdmin(PolymorphicParentModelAdmin):\n child_models = [DirectBankTransfer]\n list_display = ['name', 'account_name', 'account_number', 'balance']\n\n\n@admin.register(Mutation)\nclass MutationAdmin(PolymorphicParentModelAdmin):\n child_models = [Checkout, Checkin]\n list_display = [\n 'inner_id',\n 'payment_account',\n 'created_at',\n 'flow',\n 'amount',\n 'balance',\n 'is_verified',\n ]\n\n\n@admin.register(Checkin)\nclass CheckinAdmin(PolymorphicChildModelAdmin):\n base_model = Mutation\n fields = [\n 'content_type',\n 'object_id',\n 'account_name',\n 'account_number',\n 'provider_name',\n 'amount',\n 'payment_account',\n 'transfer_receipt',\n 'note',\n 'is_verified',\n ]\n\n def save_model(self, request, obj, form, change):\n super().save_model(request, obj, form, change)\n obj.payment_account.update()\n\n\n@admin.register(Checkout)\nclass CheckoutAdmin(PolymorphicChildModelAdmin):\n base_model = Mutation\n fields = [\n 'content_type',\n 'object_id',\n 'account_name',\n 'account_number',\n 'provider_name',\n 'amount',\n 'payment_account',\n 'transfer_receipt',\n 'note',\n 'is_verified',\n ]\n\n def save_model(self, request, obj, form, change):\n super().save_model(request, obj, form, change)\n obj.payment_account.update()\n","sub_path":"django_cashflow/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"516444490","text":"#Controllers\nfrom Controller.Rent_controller import Rent_controller\nfrom Controller.Salesman_controller import Salesman_controller\nfrom Controller.Order_controller import Order_controller\nfrom Controller.Information_controller import Information_controller\n#UIs\nfrom UI.Print_main_menu import Print_main_menu\n#Utilizations\nfrom Utilizations.Format_text import Format_text\n\nclass Main_controller:\n def __init__(self):\n #Controllers\n self.__rent_controller = Rent_controller()\n self.__salesman_controller = Salesman_controller()\n self.__order_controller = Order_controller()\n self.__information_controller = Information_controller()\n #UI\n self.__main_menu = Print_main_menu()\n #Utilizations\n self.__get_format = Format_text()\n\n def main_page(self):\n \"\"\"Reads choice and directs on a path depending on input\"\"\"\n choice = \"\"\n while choice != \"x\":\n header, main_menu, choices, underline = 
self.__get_format.main_menu_format()\n choice = self.__main_menu.main_page(header,main_menu,choices,underline)\n if choice == \"1\":\n self.__rent_controller.Rent_page()\n elif choice == \"2\":\n try_again = \"\"\n while try_again != \"n\":\n try_again, valid = self.__salesman_controller.sign_in_page()\n if valid == True:\n self.__salesman_controller.salesman_menu()\n elif choice == \"3\":\n self.__order_controller.find_order_process(page=2)\n elif choice == \"i\":\n self.__information_controller.information_page()","sub_path":"Controller/Main_controller.py","file_name":"Main_controller.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"445250242","text":"# -*- coding: utf-8 -*-\nimport unittest \nfrom appium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport time, random, string, csv, logging, logging.config, re\nfrom decimal import Decimal\nfrom time import sleep\nfrom re import sub\n\nCON_LOG = '../config/log.conf'\nlogging.config.fileConfig(CON_LOG)\nlogging = logging.getLogger()\n\nclass TestAccountLinkCheck(unittest.TestCase):\n def setUp(self):\n desired_caps={}\n desired_caps['platformName'] = 'ios'\n desired_caps['deviceName'] = 'iPhone X'\n desired_caps['platformVersion'] = '12.0'\n desired_caps['bundleID'] = 'com.castlery.dev'\n desired_caps['app'] = '/Users/alex/Documents/Castlery_ios_test.app'\n desired_caps['launchTimeout'] = '30000'\n desired_caps['autoAcceptAlerts'] = 'True'\n desired_caps['noReset'] = 'False'\n #desired_caps['unicodekeyboard'] = 'True'\n #desired_caps['resetkeyboard'] = 'True'\n self.driver = webdriver.Remote('http://0.0.0.0:4723/wd/hub', desired_caps)\n self.driver.implicitly_wait(18)\n \n def test_cart(self):\n driver = self.driver\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Category\"]').click()\n sleep(1)\n driver.find_element_by_accessibility_id(\"Coffee Tables\").click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeStaticText[@name=\"George Accent Table, Tall\"]').click()\n sleep(3)\n #price_product1_text = driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[3]').text\n #price_product1 = Decimal(sub(r'[^\\d.]', '', price_product1_text))\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Add to Cart\"]').click()\n sleep(3)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Confirm\"]').click()\n sleep(3)\n guest_cart_product1_name = driver.find_element_by_xpath('//XCUIElementTypeStaticText[@name=\"George Accent Table, Tall\"]').text\n\n logging.info(\"Start test_cart test_case1: page title should be Your cart\")\n try:\n driver.find_element_by_xpath('//XCUIElementTypeOther[@name=\"Your Cart\"]')\n title_cart = True\n except:\n title_cart = False\n if title_cart == True:\n logging.debug(\"title is exist, test_case1 passed\")\n elif title_cart == False:\n logging.warning(\"title is not exist, test_Case1 failed\")\n logging.info(\"test_case1 finished\")\n sleep(3)\n\n subtotal1_text = driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[9]').text\n subtotal1 = Decimal(sub(r'[^\\d.]', '', subtotal1_text))\n shipping_cost_text = driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[11]').text\n shipping_free = \"Free\"\n if subtotal1 <= 300:\n logging.info(\"Start test_cart test_case2: if item subtotal < 300 should have alert link\")\n try:\n driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[1]')\n alert_link_exist = True\n 
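# NOTE: this module imports NoSuchElementException at the top but the
# element-existence probes in these test cases use bare "except:", which
# also swallows WebDriver errors and KeyboardInterrupt. A tighter sketch of
# the same probe, assuming only the selenium import already present above:
#
#     try:
#         driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[1]')
#         alert_link_exist = True
#     except NoSuchElementException:
#         alert_link_exist = False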
except:\n alert_link_exist = False\n if alert_link_exist == True:\n logging.debug(\"alert link exist, test_case2 passed\")\n elif alert_link_exist == False:\n logging.warning(\"alert link not exist, test_case2 failed\")\n logging.info(\"test_case2 finished\")\n sleep(3)\n\n logging.info(\"Start test_cart test_case3: if item subtotal < 300 sbould have shipping cost\")\n try:\n self.assertNotEqual(shipping_cost_text, shipping_free)\n logging.debug(\"shipping is not free, test_case3 passed\")\n except AssertionError as e:\n logging.warning(\"shipping should not free, test_Case3 failed\")\n logging.info(\"test_case3 finished\")\n sleep(3) \n elif subtotal1 > 300:\n logging.warning(\"something wrong! plz check cart\")\n sleep(5)\n\n driver.find_element_by_xpath('(//XCUIElementTypeButton[@name=\"+\"])[2]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeTextField').send_keys(\"welcome26\")\n sleep(1)\n driver.find_element_by_accessibility_id(\"Done\").click()\n sleep(5)\n\n logging.info(\"Start test_Cart test_case4: if coupon code not correct get alert\")\n alert_coupon_wrong_msg1 = driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[15]').text\n alert_coupon_text1 = \"The coupon code you entered doesn't exist. Please try again.\"\n try:\n self.assertEqual(alert_coupon_wrong_msg1, alert_coupon_text1)\n logging.debug(\"alert msg is correct, test_case4 passed\")\n except AssertionError as e:\n logging.warning(\"alert msg wrong, test_case4 failed\")\n logging.info(\"test_case4 finished\")\n sleep(3)\n\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"OK\"]').click()\n sleep(3)\n driver.find_element_by_xpath('//XCUIElementTypeTextField').clear()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeTextField').send_keys(\"welcome25\")\n sleep(1)\n driver.find_element_by_accessibility_id(\"Done\").click()\n sleep(5)\n\n logging.info(\"Start test_cart test_Case5: if not meet the conditions of rules get alert msg\")\n alert_coupon_wrong_msg2 = driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[15]').text\n alert_coupon_text2 = \"This coupon code can't be applied to orders less than $200.00.\"\n try:\n self.assertEqual(alert_coupon_wrong_msg2, alert_coupon_text2)\n logging.debug(\"alert message is correct, test_Case5 passed\")\n except AssertionError as e:\n logging.warning(\"alert msg is wrong! test_Case5 failed\")\n logging.info(\"test_case5 finished\")\n sleep(3)\n\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"OK\"]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Back\"]').click()\n sleep(1)\n\n logging.info(\"Start test_cart test_Case6: now there is 1 product in cart, will show 1 on cart\")\n try:\n driver.find_element_by_accessibility_id(\"1\")\n qty_is_1 = True\n except:\n qty_is_1 = False\n if qty_is_1 == True:\n logging.debug(\"show qty on cart is 1, test_case6 passed\")\n elif qty_is_1 == False:\n logging.warning(\"show qty on cart wrong! 
test_case6 failed\")\n logging.info(\"test_case6 finished\")\n sleep(3)\n\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Coffee Tables\"]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Category\"]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Account\"]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Login\"]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeTextField').send_keys(\"test_ios@castlery.com\")\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeSecureTextField').send_keys(\"7787782\")\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Login\"]').click()\n sleep(3)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Cart\"]').click()\n sleep(1)\n\n logging.info(\"Start test_cart test_case7: after guest cart will merge in user cart\")\n user_cart_product1_name = driver.find_element_by_accessibility_id(\"George Accent Table, Tall\").text\n try:\n self.assertEqual(user_cart_product1_name, guest_cart_product1_name)\n logging.debug(\"product1 name is same,product merge in cart,test_case7 passed\")\n except AssertionError as e:\n logging.warning(\"product1 name wrong! test_case7 failed\")\n logging.info(\"test_case7 finished\")\n sleep(3)\n \n driver.find_element_by_xpath('(//XCUIElementTypeButton[@name=\"+\"])[1]').click()\n sleep(3)\n \n subtotal2_text = driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[8]').text\n subtotal2 = Decimal(sub(r'[^\\d.]', '', subtotal2_text))\n shipping_cost_text = driver.find_element_by_xpath('(//XCUIElementTypeStaticText)[10]').text\n\n if subtotal2 > 300:\n logging.info(\"Start test_cart test_case8: subtotal > 300 shipping free now\")\n try:\n self.assertEqual(shipping_cost_text, \"Free\")\n logging.debug(\"shipping free now, test_case8 passed\")\n except AssertionError as e:\n logging.warning(\"test_case8 failed\")\n logging.info(\"test_case8 finished\")\n elif subtotal2 <= 300:\n logging.warning(\"cart wrong! 
plz check cart\")\n sleep(3)\n\n driver.swipe(350, 130, 150, 130, 200)\n sleep(5)\n driver.tap([(340,130), (321,156), (359,156)], 500)\n sleep(5)\n\n logging.info(\"Start test_cart test_case9: no product in cart, cart is empty\")\n try:\n driver.find_element_by_xpath('//XCUIElementTypeImage[@name=\"cart_empty\"]')\n cart_empyt = True\n except:\n cart_empyt = False\n if cart_empyt == True:\n logging.debug(\"cart is empty now, test_Case9 passed\")\n elif cart_empyt == False:\n logging.warning(\"cart is not empyt, test_case9 failed\")\n logging.info(\"test_case9 finished\")\n sleep(3)\n\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Shop Now\"]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Account\"]').click()\n sleep(1)\n driver.find_element_by_xpath('(//XCUIElementTypeButton[@name=\"More Info\"])[5]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Log Out\"]').click()\n sleep(1)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"Confirm\"]').click()\n sleep(1)\n\n def tearDown(self):\n self.driver.close_app()\n self.driver.quit()\n \nif __name__ == '__main__':\n unittest.main()\n \n","sub_path":"appium_ios/test_case/test_cart.py","file_name":"test_cart.py","file_ext":"py","file_size_in_byte":10295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"415279492","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nurl = \"http://gamingboss000.blogspot.com/2011/03/lista-de-tesouros-do-uncharted.html\"\n\nhtml = urlopen(url)\n\nbs = BeautifulSoup(html, 'html.parser')\n\ncd = bs.find_all('h3', class_='js-guide-title')\nlista = bs.find_all('div', class_='guide-section-content')\n\ncdTemp = []\nlistaTemp = []\n\nfor cd in lista:\n\tif cd.get_text() != \"\":\n\t\tlistaTemp.append(cd)\n\nfor cd in cd:\n\tif cd.get_text() != \"\":\n\t\tcdTemp.append(cd)\n\n# for i in range(len(cdTemp)):\n\t# print(cdTemp[i].get_text())\n\n# print(listaTemp)\nfor i in range(len(listaTemp)):\n\tx = listaTemp[i].find('li')\n\tprint(count(x.get_text()))","sub_path":"projects/exemplos/extrair-texto-bf4/extrair-texto-de-html-bf4.py","file_name":"extrair-texto-de-html-bf4.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"503552158","text":"#!/usr/bin/env/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@project: apiAutoTest\n@author: zy7y\n@file: send_email.py\n@ide: PyCharm\n@time: 2020/8/3\n\"\"\"\nimport yagmail\nfrom tools import logger\nimport zipfile\nimport os\n\n\nclass EmailServe:\n\n @staticmethod\n def zip_report(file_path: str, out_path: str):\n \"\"\"\n 压缩指定文件夹\n :param file_path: 目标文件夹路径\n :param out_path: 压缩文件保存路径+xxxx.zip\n :return: 无\n \"\"\"\n file_path = f\"{file_path}/html\"\n zip = zipfile.ZipFile(out_path, \"w\", zipfile.ZIP_DEFLATED)\n for path, dirnames, filenames in os.walk(file_path):\n # 去掉目标跟路径,只对目标文件夹下边的文件及文件夹进行压缩\n fpath = path.replace(file_path, '')\n\n for filename in filenames:\n zip.write(\n os.path.join(\n path, filename), os.path.join(\n fpath, filename))\n zip.close()\n\n @staticmethod\n def send_email(setting: dict, file_path):\n \"\"\"\n 入参一个字典\n :param user: 发件人邮箱\n :param password: 邮箱授权码\n :param host: 发件人使用的邮箱服务 例如:smtp.163.com\n :param contents: 内容\n :param addressees: 收件人列表\n :param title: 邮件标题\n :param enclosures: 附件列表\n :param file_path: 需要压缩的文件夹\n :return:\n \"\"\"\n EmailServe.zip_report(\n 
file_path=file_path,\n out_path=setting['enclosures'])\n yag = yagmail.SMTP(\n setting['user'],\n setting['password'],\n setting['host'])\n # 发送邮件\n yag.send(\n setting['addressees'],\n setting['title'],\n setting['contents'],\n setting['enclosures'])\n # 关闭服务\n yag.close()\n logger.info(\"邮件发送成功!\")\n\n\n# if __name__ == '__main__':\n# EmailServe.zip_report('../report/html', 'report.zip')\n","sub_path":"tools/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"67541976","text":"class Stack:\n def __init__(self):\n self.head = None\n self.count = 0\n\n def push(self, data):\n if self.head:\n temp = self.Node(data)\n temp.next = self.head\n self.head = temp\n else:\n self.head = self.Node(data)\n\n def pop(self):\n if self.head:\n temp = self.head\n self.head = temp.next\n temp.next = None\n return temp.data\n\n def top(self):\n return self.head\n\n def is_empty(self):\n if self.head:\n return False\n else:\n return True\n\n def print_stack(self):\n temp = self.head\n\n while temp:\n print(f\"[{temp.data}|*]\", end=\"-->\")\n temp = temp.next\n print(None)\n\n class Node:\n def __init__(self, data):\n self.data = data\n self.next= None\n\ndef main():\n stack = Stack()\n for i in range(10):\n stack.push(i)\n stack.print_stack()\n\n print(f\"Pop : {stack.pop()}\")\n print(f\"Pop : {stack.pop()}\")\n print(f\"Pop : {stack.pop()}\")\n stack.print_stack()\n import os\n print(os.getenv(\"_SC_LEVEL1_DCACHE_LINESIZE\"))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"Algorithm/stack/create_stack_using_linked_list.py","file_name":"create_stack_using_linked_list.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"375705550","text":"#! /usr/bin/env python\nfrom jinja2 import Environment, Template, FileSystemLoader\nimport os.path\nfrom astropy.table import Table,unique,MaskedColumn\nfrom io import StringIO\nfrom copy import deepcopy\n#from aladin import Aladin\nfrom astropy.wcs import WCS\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom astropy import constants\nfrom astropy.io import fits\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nfrom astroquery.skyview import SkyView\nfrom matplotlib.cbook.deprecation import MatplotlibDeprecationWarning\nfrom astropy.utils.exceptions import AstropyDeprecationWarning,AstropyUserWarning,AstropyWarning\nfrom matplotlib.lines import Line2D\nfrom aplpy import FITSFigure\nfrom pathlib import Path\nfrom astropy import log\nfrom astropy.table import Table,Column,vstack,hstack,MaskedColumn\nfrom collections import deque,defaultdict\nfrom astropy.utils.console import ProgressBar\nfrom regions import LineSkyRegion\nfrom configparser import ConfigParser\nfrom functools import partial\n#from FAOR import FAOR\nfrom .CMap import CMap\nfrom . 
import MSX\n#from planner import split_leg\nfrom requests.exceptions import ChunkedEncodingError, SSLError, ConnectionError\nfrom regions import RectangleSkyRegion,PointSkyRegion,RegionMeta,RegionVisual,write_ds9,ds9_objects_to_string,DS9Parser\nfrom pylatexenc.latexencode import utf8tolatex\nfrom shapely.geometry import MultiPoint, Polygon\nimport matplotlib.patheffects as path_effects\nimport tarfile\nimport shutil\nimport warnings\nwarnings.filterwarnings('ignore',category=MatplotlibDeprecationWarning)\nwarnings.filterwarnings('ignore',category=AstropyDeprecationWarning)\nwarnings.filterwarnings('ignore',category=AstropyUserWarning)\nwarnings.filterwarnings('ignore',category=AstropyWarning)\nnp.warnings.filterwarnings('ignore')\n\nDEBUG = False\nMP = False if DEBUG else True\n\nif DEBUG is False:\n log.disable_warnings_logging()\n log.setLevel('ERROR')\n\n\nc_km_s = constants.c.to(u.km/u.s).value\n\n# FIFI, from obsmaker.cfg\nOBS_REF_BLUE_LINES = [\"FIV 44.07\", \"FeII 51.30\", \"FeIII 51.68\", \"OIII 51.81\",\n \"OIII* 51.95\", \"FeI 54.31\", \"SI 56.31\", \"NIII* 57.15\",\n \"NIII 57.32\", \"PII 60.64\", \"OI* 62.1\", \"OI 63.18\", \"FII 67.20\",\n \"SiI 68.47\", \"CO 69.07\", \"CO 70.91\", \"CO 77.05\", \"CO 84.41\",\n \"OH* 84.50\", \"CO 87.19\", \"FeII 87.38\", \"OIII 88.35\", \"AlI 89.24\",\n \"CO 96.77\", \"CO 104.44\", \"FeIII 105.37\", \"FeI 111.18\", \"CO 118.58\",\n \"NII 121.89\", \"CO 124.19\", \"SiI 129.68\", \"CO 130.37\", \"Custom\"]\nOBS_REF_BLUE_LAMBDAS = np.array([44.070000, 51.300440, 51.680000, 51.814500,\n 51.950000, 54.310930, 56.311000, 57.150000,\n 57.317000, 60.640000, 62.100000, 63.183705, 67.200000,\n 68.473000, 69.0744058, 70.9072390, 77.058693, 84.410721,\n 84.500000, 87.190422, 87.384400, 88.356000, 89.237000,\n 96.7725080, 104.444952, 105.370000, 111.182800, 118.5807176,\n 121.897570, 124.193352, 129.681730, 130.3689279, 130.])\nOBS_REF_RED_LINES = [\"CO 104.44\", \"FeIII 105.37\", \"FeI 111.18\",\n \"CO 118.58\", \"NII 121.89\", \"CO 124.19\", \"SiI 129.68\", \"CO* 129.80\",\n \"CO 130.37\", \"OI 145.52\", \"CO 153.26\", \"CII 157.74\", \"CO 162.81\",\n \"CO 173.63\", \"CO* 185.00\", \"CO 185.99\", \"CO 200.27\", \"NII 205.17\",\n \"Custom\"]\nOBS_REF_RED_LAMBDAS = np.array([104.444952, 105.370000, 111.182800,\n 118.5807176, 121.897570, 124.193352, 129.681730, 129.800000,\n 130.3689279, 145.525439, 153.266708, 157.740900, 162.811630,\n 173.631434, 185.000000, 185.9992957, 200.272476, 205.178230,210.])\n\n# formatters for astropy table columns\nCOL_FORMATTER = lambda x: x.replace('_','\\_')\nINT_FORMATTER = lambda x: '%i'%int(np.float(x)) if x not in (None,'None',-9999,'--') else ''\nZERO_INT_FORMATTER = lambda x: '%i'%int(np.float(x)) if x not in (None,'None',-9999,'--',0,'0') else ''\n\ndef INT_CONVERTER(row,cols):\n \"\"\"Convert row to ints from strings or floats\"\"\"\n for col in cols:\n if col not in row:\n continue\n val = row[col]\n if isinstance(val,str):\n try:\n val = np.float(val)\n except TypeError:\n continue\n if val is None or np.isnan(val):\n row[col] = ''\n elif np.float(val).is_integer():\n row[col] = int(val)\n else:\n continue\n return row\n \n\nROLL_RE = re.compile('\\[([\\d\\.]*)\\,\\s?([\\d\\.]*)\\]')\nCOMMENT_RE = re.compile('\\%\\s\\\\d\\d?).*\\>\\n(?P[\\s\\S]*?(?=\\n\\%\\s\\))')\n\n#COLORS = ['#ff0000','#00ff00','#0000ff']\nCOLORS = ['#d62728','#1f77b4','#2ca02c','#ff7f0e','#9467bd','#17becf','#e377c2']\nGCOLOR = '#FFD700'\nFCOLOR = '#9467bd'\n\nFOV = 
{'A_TOT':(2.8,1.7),'B_TOT':(4.2,2.7),'C_TOT':(4.2,2.7),'D_TOT':(7.4,4.6),'E_TOT':(8.4,6.2),\n 'A_POL':(1.4,1.7),'B_POL':(2.1,2.7),'C_POL':(2.1,2.7),'D_POL':(3.7,4.6),'E_POL':(4.2,6.2),\n 'A_C2N':(1.4,1.7),'B_C2N':(2.1,2.7),'C_C2N':(2.1,2.7),'D_C2N':(3.7,4.6),'E_C2N':(4.2,6.2),\n 'FORCAST_IMG':(3.4,3.2),'FORCAST_GSM':(.04,3.18),'FIF_BLUE':(.5,.5),'FIF_RED':(1,1)}\n\nPIXSIZE = {'A_TOT':2.34,'B_TOT':4.00,'C_TOT':4.02,'D_TOT':6.90,'E_TOT':8.62,\n 'A_POL':2.34,'B_POL':4.00,'C_POL':4.02,'D_POL':6.90,'E_POL':8.62,\n 'A_C2N':2.34,'B_C2N':4.00,'C_C2N':4.02,'D_C2N':6.90,'E_C2N':8.62,\n 'FORCAST_IMG':0.768,'FORCAST_GSM':0.768,'FIF_BLUE':12,'FIF_RED':12}\n\n\nIMGOPTIONS = {'width':0.4*u.deg, 'height':0.4*u.deg,\n 'survey':'DSS2 Red',\n 'vmin':None, 'vmax':None,\n 'recenter':None,'roll':True,\n 'invert':True,'irsurvey':None,\n 'compass':True,'nofigure':False,\n 'observed':False}\n\nTARFOFFSET = {'HAWC_PLUS':3*u.deg,\n 'FORCAST':40.6*u.deg,\n 'FIFI-LS':0*u.deg}\n\nINST_REPL = {'California Institute of Technology':'Caltech',\n 'University':'Univ','Universitaet':'Univ',\n '&':'\\&',\n 'Department':'Dept',' and':' \\&',' und':' \\&',\n 'Institute':'Inst','Institut':'Inst',\n 'Observatory':'Obs',\n 'fuer ':'f.\\ ','der ':'d.\\ ',\n 'Astrophysics':'Ast','Astrophysik':'Ast',\n 'Dr. ':'','Mr. ':'','Ms. ':'','Mrs. ':'',\n 'Prof ':'','Prof. ':'',\n 'Karl-Schwarzschild-Observatorium':'KSO',\n '. ':'.\\ '}\n\nHAWC_SIO = {\n 'Lissajous':r\"\"\"\n - Select boresight/pupil \\\\\n - Select instrument configuration \\\\\n - Install Low/HighAlt bias \\\\\n - Perform Lissajous observations at each band until SNR $>5$ \\\\\n \"\"\",\n 'Polarimetry':r\"\"\"\n - Select boresight/pupil \\\\\n - Select instrument configuration. \\\\\n - Install Low/HighAlt bias \\\\\n - Perform chop-nod polarimetric observations \\\\\n - Ensure chop-angle and chop-throw are correct \\\\\n \"\"\",\n 'LISPOL':r\"\"\"\n - Select boresight \\\\\n - Select instrument configuration \\\\\n - Install Low/HighAlt bias \\\\\n - Perform scan-pol observations using Lissajous scans \\\\\n - Do NOT change LOS within a set of 4 scans \\\\\n \"\"\"\n }\n\nSTRIKETHROUGH_REPL = r'\\1[-1.7ex]\\n\\\\hline\\\\noalign{\\\\vspace{\\\\dimexpr 1.7ex-\\\\doublerulesep}}'\n\n\ndef get_latex_env(directory):\n \"\"\"Create jinja environment to load template\"\"\"\n # modify jinja environment to work with latex files\n # http://eosrei.net/articles/2015/11/latex-templates-python-and-jinja2-generate-pdfs\n latex_jinja_env = Environment(\n block_start_string = '\\BLOCK{',\n block_end_string = '}',\n variable_start_string = '\\VAR{',\n variable_end_string = '}',\n comment_start_string = '\\#{',\n comment_end_string = '}',\n line_statement_prefix = '%%',\n line_comment_prefix = '%#',\n trim_blocks = True,\n autoescape = False,\n #loader = FileSystemLoader(os.path.abspath('.'))\n loader = FileSystemLoader(str(Path(directory).resolve()))\n )\n return latex_jinja_env\n\ndef get_latex_template(filename):\n \"\"\"Return jinja template\"\"\"\n filename = Path(filename)\n env = get_latex_env(filename.parent.resolve())\n return env.get_template(filename.name)\n\n\ndef make_box(center, width, height, angle=0*u.deg, TARFoffset=0*u.deg,label=None,\n linewidth=2, color='#ff0000',name=None,split=False,**kwargs):\n '''Generate box overlay'''\n if split:\n # split is for the HAWC+ R0/R1 gap and for FORCAST slit\n try:\n splitgap = PIXSIZE[label]*u.arcsec\n except KeyError:\n if 'G' in label:\n splitgap = PIXSIZE['FORCAST_GSM']*u.arcsec\n else:\n splitgap = PIXSIZE['FORCAST_IMG']*u.arcsec\n 
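# The split branch below carves the instrument FOV into two half-width
# boxes (the HAWC+ R0/R1 arrays, or the FORCAST halves), separated by one
# detector pixel taken from the PIXSIZE table above. Worked example for
# C_TOT, assuming the FOV table is in arcmin (4.2' x 2.7', 4.02"/pix):
#     width   = 4.2 * 60 = 252 arcsec
#     r_width = (252 - 4.02) / 2 = 123.99 arcsec per half-array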
r_width = (width-splitgap)/2\n boxangle = (np.arctan2(height,width).to(u.deg)+90*u.deg)/2\n offset = angle + TARFoffset\n \n r0_center = center\n r1_center = center.directional_offset_by(-90*u.deg+offset,r_width+splitgap)\n\n recenter = center.directional_offset_by(-90*u.deg+offset,r_width/2+splitgap/2)\n \n r0 = make_box(r0_center,r_width,height,angle,TARFoffset,label,\n linewidth,color,name,reglabel='_R0')\n r1 = make_box(r1_center,r_width,height,angle,TARFoffset,label,\n linewidth,color,name,reglabel='_R1')\n\n regs = [r0.get('reg'),r1.get('reg')]\n\n boxdict = {'box':[r0['box'][0],r1['box'][0]],'linewidth':linewidth,\n 'color':color,'center':center,'label':label,'name':name,\n 'recenter':recenter,'reg':regs}\n\n elif kwargs.get('scan'):\n try:\n splitgap = PIXSIZE[label]*u.arcsec\n except KeyError:\n if 'G' in label:\n splitgap = PIXSIZE['FORCAST_GSM']*u.arcsec\n else:\n splitgap = PIXSIZE['FORCAST_IMG']*u.arcsec\n r_width = (width/4) - (splitgap*2)\n #boxangle = (np.arctan2(height,width).to(u.deg)+90*u.deg)/2\n \n offset = angle + TARFoffset\n # keep boresite at R0 center for HAWC+\n if 'TOT' in label:\n #center = center.directional_offset_by(-90*u.deg+offset,r_width/2-splitgap*2)\n center = center.directional_offset_by(-90*u.deg+offset, r_width)\n\n scanangle = kwargs.get('scanangle', None)\n scanamp = kwargs.get('scanamp', None)\n \n return make_box(center,width,height,angle,TARFoffset,label,\n linewidth,color,name,split=False,scan=False,\n scanangle=scanangle,scanamp=scanamp,\n reglabel='_scan')\n\n else:\n diag = np.hypot(width/2,height/2)\n boxangle = np.arctan2(height,width).to(u.deg)+90*u.deg\n\n offset = angle + TARFoffset\n\n if kwargs.get('scanangle') and kwargs.get('scanamp'):\n scanangle = kwargs['scanangle']\n ampx,ampy = kwargs['scanamp']\n\n # initial rotated FOV\n tl = center.directional_offset_by( boxangle+offset,diag)\n tr = center.directional_offset_by(-boxangle+offset,diag)\n bl = center.directional_offset_by(-(180*u.deg+boxangle)+offset,diag)\n br = center.directional_offset_by( 180*u.deg+boxangle+offset,diag)\n\n # get max displacements for each corner\n points = deque((tl,tr,bl,br))\n for corner in (tl,tr,bl,br):\n corner0 = corner.directional_offset_by(scanangle, ampx)\n corner1 = corner.directional_offset_by(scanangle + 90*u.deg, ampy)\n corner2 = corner.directional_offset_by(scanangle + 180*u.deg, ampx)\n corner3 = corner.directional_offset_by(scanangle + 270*u.deg, ampy)\n corner4 = corner.directional_offset_by(scanangle, ampy)\n corner5 = corner.directional_offset_by(scanangle + 90*u.deg, ampx)\n corner6 = corner.directional_offset_by(scanangle + 180*u.deg, ampy)\n corner7 = corner.directional_offset_by(scanangle + 270*u.deg, ampx)\n points.extend((corner0,corner1,corner2,corner3,\n corner4,corner5,corner6,corner7))\n mpoints = MultiPoint([(p.ra.value,p.dec.value) for p in points])\n hull = mpoints.minimum_rotated_rectangle\n #hull = mpoints.convex_hull\n box = np.array(tuple(zip(*hull.exterior.coords.xy)))\n \n else:\n # initial rotated FOV\n tl = center.directional_offset_by( boxangle+offset,diag)\n tr = center.directional_offset_by(-boxangle+offset,diag)\n bl = center.directional_offset_by(-(180*u.deg+boxangle)+offset,diag)\n br = center.directional_offset_by( 180*u.deg+boxangle+offset,diag)\n\n box = (tl,tr,br,bl,tl)\n box = np.array([[coord.ra.value,coord.dec.value] for coord in box])\n\n \n boxdict = {'box':[box],'linewidth':linewidth,'color':color,\n 'center':center,'label':label,'name':name}\n\n # make region\n if name is None:\n boxdict['reg'] = 
None\n else:\n # split name---last two entries are ra/dec\n rlabel = ' '.join((' '.join(name.split()[0:-2]),label,kwargs.get('reglabel','')))\n meta = RegionMeta({'label':rlabel})\n vmeta = {'color':color}\n if ('_scan' in rlabel) or ('_d' in rlabel):\n vmeta['dash'] = 1\n vmeta = RegionVisual(vmeta)\n reg = RectangleSkyRegion(center,width,height,angle=offset,\n meta=meta,visual=vmeta)\n boxdict['reg'] = ds9_objects_to_string([reg])\n return boxdict\n\n\ndef make_dithers(center,scale,angle=0*u.deg):\n \"\"\"Generate box for dithers\"\"\"\n diag = np.hypot(scale,scale)\n posangle = angle+45*u.deg\n tl = center.directional_offset_by(posangle,diag)\n tr = center.directional_offset_by(posangle-90*u.deg,diag)\n bl = center.directional_offset_by(posangle-90*u.deg,-diag)\n br = center.directional_offset_by(posangle,-diag)\n return (tl,tr,bl,br)\n\ndef make_NMC(center,chopthrow=300*u.arcsec,chopangle=0*u.arcsec,label=None):\n '''Given a center and chop nod parameters, calculate the nod throws'''\n if label == 'FIF_RED':\n pass\n \n nodA = center.directional_offset_by(chopangle,chopthrow)\n nodB = center.directional_offset_by(180*u.deg+chopangle,chopthrow)\n\n return (nodA,nodB)\n\ndef make_C2NC2(center,chopthrow,chopangle,nodthrow,nodangle):\n '''Generate asymmetric chopping centers'''\n nodAchopB = center.directional_offset_by(chopangle,chopthrow)\n nodBchopA = center.directional_offset_by(nodangle,nodthrow)\n nodBchopB = nodBchopA.directional_offset_by(chopangle,chopthrow)\n return (nodAchopB,nodBchopA,nodBchopB)\n\n\ndef get_cfgoptions(cfg, blk):\n '''Get obsblk options from cfg'''\n options = {}\n for k,v in IMGOPTIONS.items():\n o = cfg.get(blk, k, fallback=v)\n try:\n o = float(o)\n except (ValueError,TypeError):\n pass\n options[k] = o\n\n options['height'] = u.Quantity(options['height'], u.deg)\n options['width'] = u.Quantity(options['width'], u.deg)\n options = {k:v for k,v in options.items() if v is not None}\n if cfg.has_option(blk,'nofigure') and cfg.getboolean(blk,'nofigure'):\n options['nofigure'] = True\n if cfg.has_option(blk,'roll'):\n roll = cfg.get(blk,'roll')\n if roll.lower() in ('true','on','yes'):\n options['roll'] = cfg.getboolean(blk,'roll')\n else:\n options['roll'] = float(roll)\n\n return options\n\ndef get_img_options(cmap):\n '''Get img options from cmap'''\n options = {}\n for k,v in IMGOPTIONS.items():\n o = cmap.get(blk, k, fallback=v)\n try:\n o = float(o)\n except (ValueError,TypeError):\n pass\n options[k] = o\n\n options['height'] = u.Quantity(options['height'], u.deg)\n options['width'] = u.Quantity(options['width'], u.deg)\n options = {k:v for k,v in options.items() if v is not None}\n if cfg.has_option(blk,'nofigure') and cfg.getboolean(blk,'nofigure'):\n options['nofigure'] = True\n if cfg.has_option(blk,'roll'):\n roll = cfg.get(blk,'roll')\n if roll.lower() in ('true','on','yes'):\n options['roll'] = cfg.getboolean(blk,'roll')\n else:\n options['roll'] = float(roll)\n\n return options\n\ndef get_image(overlays,survey='DSS2 Red',width=0.2*u.deg,height=0.2*u.deg,\n reticle=False,reticle_style_kwargs=None,compass=True,\n vmin=None,vmax=None,recenter=None,invert=True,fpi=False,\n irsurvey=None,**kwargs):\n '''Get image from skyview'''\n \n if overlays is None or not overlays:\n return None\n\n # flatten overlays---FIFI has two\n if recenter:\n if isinstance(recenter,SkyCoord):\n center = recenter\n else:\n center = SkyCoord(recenter,unit=(u.hourangle,u.deg))\n\n else:\n center = overlays[0]['center']\n\n try:\n im = SkyView.get_images(center,survey=survey,\n 
width=width,height=height,\n show_progress=DEBUG)\n except (SSLError,ChunkedEncodingError,ConnectionError):\n warnings.warn('Cannot query SkyView service. Skipping image.')\n print('Cannot query SkyView service. Skipping image.')\n return None\n\n try:\n hdu = im[0][0]\n except (IndexError,TypeError):\n warnings.warn('Cannot process SkyView response. Skipping image.')\n print('Cannot process SkyView response. Skipping image.')\n return None\n \n fig = FITSFigure(hdu)\n fig.show_grayscale(vmin=vmin,vmax=vmax,invert=invert)\n\n #regs = deque()\n\n for idx,overlay in enumerate(overlays):\n fig.show_polygons(overlay['box'],edgecolor=overlay['color'],lw=overlay['linewidth'])\n fig.show_markers(overlay['center'].ra.value,overlay['center'].dec.value,marker='*',edgecolor=overlay['color'])\n\n if 'overlay2' in overlay:\n fig.show_polygons(overlay['overlay2']['box'],edgecolor=overlay['color'],lw=overlay['linewidth'])\n\n '''\n if overlay.get('reg'):\n rs = [r for r in overlay['reg'] if r]\n regs.extend(rs)\n '''\n\n if overlay['label']:\n if 'overlay2' in overlay:\n #change label to aorid for FIFI\n overlay['label'] = overlay['aorID']\n if len(overlay['label']) < 6:\n # note, ignore this used to be (0.87, 0.95)\n fig.add_label(0.75, 0.95, '%s%s'%('\\n\\n'*idx,overlay['label']),\n horizontalalignment='left', weight='bold',\n relative=True, size='large',color=overlay['color'])\n else:\n fig.add_label(0.75, 0.95, '%s%s'%('\\n\\n'*idx,overlay['label']),\n horizontalalignment='left', weight='bold',\n relative=True, size='large',color=overlay['color'])\n\n\n if overlay['name']:\n fig.add_label(0.02, 0.95, '%s%s'%('\\n\\n'*idx,overlay['name']),\n horizontalalignment='left', weight='bold',\n relative=True, size='large',color=overlay['color'])\n\n if fpi:\n fpiradius = (4.5*u.arcmin).to(u.deg).value\n fig.show_circles(overlay['center'].ra.value,\n overlay['center'].dec.value,\n fpiradius,\n #edgecolor=FCOLOR,\n edgecolor=overlay['color'],\n linestyle='dashed',lw=1,\n alpha=0.5)\n if 'nods' in overlay and len(overlay['nods']) > 2:\n # c2nc2 mode\n fig.show_circles(overlay['nods'][1]['center'].ra.value,\n overlay['nods'][1]['center'].dec.value,\n fpiradius,edgecolor=overlay['color'],\n linestyle='dashed',lw=1,\n alpha=0.4)\n\n\n if reticle:\n pixel_width = hdu.data.shape[0]\n inner,outer = 0.03,0.08\n if reticle_style_kwargs is None:\n reticle_style_kwargs = {}\n reticle_style_kwargs.setdefault('linewidth', 2)\n reticle_style_kwargs.setdefault('color', 'm')\n ax = fig.ax\n\n ax.axvline(x=0.5*pixel_width, ymin=0.5+inner, ymax=0.5+outer,\n **reticle_style_kwargs)\n ax.axvline(x=0.5*pixel_width, ymin=0.5-inner, ymax=0.5-outer,\n **reticle_style_kwargs)\n ax.axhline(y=0.5*pixel_width, xmin=0.5+inner, xmax=0.5+outer,\n **reticle_style_kwargs)\n ax.axhline(y=0.5*pixel_width, xmin=0.5-inner, xmax=0.5-outer,\n **reticle_style_kwargs)\n\n if compass:\n ax = fig.ax\n x,y = 0.95, 0.05\n dispTrans = ax.transData.inverted()\n dispOrigin = dispTrans.transform(ax.transAxes.transform((x,y)))\n origin = fig.pixel2world(*dispOrigin)\n w = WCS(hdu.header)\n\n coo_origin = SkyCoord(ra=origin[0],dec=origin[1],unit=(u.deg,u.deg))\n delta = hdu.header['CDELT2']*u.deg*20\n \n coo_e = coo_origin.directional_offset_by(90*u.deg, delta)\n coo_n = coo_origin.directional_offset_by( 0*u.deg, delta)\n\n for c in (coo_e,coo_n):\n line_sky = LineSkyRegion(start=coo_origin,end=c)\n line_pix = line_sky.to_pixel(w)\n line_pix.meta['line'] = 1\n line_pix.visual['line'] = 1\n line_pix.visual['linewidth'] = 2\n line_pix.plot(ax=ax)\n 
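# NOTE: get_img_options() defined above looks like an unfinished copy of
# get_cfgoptions(): it takes a single cmap argument but still references
# blk (undefined) and cfg (undefined) in its body, so calling it raises
# NameError as written. get_cfgoptions(cfg, blk) is the working variant.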
ax.text(0.881,0.056,'E',color='g',transform=ax.transAxes,weight='bold')\n ax.text(0.942,0.122,'N',color='g',transform=ax.transAxes,weight='bold')\n\n roll_s = coo_origin.directional_offset_by(overlay['roll'][0],delta*0.75)\n roll_e = coo_origin.directional_offset_by(overlay['roll'][1],delta*0.75)\n\n for c,color in zip((roll_s,roll_e),('#1f77b4','#d62728')):\n line_sky = LineSkyRegion(start=coo_origin,end=c)\n line_pix = line_sky.to_pixel(w)\n line_pix.meta['line'] = 1\n line_pix.visual['line'] = 1\n line_pix.visual['linewidth'] = 2\n line_pix.visual['color'] = color\n line_pix.plot(ax=ax)\n\n\n #regs = list(filter(lambda x: x is not None,regs))\n # remove duplicates\n #regs = set(regs) # NOTE: this effectively does nothing if aorids are given to make_box, since each line will be unique\n \n # convert back into objects--this is all because regions don't pickle\n #regs = [DS9Parser(reg).shapes.to_regions()[0] for reg in regs]\n\n # add ir image\n if irsurvey is not None:\n if 'msx' in irsurvey.lower():\n band = irsurvey.split()[-1]\n dfile = MSX.query_region(center,band=band,show_progress=True)\n if dfile is not None:\n irhdu = fits.open(dfile)\n irhdu[0].header['SURVEY'] = 'MSX Band %s'%band\n hdu = fits.HDUList([hdu,irhdu[0]])\n else:\n try:\n im = SkyView.get_images(center,survey=irsurvey,\n width=width,height=height,\n show_progress=DEBUG)\n irhdu = im[0][0]\n hdu = fits.HDUList([hdu,irhdu])\n except (SSLError,ChunkedEncodingError,ConnectionError,IndexError,TypeError):\n pass\n \n return fig, hdu\n \n\ndef make_overview(leg, tex=True):\n '''Make table of overview stats for leg'''\n\n # overview cols to extract\n ocols = ('Start','ObsDur','Target','ObsBlkID','Priority','RA','DEC','NAIFID')\n\n # metacols\n hcols = ('Leg','Name','PI')\n mcols = ('Elev','ROF','ROFRT','MoonAngle','MoonIllum','THdg','THdgRT')\n\n l = leg[0]\n overview = {k:l.get(k,'') for k in ocols}\n\n # make metadata\n if tex:\n if 'PI' not in l:\n l['PI'] = ''\n overview['header'] = '\\\\captionline{Leg %i (%s)}{%s}' % (l['Leg'],l['Name'].replace('_','\\_'),l['PI'])\n # shorten header\n for k,v in INST_REPL.items():\n overview['header'] = overview['header'].replace(k,v)\n else:\n overview['header'] ={k:l.get(k,'') for k in hcols}\n\n # footer holds mis file info\n footer = {}\n for k in mcols:\n if '%s_start'%k in l:\n if 'RT' in k:\n footer[k] = '[%+.2f, %+.2f]' % (l['%s_start'%k],l['%s_end'%k])\n else:\n footer[k] = '[%.1f, %.1f]' % (l['%s_start'%k],l['%s_end'%k])\n elif k == 'MoonAngle':\n footer[k] = '%i$^{\\circ}$'%int(l[k])\n else:\n footer[k] = l[k]\n\n if tex:\n footer1 = '\\quad '.join(['%s: %s'%(k,footer[k]) for k in mcols[0:3]])\n footer1 = ' '.join((footer1,'deg/min'))\n # add priority\n prior = overview.pop('Priority')\n if prior:\n footer1 = '\\quad '.join((footer1,'Priority: %s'%prior))\n footer2 = '\\quad '.join(['%s: %s'%(k,footer[k]) for k in mcols[-4:]])\n footer2 = ' '.join((footer2,'deg/min'))\n footer = '%s\\\\\\\\\\n%s'%(footer1,footer2)\n footer = footer.replace('ROFRT','rate') \\\n .replace('THdgRT','rate') \\\n .replace('Moon','Moon ') \\\n .replace('%','\\%')\n footer = ''.join((r'\\\\[0.5em]','\\n',footer))\n\n overview['footer'] = footer\n\n if tex:\n overview = generate_overview_tex(overview)\n \n return overview\n\n\ndef generate_overview_tex(overview, metakeys=('header','footer')):\n '''Generate latex string of overview table'''\n\n # col align param must be special for boldface header line\n col_align = ['c']*len(overview)\n col_align = '|^'.join(col_align)\n col_align = 
'|$%s|'%col_align\n\n if len(overview['Target']) > 15:\n # target is too long, so make cells smaller\n preamble = r'\\setlength{\\tabcolsep}{0.5em}'\n overview['footer'] += '\\n'+r'\\setlength{\\tabcolsep}{1em}'\n else:\n preamble = ''\n\n # make meta dict\n meta = {mkey:overview.pop(mkey) for mkey in metakeys}\n\n # convert to table\n overview = Table(data=[overview],names=overview.keys(),meta=meta)\n\n # remove ra/dec cols if non-sidereal object\n if overview['RA'][0] is None:\n overview.remove_columns(('RA','DEC'))\n #overview['RA'][0] = ''\n #overview['DEC'][0] = ''\n\n else:\n # reformat\n coord = SkyCoord(ra=overview['RA'][0],dec=overview['DEC'][0],unit=(u.hourangle,u.deg))\n ra,dec = coord.to_string('hmsdms',sep=':',precision=2).split()\n overview['RA'][0] = ra\n overview['DEC'][0] = dec\n if overview['NAIFID'][0] in (None,''):\n overview.remove_column('NAIFID')\n\n # safe convert obsblkid,target\n for col in ('ObsBlkID','Target'):\n try:\n overview[col] = [k.replace('_','\\_') for k in overview[col]]\n except AttributeError:\n overview[col] = ''\n\n # rename cols to have headercolor\n for col in overview.colnames:\n newcol = '\\\\cellcolor{headercolor}%s' % col\n overview.rename_column(col,newcol)\n\n with StringIO() as f:\n \n overview.write(f,format='latex',\n latexdict={'header_start':r'\\hline\\rowstyle{\\bfseries}',\n 'tablealign':'h!',\n 'caption':overview.meta['header'],\n 'col_align':col_align,\n 'data_end':'\\hline',\n 'preamble':preamble,\n 'tablefoot':overview.meta['footer']})\n texcode = f.getvalue()\n if 'RA' not in str(overview.colnames):\n # push right\n texcode = texcode.replace(r'\\begin{tabular}','\\\\hspace*{2cm}\\n\\\\begin{tabular}')\n return texcode\n\ndef make_details(tab, tex=True, faor=False):\n '''Make observation details'''\n\n tab = deepcopy(tab)\n\n if tab[0]['aorID'] in ('99_9999_99','--'):\n return ''\n\n instrument = tab[0]['InstrumentName']\n if instrument == 'HAWC_PLUS':\n #keys = ('ObsPlanConfig','aorID','Name','InstrumentSpectralElement1','Repeat','NodTime','ChopThrow','ChopAngle','ScanTime','ScanAmplitudeEL','ScanAmplitudeXEL','ScanRate','ChopAngleCoordinate')\n keys = ('ObsPlanConfig','aorID','target','InstrumentSpectralElement1','Repeat','NodTime','ChopThrow','ChopAngle','ScanTime','ScanAmplitudeEL','ScanAmplitudeXEL','ScanRate','ChopAngleCoordinate','duration')\n key_map = {'ObsPlanConfig':'Mode','aorID':'AORID','ChopAngleCoordinate':'Sys','InstrumentSpectralElement1':'Band/Bore','ScanAmplitudeEL':'ScanAmp','target':'Name','duration':'EstDur'}\n\n # replace some values\n for t in tab:\n # change coordsys\n sys = t['ChopAngleCoordinate']\n t['ChopAngleCoordinate'] = 'ERF' if sys == 'Sky' else 'SIRF'\n\n # store filter for boresite\n filt = t['InstrumentSpectralElement1'][-1]\n spec2 = t['InstrumentSpectralElement2']\n\n # scan mode\n if t['ObsPlanMode'] == 'OTFMAP':\n # combine scan amps\n el,xel = t['ScanAmplitudeEL'], t['ScanAmplitudeXEL']\n if el == xel:\n t['ScanAmplitudeEL'] = str(el)\n else:\n t['ScanAmplitudeEL'] = '%s/%s'%(el,xel)\n\n # change scan modes\n if t['ObsPlanConfig'] == 'TOTAL_INTENSITY':\n if t['ScanType'] == 'Box':\n t['ObsPlanConfig'] = 'BOX'\n elif t['ScanType'] == 'Lissajous':\n t['ObsPlanConfig'] = 'LIS'\n else:\n t['ObsPlanConfig'] = '?'\n\n t['InstrumentSpectralElement1'] = '/'.join((filt,'Open'))\n \n elif t['ObsPlanConfig'] == 'POLARIZATION':\n t['ObsPlanConfig'] = 'LISPOL'\n t['InstrumentSpectralElement1'] = '/'.join((filt,spec2[-1]))\n else:\n t['ObsPlanConfig'] = '?'\n\n # set chops to none\n
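# --- Illustrative sketch: the astropy Table -> LaTeX pattern used by
# --- generate_overview_tex above; the latexdict keys are standard
# --- astropy.io.ascii Latex options, and the row data are dummy values.
from io import StringIO
from astropy.table import Table

tbl = Table(rows=[('07_0225_1', 180.0)], names=('AORID', 'ExpTime'))
with StringIO() as f:
    tbl.write(f, format='latex',
              latexdict={'caption': 'Example overview',
                         'col_align': '|c|c|',
                         'data_end': r'\hline'})
    print(f.getvalue())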
t['ChopAngle'] = -9999\n t['ChopThrow'] = -9999\n t['NodAngle'] = -9999\n t['NodThrow'] = -9999\n t['NodTime'] = None\n\n # C2N\n else:\n if t['ObsPlanConfig'] == 'POLARIZATION':\n t['ObsPlanConfig'] = 'POL'\n t['InstrumentSpectralElement1'] = '/'.join((filt,spec2[-1]))\n else:\n # C2N total_intensity\n t['ObsPlanConfig'] = 'C2N'\n spec2 = 'Open' if spec2 == 'OPEN' else spec2[-1]\n t['InstrumentSpectralElement1'] = '/'.join((filt,spec2))\n \n \n # keep certain keys\n detail = [{key:t[key] for key in keys} for t in tab]\n\n # make table\n detail = Table(detail,names=keys)\n\n # rename columns\n detail.rename_columns(tuple(key_map.keys()),tuple(key_map.values()))\n\n # remove extra scanamp col\n detail.remove_column('ScanAmplitudeXEL')\n\n # make duration in min\n detail['EstDur'] = [np.round(x/60) for x in detail['EstDur']]\n\n # if all modes are scanning, drop chop/nod params\n if all((mode in ('LIS','LISPOL','BOX') for mode in detail['Mode'])):\n detail.remove_columns(('NodTime','ChopThrow','ChopAngle','Sys'))\n # if all modes are pol, drop scan params\n if all(mode in ('POL','C2N') for mode in detail['Mode']):\n detail.remove_columns(('ScanTime','ScanAmp','ScanRate'))\n\n # if any dithering, make dither footer\n unit_map = {'Sky':'arcsec','Array':'pix'}\n if any((t.get('DitherPattern') for t in tab)):\n dithscale = (t.get('DitherScale') for t in tab)\n dithunit = (unit_map.get(t.get('DitherCoord')) for t in tab)\n dithband = (t['InstrumentSpectralElement1'][0] for t in tab)\n dithscale = (str(int(scale)).rjust(2).replace(' ','~') if scale else '' for scale in dithscale)\n footer = ['\\t%s: %s %s' % (band,scale,unit) for band,scale,unit \\\n in zip(dithband,dithscale,dithunit) if scale]\n #footer = set(footer) # REMOVES ORDER\n footer = list(dict.fromkeys(footer).keys()) # basically a set operation that preserves order\n if len(footer) == 1:\n footer = footer.pop()\n else:\n #footer = '\\\\\\\\\\n'.join(sorted(footer))\n footer = '\\\\\\\\\\n'.join(footer)\n footer = 'dither\\quad\\quad %s\\\\\\\\'%footer\n detail.meta['footer'] = footer\n\n \n\n elif instrument == 'FORCAST':\n keys = ['ObsPlanConfig','ObsPlanMode','aorID','Name',\n 'InstrumentSpectralElement1','InstrumentSpectralElement2',\n 'Repeat','NodTime',\n 'ChopThrow','ChopAngle','ChopAngleCoordinate',\n 'NodThrow','NodAngle','TotalTime']\n key_map = {'ObsPlanConfig':'Mode','aorID':'AORID','ObsPlanMode':'Type','ChopAngleCoordinate':'Sys','InstrumentSpectralElement1':'SWC','InstrumentSpectralElement2':'LWC'}\n faor_keys = ['Nod','Loop','Dithers','Scale','FDUR','TREW','TLOS','TLSPN','DitherCoord',\n 'Rewind','IntTime']\n mode_map = {'ACQUISITION':'ACQ','GRISM':'GSM','IMAGING':'IMG'}\n\n for t in tab:\n sys = t['ChopAngleCoordinate']\n t['ChopAngleCoordinate'] = 'ERF' if sys == 'Sky' else 'SIRF'\n\n # shorten filter config\n t['InstrumentSpectralElement1'] = t['InstrumentSpectralElement1'].replace('FOR_','').replace('OPEN','')\n t['InstrumentSpectralElement2'] = t['InstrumentSpectralElement2'].replace('FOR_','')\n\n # combine filters if dual mode\n if 'DUAL' in t.get('ObsPlanConfig',''):\n t['InstrumentSpectralElement1'] = '/'.join((t['InstrumentSpectralElement1'],\n t['InstrumentSpectralElement2']))\n t['InstrumentSpectralElement2'] = ''\n\n # drop second element if OPEN\n elif t['InstrumentSpectralElement2'] == 'OPEN':\n t['InstrumentSpectralElement2'] = ''\n\n # shorten chop mode\n if t['NodType'] == 'Nod_Match_Chop':\n t['ObsPlanMode'] = 'NMC'\n\n # shorten config\n t['ObsPlanConfig'] = 
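# --- Illustrative sketch: the order-preserving de-duplication used for the
# --- dither footer above -- dict.fromkeys keeps first-seen order (Python 3.7+),
# --- whereas set() would scramble it.
lines = ['A: 12 arcsec', 'B: 27 arcsec', 'A: 12 arcsec']
deduped = list(dict.fromkeys(lines))
assert deduped == ['A: 12 arcsec', 'B: 27 arcsec']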
mode_map.get(t['ObsPlanConfig'],t['ObsPlanConfig'])\n\n\n # keep certain keys\n if 'FAORfile' in tab[0]:\n keys += faor_keys\n detail = [{key:t.get(key,None) for key in keys} for t in tab]\n\n # make table\n detail = Table(detail,names=keys)\n \n # rename columns\n detail.rename_columns(tuple(key_map.keys()),tuple(key_map.values()))\n\n # if 'Nod' present from FAOR, then replace NodTime\n if 'Nod' in detail.colnames:\n detail['NodTime'] = detail['Nod']\n detail.remove_column('Nod')\n\n # if all modes are NMC, drop nod/c2nc2 params\n if all((mode == 'NMC' for mode in detail['Type'])):\n if 'NodAngle' in detail.colnames and 'NodThrow' in detail.colnames:\n detail.remove_columns(('NodAngle','NodThrow'))\n if 'Loop' in detail.colnames:\n detail.remove_column('Loop')\n\n # if all modes are c2nc2, drop repeats params\n if all((mode == 'C2NC2' for mode in detail['Type'])):\n if 'Repeat' in detail.colnames:\n detail.remove_column('Repeat')\n\n # if there are no dithers, remove dither cols\n try:\n if not any(detail['Dithers']):\n detail.remove_columns(('Dithers','Scale'))\n except KeyError:\n pass\n \n\n # remove repeats, rewinds, or loops if all are None\n for col in ('Repeat','Rewind','Loop'):\n if col in detail.colnames and (not any(detail[col])):\n detail.remove_column(col)\n\n # add TLOS info to footer\n if any([mode == 'C2NC2' for mode in detail['Type']]):\n # leave TLOS and TLSPN in there\n detail.meta['footer'] = ''\n else:\n try:\n tl = detail['TLOS'][0]\n span = detail['TLSPN'][0].split()[0]\n if tl == 'inf' or np.isinf(tl):\n tl = '--'\n losdet = 'TLOS = %s s @ %s deg'%(tl,span)\n detail.meta['footer'] = '%s\\t\\\\hspace{2in}' % losdet\n detail.remove_columns(('TLSPN','TLOS'))\n except KeyError:\n pass\n\n if 'TLSPN' in detail.colnames:\n detail.replace_column('TLSPN',\n Column([float(x.split()[0]) for x in detail['TLSPN']],name='TLSPN'))\n \n\n elif instrument == 'FIFI-LS':\n keys = ('PrimeArray','aorID','Name','TimePerPoint','Repeat','ChopType','ChopThrow','ChopAngle',\n 'ChopAngleCoordinate','MapRotationAngle','TotalTime')\n sysmap = {'J2000':'ERF','HORIZON':'SIRF'}\n key_map = {'MapRotationAngle':'FAngle','aorID':'AORID','ChopAngleCoordinate':'Sys','PrimeArray':'Prime',\n 'TimePerPoint':'NodTime','ChopType':'ChpType','WavelengthBlue':r'Blue$\\lambda$','WavelengthRed':r'Red$\\lambda$'}\n aor_keys = ('Redshift','Dichroic','WavelengthBlue','WavelengthRed','NumPtsRA','NumPtsDec')\n sct_keys = ('DITHMAP_NUMPOINTS','REDSHIFT')\n\n for t in tab:\n # change coordsys\n sys = t['ChopAngleCoordinate']\n t['ChopAngleCoordinate'] = sysmap.get(sys,sys)\n t['TotalTime'] = t['TimePerPoint'] * t['Repeat']\n \n # keep certain keys\n if 'SCTfile' in tab[0]:\n keys += sct_keys\n keys += aor_keys\n detail = [{key:t.get(key,None) for key in keys} for t in tab]\n\n # make table\n detail = Table(detail,names=keys)\n\n # rename columns\n detail.rename_columns(tuple(key_map.keys()),tuple(key_map.values()))\n\n if 'DITHMAP_NUMPOINTS' in detail.colnames:\n detail['MapPos'] = [int(x) if x is not None else None for x in detail['DITHMAP_NUMPOINTS']]\n else:\n detail['MapPos'] = [int(ra)*int(dec) if ra is not None and dec is not None else None \\\n for ra,dec in zip(detail['NumPtsRA'],detail['NumPtsDec'])]\n\n if 'REDSHIFT' in detail.colnames:\n redshift = [np.float(x) * c_km_s for x in detail['REDSHIFT']]\n detail.replace_column('Redshift',Column(redshift,name='Redshift'))\n \n else:\n #raise NotImplementedError('Instrument %s not implemented. 
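# --- Illustrative sketch: pruning astropy Table columns whose values are all
# --- falsy, the same cleanup applied to Repeat/Rewind/Loop above. Dummy data.
from astropy.table import Table

det = Table({'Mode': ['NMC', 'NMC'], 'Repeat': [0, 0], 'Loop': [2, 3]})
for col in ('Repeat', 'Rewind', 'Loop'):
    if col in det.colnames and not any(det[col]):
        det.remove_column(col)
print(det.colnames)  # ['Mode', 'Loop']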
%s' % (instrument, tab[0]['ObsBlkID']))\n warnings.warn('WARNING: Instrument %s not implemented. %s' % (instrument, tab[0]['ObsBlkID']))\n print()\n print('WARNING: Instrument %s not implemented. %s' % (instrument, tab[0]['ObsBlkID']))\n return ''\n\n if tex:\n # set formatter to replace '_' with '\\_'\n for col in ('AORID','Name'):\n detail[col].format = COL_FORMATTER\n\n # set int formatter\n blank_zero_cols = ('ScanDur','ChopThrow','ScanTime','NodTime',\n 'ScanAmp','ScanRate','NodThrow') # make a zero blank\n for col in ('NodTime','Repeat','ScanDur','ChopThrow','ChopAngle','ScanTime','EstDur',\n 'ScanAmp','ScanRate','NodThrow','NodAngle','TotalTime','IntTime',\n 'Rewind','Loop','Dithers','FDUR','TREW','TLOS','Scale','FAngle','Redshift'):\n try:\n try:\n floats = detail[col].filled(0)\n except AttributeError:\n floats = MaskedColumn(detail[col]).filled(0)\n for i,f in enumerate(floats):\n try:\n if f in (None,'None','NONE','--') or np.isnan(f):\n floats[i] = 0\n except TypeError:\n continue\n floats = (np.float(x).is_integer() for x in floats)\n if all(floats):\n if col in blank_zero_cols:\n detail[col].format = ZERO_INT_FORMATTER\n else:\n detail[col].format = INT_FORMATTER\n except (KeyError,ValueError):\n continue\n\n # force some cols to round\n for col in ('IntTime','FDUR','TLOS'):\n if col in detail.colnames:\n detail.replace_column(col,Column(np.rint(detail[col]),name=col))\n detail[col].format = INT_FORMATTER\n\n # set units\n detail.meta['units'] = {'NodTime':'s','ChopThrow':r'$^{\\prime\\prime}$','ChopAngle':r'$^\\circ$','ScanDur':'s','ScanTime':'s','ScanAmp':r'$^{\\prime\\prime}$','ScanRate':'$^{\\prime\\prime}$/s','TotalTime':'s','NodDwell':'s','NodAngle':r'$^\\circ$','NodThrow':r'$^{\\prime\\prime}$','IntTime':'s','FDUR':'s','TLOS':'s','TREW':'s','TLSPN':r'$^\\circ$',r'Blue$\\lambda$':r'$\\mu$m',r'Red$\\lambda$':r'$\\mu$m','Redshift':'km/s','FAngle':r'$^\\circ$','EstDur':'min'}\n\n caption = '\\\\captionline{Observation details %s:}{}' % tab[0]['ObsBlkID']\n detail.meta['caption'] = caption.replace('_','\\_')\n\n # add strikethrough\n observed_dict = {t['aorID']:t.get('observed',False) for t in tab}\n detail.meta['observed'] = observed_dict\n\n\n # if FORCAST, split into two tables\n if instrument == 'FORCAST' and 'FAORfile' in tab[0]:\n detail2 = detail.copy()\n\n # fix metadata\n if 'footer' in detail.meta:\n del detail.meta['footer']\n if 'caption' in detail2.meta:\n del detail2.meta['caption']\n\n d_keep = filter(lambda x: x in detail.colnames, ('Mode','Type','AORID','Name','SWC','LWC',\n 'ChopThrow','ChopAngle','NodThrow','NodAngle',\n 'Sys','TotalTime'))\n d2_keep = filter(lambda x: x in detail2.colnames, ('AORID','Repeat','NodTime','Dithers','Scale','Loop',\n 'FDUR','TREW','TLOS','TLSPN','IntTime'))\n\n detail.keep_columns(tuple(d_keep))\n detail.rename_column('TotalTime','ReqTime')\n detail.meta['units']['ReqTime'] = 's'\n\n detail2.keep_columns(tuple(d2_keep))\n if 'Scale' in detail2.colnames:\n detail2.meta['units']['Scale'] = r'$^{\\prime\\prime}$'\n\n detail = [detail,detail2]\n\n\n # if FIFI, split into two tables\n elif instrument == 'FIFI-LS': # and 'SCTfile' in tab[0]\n detail2 = detail.copy()\n\n d_keep = filter(lambda x:x in detail.colnames, ('Prime','AORID','Name','NodTime','Repeat','ChpType',\n 'ChopThrow','ChopAngle','Sys',\n 'FAngle','TotalTime'))\n d2_keep = filter(lambda x:x in detail2.colnames, ('AORID','Redshift','Dichroic',r'Blue$\\lambda$',r'Red$\\lambda$','MapPos'))\n\n detail.keep_columns(tuple(d_keep))\n detail2.keep_columns(tuple(d2_keep))\n\n 
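# --- Illustrative sketch: the integer-likeness test above can be written
# --- without the np.float alias (removed in NumPy 1.24) -- fill masked values
# --- first, then use the builtin float, exactly as the formatter loop intends.
from astropy.table import MaskedColumn

col = MaskedColumn([30.0, 0.0, 45.0], mask=[False, True, False])
values = col.filled(0)
all_integral = all(float(x).is_integer() for x in values)
print(all_integral)  # True -> safe to apply an integer column format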
blue_order = ['M2' if row[r'Blue$\\lambda$'] < 71 else 'M1' for row in detail2]\n detail2.add_column(Column(blue_order,name='BOrder'),index=2)\n detail2.replace_column('Dichroic',Column(['D%s'%row['Dichroic'].split('_')[0] for row in detail2],name='Dichroic'))\n #detail2[r'Blue$\\lambda$'].format = '%.3f'\n #detail2[r'Red$\\lambda$'].format = '%.3f'\n # get closest wavelength for lambda cols\n blue_lam = np.array([np.float(x) if x is not None else np.nan for x in detail2[r'Blue$\\lambda$']])\n blue_str = (np.argmin(np.abs(OBS_REF_BLUE_LAMBDAS-x)) for x in blue_lam)\n blue_str = (OBS_REF_BLUE_LINES[x].split() for x in blue_str)\n blue_str = ['%.2f %s'%(line,' '.join(x[0:-1])) for line,x in zip(blue_lam,blue_str)]\n detail2.replace_column(r'Blue$\\lambda$',Column(blue_str,name=r'Blue$\\lambda$'))\n red_lam = np.array([np.float(x) if x is not None else np.nan for x in detail2[r'Red$\\lambda$']])\n red_str = (np.argmin(np.abs(OBS_REF_RED_LAMBDAS-x)) for x in red_lam)\n red_str = (OBS_REF_RED_LINES[x].split() for x in red_str)\n red_str = ['%.2f %s'%(line,' '.join(x[0:-1])) for line,x in zip(red_lam,red_str)]\n detail2.replace_column(r'Red$\\lambda$',Column(red_str,name=r'Red$\\lambda$'))\n\n if 'MapPos' in detail2:\n detail2.rename_column('MapPos',r'\\#Dith')\n \n if 'caption' in detail2.meta:\n del detail2.meta['caption']\n\n detail = [detail,detail2]\n\n \n # return tex string\n detail = generate_details_tex(detail)\n\n else:\n # convert to list of dicts\n detail = detail.to_pandas().to_dict('records')\n \n # set int formatter\n intcols = ('NodTime','Repeat','ScanDur','ChopThrow','ChopAngle','ScanTime',\n 'Rewind','Loop','Dithers',\n 'ScanAmp','ScanRate','NodThrow','NodAngle','TotalTime','IntTime')\n intformat_func = partial(INT_CONVERTER,cols=intcols)\n detail = list(map(intformat_func,detail))\n return detail\n\n\ndef generate_details_tex(detail):\n '''Generate latex string of details table'''\n if isinstance(detail,(Table,dict)):\n # single detail table\n detail = [detail]\n\n texcodes = deque()\n \n for d in detail:\n # col align param must be special for boldface header line\n col_align = ['c']*len(d.colnames)\n #for lcol in (r'Blue$\\lambda$',r'Red$\\lambda$'):\n # if lcol in d.colnames:\n # col_align[d.colnames.index(lcol)] = 'l'\n col_align = '|^'.join(col_align)\n col_align = '|$%s|'%col_align\n\n preamble = r'\\setlength{\\tabcolsep}{0.25em}'\n #preamble += '\\n\\\\centering\\n\\\\captionsetup{justification=centering}'\n tablefoot = r'\\\\[0.5em]' + '\\n' + r'\\raggedleft{' + \\\n d.meta.get('footer','')+'}\\n'+r'\\setlength{\\tabcolsep}{1em}'\n colnames = d.colnames.copy()\n\n # rename colnames to have headercolor\n newcols = {col:'\\\\cellcolor{headercolor}%s'%col for col in d.colnames}\n for col,newcol in newcols.items():\n #newcol = '\\\\cellcolor{headercolor}%s'%col\n d.rename_column(col,newcol)\n d.meta['units'][newcol] = d.meta['units'].get(col,'')\n\n # add gray color to unit cells\n units = {col:'\\cellcolor{headercolor}%s'%d.meta['units'].get(col,'') for col in d.colnames}\n \n with StringIO() as f:\n d.write(f,format='latex',\n latexdict={'header_start':r'\\hline\\rowstyle{\\bfseries}',\n 'tablealign':'h!',\n 'caption':d.meta.get('caption',''),\n 'col_align':col_align,\n 'units':units,\n 'data_end':'\\hline',\n 'preamble':preamble,\n 'tablefoot':tablefoot})\n texcode = f.getvalue()\n\n # pull left if too long\n try:\n if (((max([len(name) for name in d[newcols['Name']]]) > 13) and ('ChopThrow' in colnames)) or (('ChopThrow' in colnames) and ('ScanAmp' in colnames))):\n 
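# --- Illustrative sketch: the nearest-reference-line lookup used for the
# --- FIFI-LS wavelength columns above. Reference values are examples, not the
# --- module's OBS_REF_* tables.
import numpy as np

ref_lambdas = np.array([51.8, 63.2, 88.4, 121.9, 157.7])  # microns (example)
ref_lines = ['[OIII]', '[OI]', '[OIII]', '[NII]', '[CII]']
lam = 157.3
nearest = int(np.argmin(np.abs(ref_lambdas - lam)))
print('%.2f %s' % (lam, ref_lines[nearest]))  # 157.30 [CII]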
texcode = texcode.replace(r'\\begin{tabular}','\\\\hspace*{-1cm}\\n\\\\begin{tabular}')\n elif 'NodThrow' in colnames or 'NodThw' in colnames:\n texcode = texcode.replace(r'\\begin{tabular}','\\\\hspace*{-1cm}\\n\\\\begin{tabular}')\n else:\n pass\n except KeyError:\n if 'Scale' in colnames:\n texcode = texcode.replace(r'\\begin{tabular}','\\\\hspace*{-1cm}\\n\\\\begin{tabular}')\n #texcode = texcode.replace(r'\\begin{tabular}','\\\\hspace*{-1cm}\\n\\\\begin{tabular}')\n\n #make small\n texcode = texcode.replace(r'\\begin{tabular}','\\\\footnotesize\\n\\\\begin{tabular}')\n \n # shrink whatever we can\n texcode = texcode.replace('Polarimetry','POL')\n texcode = texcode.replace('Lissajous','LIS')\n texcode = texcode.replace('Nod Time','Nod')\n texcode = texcode.replace('nan','')\n texcode = texcode.replace('None','')\n texcode = texcode.replace('Chop Throw','ChpT')\n texcode = texcode.replace('ChopThrow','ChpT')\n texcode = texcode.replace('Chop Ang','ChpA')\n texcode = texcode.replace('ChopAngle','ChpA')\n texcode = texcode.replace('Nod Throw','NodT')\n texcode = texcode.replace('NodThrow','NodT')\n texcode = texcode.replace('Nod Ang','NodA')\n texcode = texcode.replace('NodAngle','NodA')\n texcode = texcode.replace('Nod Time','NodT')\n if 'Nod Dwell' in colnames:\n texcode = texcode.replace('Nod Dwell','Nod')\n\n\n # strikethrough 'observed' aorIDs\n texcode = strikethrough(d.meta['observed'],texcode)\n \n texcodes.append(texcode)\n\n texcodes = '\\n\\\\vspace*{-3em}\\n'.join(list(texcodes))\n texcodes = '\\\\vspace*{-3em}\\n%s' % texcodes\n return texcodes\n\n\ndef make_positions(tab, tex=True):\n '''Make position tables'''\n\n if tab[0]['aorID'] in ('99_9999_99','--'):\n return ''\n \n rows = deque()\n for t in tab:\n if t.get('RA') is None:\n continue\n aorid = t['aorID']\n #name = t['POSName'] if t.get('POSName') else t['Name']\n name = t['POSName'] if t.get('POSName') else t['target']\n coord = SkyCoord(ra=t['RA'],dec=t['DEC'],unit=(u.hourangle,u.deg))\n ra,dec = coord.to_string('hmsdms',sep=':',precision=2).split()\n order = t.get('order',0)\n #order = t['order']\n num = t.get('aornum',0)\n rows.append((aorid,name,ra,dec,order,num))\n\n if not rows:\n return ''\n position = Table(rows=list(rows),names=('AORID','Name','RA','DEC','Order','aornum'))\n position.meta['caption'] = '\\\\captionline{Positions}{}'\n\n # if all positions are the same, remove duplicates\n origlen = len(position)\n position = unique(position,keys=('RA','DEC'))\n position.sort(['Order','aornum','AORID'])\n if len(position) == 1 and origlen != 1:\n position['AORID'][0] = '_'.join(position['AORID'][0].split('_')[0:2]) + '_*'\n\n position.remove_columns(('Order','aornum'))\n if tex:\n for col in ['AORID','Name']:\n position[col].format = COL_FORMATTER\n\n # add strikethrough\n observed_dict = {t['aorID']:t.get('observed',False) for t in tab}\n position.meta['observed'] = observed_dict\n position = generate_pos_tex(position)\n else:\n position = position.to_pandas().to_dict('records')\n\n return position\n\ndef generate_pos_tex(position):\n '''Generate latex string of positions table'''\n\n if position is None:\n return ''\n\n # col align param must be special for boldface header line\n col_align = ['c']*len(position.colnames)\n col_align = '|^'.join(col_align)\n col_align = '|$%s|'%col_align\n\n preamble = '\\\\centering\\n\\\\captionsetup{justification=centering}'\n\n # rename colnames to have headercolor\n for col in position.colnames:\n newcol = '\\\\cellcolor{headercolor}%s'%col\n 
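# --- Illustrative sketch: parsing an hourangle/degree pair and reformatting it
# --- as sexagesimal strings, as make_positions does for each row above.
from astropy import units as u
from astropy.coordinates import SkyCoord

coord = SkyCoord(ra='05:35:17.3', dec='-05:23:28.0', unit=(u.hourangle, u.deg))
ra, dec = coord.to_string('hmsdms', sep=':', precision=2).split()
print(ra, dec)  # 05:35:17.30 -05:23:28.00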
position.rename_column(col,newcol)\n\n with StringIO() as f:\n position.write(f,format='latex',\n latexdict={'header_start':r'\\hline\\rowstyle{\\bfseries}',\n 'tablealign':'h!',\n 'caption':position.meta['caption'],\n 'preamble':preamble,\n 'col_align':col_align,\n 'data_end':'\\hline'})\n texcode = f.getvalue()\n\n # strikethrough 'observed' aorIDs\n texcode = strikethrough(position.meta['observed'],texcode)\n\n return texcode\n\n\ndef strikethrough(aordict,texcode):\n for aorid,obs in aordict.items():\n if obs:\n a = aorid.replace('_','\\\\\\_')\n pattern = r'(%s\\s\\&.*\\s\\\\\\\\)'%a\n texcode = re.sub(pattern,STRIKETHROUGH_REPL,texcode)\n return texcode\n\n\ndef get_pos_bundle(tab, dcs, odir):\n '''Download pos bundle and filter by included AORs'''\n posnames = [t['POSName'] if t.get('POSName') else t['Name'] for t in tab]\n fpid = tab[0]['FlightPlan']\n posfiles = ['./%s/%s_%s.pos'%(fpid,fpid,x) for x in posnames]\n\n postarfile = dcs.getPOSBundle(fpid)\n if postarfile is None:\n return None\n with tarfile.open(postarfile) as t:\n members = t.getmembers()\n members = list(filter(lambda x: x.name in posfiles, members))\n t.extractall(odir,members=members)\n # move to directory above\n for fname in Path(odir/fpid).glob('*.pos'):\n dest = fname.parent.parent/(fname.name.split(fpid)[1][1:])\n shutil.move(fname,dest)\n try:\n Path(odir/fpid).rmdir()\n except FileNotFoundError:\n pass\n return postarfile\n\ndef generate_overlays(table):\n # add row index for color cycle\n for idx,row in enumerate(table):\n row['cidx'] = idx\n overlays = list(map(generate_overlay,table))\n\n for overlay,row in zip(overlays,table):\n row['overlay'] = overlay\n '''\n if isinstance(overlay,dict):\n row['overlay'] = overlay\n else:\n # FIFI has two overlays per aorid\n row['overlayB'] = overlay[0]\n row['overlayR'] = overlay[1]\n '''\n return table\n\ndef generate_overlay(row,nod=True,dithers=True,FIFI_label=None):\n #tab = unique(tab,keys=['RA_aor','DEC_aor'])\n\n if row['aorID'] in ('99_9999_99','--'):\n return None\n \n try:\n coord = SkyCoord(ra=row['RA'],dec=row['DEC'],unit=(u.hourangle,u.deg))\n except ValueError:\n # likely a solar system object\n return None\n \n # get roll angle\n rolls = (row['ROF_start'],row['ROF_end'])\n\n # override displayed roll if specified in config\n roll = row.get('roll')\n if isinstance(roll,bool) and roll:\n if row['InstrumentName'] == 'FIFI-LS':\n roll = float(row['MapRotationAngle'])*u.deg\n else:\n roll = float(rolls[0])*u.deg\n if isinstance(roll,(float,int,np.float,np.int)):\n roll = roll*u.deg\n elif isinstance(roll,u.Quantity):\n roll = roll\n elif isinstance(roll,str):\n roll = u.Quantity(np.float(roll),u.deg)\n else:\n if row['InstrumentName'] == 'FIFI-LS':\n roll = float(row['MapRotationAngle'])*u.deg\n else:\n roll = float(rolls[0])*u.deg\n\n try:\n TARFoffset = TARFOFFSET[row['InstrumentName']]\n except KeyError:\n # likely wrong instrument?\n TARFoffset = 0*u.deg\n\n # get band and mode for FOV\n band = row['InstrumentSpectralElement1'].split('_')[-1] if 'FIF' not in row['InstrumentSpectralElement1'] else row['InstrumentSpectralElement1']\n mode = row['ObsPlanConfig']\n\n if row['ObsPlanMode'] == 'C2N' and mode == 'TOTAL_INTENSITY':\n mode = 'C2N'\n else:\n mode = 'TOT' if mode == 'TOTAL_INTENSITY' else 'POL'\n #mode = 'TOT' if mode == 'TOTAL_INTENSITY' or row['ObsPlanMode'] == 'OTFMAP' else 'POL'\n\n label = '%s_%s'%(band,mode)\n try:\n fov = FOV[label]\n except KeyError:\n # assume FORCAST\n if 'G' in label:\n fov = FOV['FORCAST_GSM']\n else:\n fov = 
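# --- Illustrative sketch: extracting only selected members from a tarball, the
# --- same filter-then-extractall pattern as get_pos_bundle above. Paths are
# --- placeholders.
import tarfile

def extract_selected(tarpath, wanted, outdir):
    """Extract only the members of `tarpath` whose names are in `wanted`."""
    with tarfile.open(tarpath) as t:
        members = [m for m in t.getmembers() if m.name in wanted]
        t.extractall(outdir, members=members)
    return [m.name for m in members]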
FOV['FORCAST_IMG']\n\n if row['InstrumentName'] == 'FIFI-LS':\n if FIFI_label:\n labels = [FIFI_label]\n else:\n # make both FIFI cameras\n labels = ['FIF_BLUE','FIF_RED']\n overlays = deque()\n for label in labels:\n width,height = [f*u.arcmin for f in FOV[label]]\n name = '%s %s' % (row['Name'],coord.to_string('hmsdms',precision=2,sep=':'))\n if label == 'FIF_RED' and not FIFI_label:\n # need to tweak FIFI red boresite\n coord = coord.directional_offset_by(roll+TARFoffset+90*u.deg,-0.162*u.arcmin)\n\n \n o = make_box(coord,width,height,roll,TARFoffset,label=label,name=name,\n color=COLORS[row['cidx']%len(COLORS)],split=False,aorid=row['aorID'])\n overlays.append(o)\n overlays = list(overlays)\n else:\n # HAWC and FORCAST\n width,height = [f*u.arcmin for f in fov]\n\n name = '%s %s' % (row['Name'],coord.to_string('hmsdms',precision=2,sep=':'))\n\n split = True if mode == 'TOT' and label in FOV else False\n\n if label not in FOV:\n # FORCAST\n label = label.replace('_TOT','')\n label = label.replace('_POL','')\n\n overlays = [make_box(coord,width,height,roll,TARFoffset,label=label,name=name,\n color=COLORS[row['cidx']%len(COLORS)],split=split,aorid=row['aorID'])]\n\n for overlay in overlays:\n overlay['roll'] = (float(rolls[0])*u.deg,float(rolls[1])*u.deg)\n\n\n if row['NodType'] != 'OTFMAP':\n # we are chop/nod dithering\n if dithers and row['DitherPattern'] not in (None,'None'):\n if row['ChopAngleCoordinate'] == 'Sky':\n scale = row['DitherScale']*u.arcsec\n else:\n try:\n scale = row['DitherScale']*PIXSIZE[label]\n except KeyError:\n if 'G' in label:\n scale = row['DitherScale']*PIXSIZE['FORCAST_GSM']\n else:\n scale = row['DitherScale']*PIXSIZE['FORCAST_IMG']\n\n diths = make_dithers(overlay['center'],scale=scale,angle=roll)\n\n overlay['dithers'] = [make_box(dith, width, height, angle=roll, TARFoffset=TARFoffset, label=label, split=split, color=overlay['color'], reglabel='_d',name=name) for dith in diths]\n\n if nod and row['ObsPlanMode'] == 'C2NC2':\n chopthrow = row['ChopThrow']*u.arcsec\n chopangle = row['ChopAngle']*u.deg\n nodthrow = row['NodThrow']*u.arcsec\n nodangle = row['NodAngle']*u.deg\n\n if row['ChopAngleCoordinate'] == 'Array':\n chopangle += roll\n\n nodAchopB,nodBchopA,nodBchopB = make_C2NC2(overlay['center'],\n chopthrow=chopthrow,chopangle=chopangle,\n nodthrow=nodthrow,nodangle=nodangle)\n nodAchopBdict = row.copy()\n nodBchopAdict = row.copy()\n nodBchopBdict = row.copy()\n\n for ntab,n in zip((nodAchopBdict,nodBchopAdict,nodBchopBdict),(nodAchopB,nodBchopA,nodBchopB)):\n ra,dec = n.to_string('hmsdms').split()\n ntab['RA'] = ra\n ntab['DEC'] = dec\n\n overlay['nods'] = [generate_overlay(n, nod=False,dithers=False) \\\n for n in (nodAchopBdict,nodBchopAdict,nodBchopBdict)]\n\n elif nod:\n chopthrow = row['ChopThrow']*u.arcsec\n chopangle = row['ChopAngle']*u.deg\n\n if row['ChopAngleCoordinate'] == 'Array':\n chopangle += roll\n\n nodA,nodB = make_NMC(overlay['center'],\n chopthrow=chopthrow,\n chopangle=chopangle,label=overlay['label'])\n\n nodAdict = row.copy()\n nodBdict = row.copy()\n \n ra,dec = nodA.to_string('hmsdms').split()\n nodAdict['RA'] = ra\n nodAdict['DEC'] = dec\n ra,dec = nodB.to_string('hmsdms').split()\n nodBdict['RA'] = ra\n nodBdict['DEC'] = dec\n\n if row['InstrumentName'] == 'FIFI-LS':\n # only make current label\n fiflabel = overlay['label']\n else:\n fiflabel = None\n overlay['nods'] = [generate_overlay(n, nod=False,dithers=False, FIFI_label=fiflabel) \\\n for n in (nodAdict,nodBdict)]\n\n\n else:\n # in hawc scanning mode, we have to 
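# --- Illustrative sketch of nod-match-chop geometry (make_NMC itself is not
# --- shown in this file): the two nod positions sit half a chop throw from the
# --- target along the chop position angle, in opposite directions.
from astropy import units as u
from astropy.coordinates import SkyCoord

def nmc_nods(center, chopthrow, chopangle):
    half = chopthrow / 2
    nod_a = center.directional_offset_by(chopangle, half)
    nod_b = center.directional_offset_by(chopangle + 180 * u.deg, half)
    return nod_a, nod_b

a, b = nmc_nods(SkyCoord(10 * u.deg, 20 * u.deg), 60 * u.arcsec, 30 * u.deg)
print(a.separation(b).to(u.arcsec))  # ~60 arcsec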
redraw the final box size\n # based on the scan angles relative to N\n # total scan throw is twice the amplitude\n ampx,ampy = 2 * u.Quantity(row['ScanAmplitudeEL'],u.arcsec), \\\n 2 * u.Quantity(row['ScanAmplitudeXEL'],u.arcsec)\n ampx += width\n ampy += height\n\n # comment out the above two lines to use scanangles, and change width and height\n\n overlay['dithers'] = [make_box(coord,ampx,ampy,roll,TARFoffset,label=label,name=name,\n color=COLORS[row['cidx']%len(COLORS)],\n scan=True,reglabel='scan',scanangle=None,\n aorid=row['aorID'],scanamp=(ampx,ampy))]\n \n #overlay['dithers'] = [make_box(coord,width,height,roll,TARFoffset,label=label,name=name,\n # color=COLORS[row['cidx']%len(COLORS)],\n # scan=True,reglabel='scan',scanangle=None,\n # aorid=row['aorID'],scanamp=(ampx,ampy))]\n\n overlay['aorID'] = row['aorID']\n overlay['InstrumentName'] = row['InstrumentName']\n return overlays\n\ndef get_overlay_params(tab):\n #####UNUSED\n overlays = deque()\n for idx,row in enumerate(tab):\n if 'IMGOVERRIDES' in tab.meta:\n key = 'Leg%i__%s'%(row['Leg'],row['aorID'])\n if key in tab['IMGOVERRIDES']:\n roll = u.Quantity(tab['IMGOVERRIDES'][key],u.deg)\n elif tab['IMGOVERRIDES'].get('roll',False):\n roll = u.Quantity(row['Angle'],u.deg)\n else:\n roll = None\n else:\n roll = None\n overlay = generate_overlay(tab,idx=idx,roll=roll,TARFoffset=TARFoffset)\n overlays.append(overlay)\n \n overlays = [overlay for overlay in overlays if overlay is not None]\n return overlays\n\ndef get_recenter_image(overlaylist):\n '''Get longest wavelength \"recenter\"'''\n recenter = None\n for overlay in overlaylist:\n if 'recenter' not in overlay:\n continue\n recenter = overlay['recenter']\n\n return recenter\n\ndef make_figures(table,fdir,reg=False,guidestars=None,irsurvey=None,savefits=False,fpi=False,**kwargs):\n '''Generate figure from each overlay'''\n\n # make defaults from table\n vrows = list(filter(lambda row:row.get('overlay'),table))\n imgoptions = IMGOPTIONS.copy()\n if vrows and vrows[0]['InstrumentName'] == 'FIFI-LS':\n imgoptions['width'] = 0.2*u.deg\n imgoptions['height'] = 0.2*u.deg\n overlays = [{k:row.get(k,v) for k,v in imgoptions.items()} for row in vrows]\n\n for o,row in zip(overlays,vrows):\n if row.get('overlay'):\n if isinstance(row['overlay'],dict):\n o.update(row['overlay'])\n else:\n # if there is a second overlay (FIFI), add it\n if len(row['overlay']) == 2:\n vals = o.copy()\n o['overlay2'] = vals\n o.update(row['overlay'][0])\n o['overlay2'].update(row['overlay'][1])\n else:\n o.update(row['overlay'][0])\n\n\n #overlays = [row['overlay'] for row in table if row.get('overlay')]\n \n # remove any with 'nofigure' flag\n overlays = list(filter(lambda o:not o.get('nofigure',False),overlays))\n if not overlays:\n return None\n\n # image options are grabbed from first row in blk\n options = overlays[0].copy()\n if irsurvey is not None:\n options['irsurvey'] = irsurvey\n\n if fpi:\n options['fpi'] = fpi\n\n if 'width' in options and isinstance(options['width'],str):\n options['width'] = u.Quantity(float(options['width']),u.deg)\n if 'height' in options and isinstance(options['height'],str):\n options['height'] = u.Quantity(float(options['height']),u.deg)\n\n try:\n fig,hdu = get_image(overlays,**options)\n except TypeError:\n warnings.warn('Issue querying SkyView. 
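# --- Illustrative sketch: the per-row option merge used in make_figures above
# --- -- start from a dict of defaults and let each row override only the keys
# --- it actually carries. The defaults here are example values, not IMGOPTIONS.
IMG_DEFAULTS = {'width': 0.3, 'height': 0.3, 'survey': 'DSS2 Red'}

rows = [{'survey': '2MASS-K'}, {}]
options = [{k: row.get(k, v) for k, v in IMG_DEFAULTS.items()} for row in rows]
print(options[0]['survey'], options[1]['survey'])  # 2MASS-K DSS2 Red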
Skipping figure.',RuntimeWarning)\n return None\n\n if fig is None:\n return None\n\n if guidestars is not None:\n # get hdu footprint\n try:\n footprint = WCS(hdu).calc_footprint()\n except AttributeError:\n # likely hdu is an hdu list from irhdu\n footprint = WCS(hdu[0]).calc_footprint()\n box = Polygon(footprint)\n \n guides = guidestars[table[0]['ObsBlkID']]\n try:\n guidecoord = SkyCoord([g['COORD'] for g in guides])\n except IndexError:\n guidecoord = None\n\n if guidecoord:\n # do this check in case the above try/except fails\n fig.show_markers(guidecoord.ra,guidecoord.dec,\n marker='o',s=80,\n linewidths=2,edgecolor=GCOLOR)\n\n points = MultiPoint([(g.ra.value,g.dec.value) for g in guidecoord])\n inside = (box.contains(p) for p in points)\n\n gregs = deque()\n\n for g,i in zip(guides,inside):\n if not i:\n continue\n fig.add_label(g['COORD'].ra.value, g['COORD'].dec.value-0.01, g['Name'],layer=g['Name'],size=8)\n txt = fig._layers[g['Name']]\n txt.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),\n path_effects.Normal()])\n greg = PointSkyRegion(g['COORD'],\n meta=RegionMeta({'label':' '.join((g['Name'],g['Imager'],g['Catalog']))}),\n visual=RegionVisual({'color':GCOLOR}))\n gregs.append(greg)\n \n \n '''\n guides = guidestars[table[0]['ObsBlkID']]\n guides = list(filter(lambda g: g['Radius'] < np.hypot(table[0]['width'],table[0]['height']),guides))\n guidecoord = SkyCoord([g['COORD'] for g in guides])\n fig.show_markers(guidecoord.ra,guidecoord.dec,\n marker='o',s=80,\n linewidths=2)#edgecolor='#FFD700')\n for g in guides:\n fig.add_label(g['COORD'].ra.value, g['COORD'].dec.value, g['Name'])\n '''\n\n # show chop/nod/dithers\n for o in overlays:\n if 'dithers' in o:\n for dith in o['dithers']:\n fig.show_polygons(dith['box'],edgecolor=dith['color'],lw=1,\n linestyle='dashed',alpha=0.7)\n if 'nods' in o:\n for nod in o['nods']:\n try:\n fig.show_polygons(nod['box'],edgecolor=o['color'],lw=1.5,\n linestyle='dotted',alpha=0.7)\n except TypeError:\n for n in nod:\n fig.show_polygons(n['box'],edgecolor=o['color'],lw=1.5,\n linestyle='dotted',alpha=0.7)\n\n if 'overlay2' in o:\n o = o.get('overlay2','')\n if 'dithers' in o:\n for dith in o['dithers']:\n fig.show_polygons(dith['box'],edgecolor=dith['color'],lw=1,\n linestyle='dashed',alpha=0.7)\n if 'nods' in o:\n for nod in o['nods']:\n try:\n fig.show_polygons(nod['box'],edgecolor=o['color'],lw=1.5,\n linestyle='dotted',alpha=0.7)\n except TypeError:\n for n in nod:\n fig.show_polygons(n['box'],edgecolor=o['color'],lw=1.5,\n linestyle='dotted',alpha=0.7)\n \n\n\n #outfile = fdir/('Leg%02d.png'%tab['Leg'][0])\n fdir.mkdir(exist_ok=True)\n outfile = fdir/('Leg%02d.pdf'%table[0]['Leg'])\n \n fig.savefig(outfile,dpi=300)\n fig.close()\n\n if savefits:\n fitsdir = fdir.parent/'images'\n fitsdir.mkdir(exist_ok=True)\n aorid = table[0]['planID']\n if not isinstance(hdu,fits.HDUList):\n hdu = [hdu]\n for h in hdu:\n surv = h.header['SURVEY'].strip().replace(' ','_')\n fname = '%s_%s_%s.fits'%(outfile.stem,aorid,surv)\n h.writeto(fitsdir/fname,overwrite=True,output_verify='silentfix+ignore')\n\n if reg:\n regs = deque()\n for row in table:\n try:\n reg = row['overlay'].get('reg')\n except AttributeError:\n reg = deque()\n for o in row['overlay']:\n r = o.get('reg')\n if isinstance(r,str):\n reg.append(r)\n else:\n reg.extend(r)\n #reg = [o.get('reg') for o in row['overlay']]\n\n if isinstance(reg,str):\n reg = [reg]\n if reg:\n regs.extend(reg)\n try:\n if row['overlay'].get('dithers'):\n dithreg = [d.get('reg') for d in 
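# --- Illustrative sketch: the point-in-footprint test used for guide stars
# --- above, with shapely. Footprint corners are example values in degrees.
from shapely.geometry import Point, Polygon

footprint = Polygon([(10.0, 20.0), (10.5, 20.0), (10.5, 20.5), (10.0, 20.5)])
guides = [(10.2, 20.3), (11.0, 20.1)]
inside = [footprint.contains(Point(x, y)) for x, y in guides]
print(inside)  # [True, False]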
row['overlay']['dithers'] if d.get('reg')]\n if dithreg:\n regs.extend(dithreg)\n except AttributeError:\n for o in row['overlay']:\n dithreg = [d.get('reg') for d in o['dithers'] if d.get('reg')]\n if dithreg:\n regs.extend(dithreg)\n\n regs = [DS9Parser(r).shapes.to_regions()[0] for r in set(list(regs))]\n if guidestars is not None:\n regs += gregs\n \n rdir = fdir.parent/'reg'\n rdir.mkdir(exist_ok=True)\n aorid = table[0]['planID']\n regfile = '%s_%s.reg'%(outfile.stem,aorid)\n regfile = rdir/regfile\n #with open(regfile,'w') as f:\n # f.write('\\n'.join(regs))\n write_ds9(regs,regfile)\n \n \n return outfile.relative_to(fdir.parent)\n\ndef make_comments(table):\n '''Generate tex for obsblk comments'''\n\n if table[0]['aorID'] in ('99_9999_99','--'):\n comment = table[0]['ObsBlkComment']\n if comment in (None,'None'):\n comment = ''\n comment = utf8tolatex(comment)\n comment = comment.replace('\\n',r'\\\\')\n comment = comment.replace(r'{\\textbackslash}{\\textbackslash}',r'\\\\')\n return comment\n\n comments = table[0].get('ObsBlkComment','')\n if not comments:\n return ''\n\n #safe convert to latex\n comments = utf8tolatex(comments)\n comments = comments.replace(r'{\\textbackslash}{\\textbackslash}',r'\\\\')\n return comments\n\ndef hawc_sio_comments(table):\n '''Assign comment block based on mode'''\n\n if table[0]['aorID'] in ('99_9999_99','--'):\n comment = table[0]['ObsBlkComment']\n comment = utf8tolatex(comment)\n comment = comment.replace('\\n',r'\\\\')\n comment = comment.replace(r'{\\textbackslash}{\\textbackslash}',r'\\\\')\n return comment\n \n head = r'Procedure for Instrument Operator: \\\\'\n \n if any([((row['ObsPlanConfig'] == 'POLARIZATION') and (row['ObsPlanMode'] == 'OTFMAP')) for row in table]):\n mode = 'LISPOL'\n elif any([((row['ObsPlanConfig'] == 'POLARIZATION') and (row['ObsPlanMode'] == 'C2N')) for row in table]):\n mode = 'Polarimetry'\n else:\n mode = 'Lissajous'\n\n comment = '%s%s' % (head, HAWC_SIO[mode])\n \n return comment\n\ndef copy_comments(filename):\n \"\"\"Return comments from .tex file, if they exist\"\"\"\n # first find existing .tex file\n filename = Path(filename)\n if filename.exists():\n with open(filename,'r') as f:\n text = f.read()\n else:\n # try going back to one older version\n parents = [str(p) for p in filename.parents]\n top = parents[-2]\n if top.split('_')[-1][0] == 'v':\n # this is versioned\n version = top[-1]\n name = parents[-3].split('/')[-1]\n try:\n version = int(version)\n prev_version = version - 1\n except ValueError:\n prev_version = chr(ord(version) - 1)\n newparent = '%s%s'%(top[:-1],prev_version)\n newpath = Path(newparent).joinpath(name)/filename.name\n\n if Path(newpath).exists():\n filename = Path(newpath)\n with open(filename,'r') as f:\n text = f.read()\n else:\n return None\n \n else:\n return None\n \n if r'% 1:\r\n wpis.czy_gotow_do_strzalu = True #wpis.czy_gotow_do_strzalu: sprawdza czy przed chwilą nie został wystrzelony pocisk, aby uniknąć spamu\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gra = False\r\n glowna_petla = False\r\n wpis.wydarzenia(event)\r\n #sterowanie gracza\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n gracz.gracz_zmiana_X = -5\r\n if event.key == pygame.K_RIGHT:\r\n gracz.gracz_zmiana_X = 5\r\n #strzelanie\r\n if event.key == pygame.K_SPACE:\r\n if wpis.czy_gotow_do_strzalu == True:\r\n pociski.append(classes.pocisk(gracz.gracz_X, gracz.gracz_Y - 45, wpis.text)) #wrzucanie wartości do pocisku i puszczenie go w obieg\r\n 
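# --- Illustrative sketch (plain Python, no pygame required): the fire-rate
# --- limiter used around the SPACE-key handler above -- a new shot is allowed
# --- only after a minimum interval has elapsed. The interval is an example.
import time

COOLDOWN_S = 1.0
last_shot = 0.0

def try_fire():
    """Return True (and record the shot time) if the cooldown has elapsed."""
    global last_shot
    now = time.time()
    if now - last_shot > COOLDOWN_S:
        last_shot = now
        return True
    return False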
wpis.text = \"\"\r\n wpis.czy_gotow_do_strzalu = False\r\n odstep_miedzy_strzalami = time.time()\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n gracz.gracz_zmiana_X = 0\r\n\r\n #meteor handling\r\n if int(time.time() - start) > N + 5: #a meteor should appear every N + 5 seconds\r\n start = time.time()\r\n rownanie = functions.losuj_rownanie_do_gry(N) #N is the number of characters the equation should have; this function creates and evaluates a random equation\r\n wynik = functions.top(rownanie)\r\n rownanie.pop()\r\n rownanie_do_klasy = \"\"\r\n for i in rownanie:\r\n rownanie_do_klasy += i #join the character list into a string\r\n meteoryty.append(classes.meteoryt(functions.losuj_pozycje(), 2, 120, 200, rownanie_do_klasy, wynik)) #an equation is generated here for every object!\r\n start = time.time()\r\n\r\n for i in meteoryty[:]: #iterate over a copy so pop() below cannot skip elements\r\n for p in pociski:\r\n if functions.kolizja(i.x, i.y, 120, 206, p.x, p.y, 100, 50, p.wynik, i.wynik, i, screen):\r\n i.czy_trafiony = True\r\n p.czy_trafiony = True #elements are only removed afterwards, so the operations right after are not disturbed\r\n punkty += 1\r\n classes.meteoryt.przyspiesz_spadanie() #increase the difficulty level\r\n if punkty % 5 == 0 and punkty != 0:\r\n N += 1\r\n classes.meteoryt.szybkosc_spadania = 0.5 #reset the fall speed to its base value; it is updated every level\r\n if i.y > 550:\r\n index = meteoryty.index(i)\r\n meteoryty.pop(index)\r\n gracz.gracz_zycie -= 1\r\n elif not i.czy_trafiony:\r\n i.spadanie()\r\n i.rysuj_meteor(screen)\r\n i.rysuj_rownanie(screen)\r\n elif i.czy_trafiony:\r\n index = meteoryty.index(i)\r\n meteoryty.pop(index) #remove meteors that were hit\r\n del i\r\n\r\n #projectile movement\r\n for i in pociski[:]: #iterate over a copy so pop() below cannot skip elements\r\n i.rysuj_pocisk(screen)\r\n if i.czy_trafiony or i.y < 0:\r\n index = pociski.index(i)\r\n pociski.pop(index) #remove projectiles that hit or left the screen\r\n del i\r\n\r\n #player movement\r\n gracz.gracz_X += gracz.gracz_zmiana_X\r\n if gracz.gracz_X > 1100:\r\n gracz.gracz_X = 1100\r\n if gracz.gracz_X < 0:\r\n gracz.gracz_X = 0\r\n\r\n wpis.rysuj(gracz.gracz_X, gracz.gracz_Y, screen)\r\n gracz.rysuj_gracza(screen)\r\n functions.rysuj_zycie(gracz.gracz_zycie, serce_img, screen)\r\n functions.wypisz_punkty(punkty, screen)\r\n functions.wypisz_poziom(N, screen)\r\n\r\n pygame.display.update()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"360714929","text":"# -*- encoding: utf-8 -*-\nfrom abjad.tools import rhythmmakertools\nfrom experimental import *\n\n\ndef test_RestRhythmMakerEditor_run_01():\n\n editor = scoremanagertools.editors.RestRhythmMakerEditor()\n editor._run(pending_user_input='q', is_autoadvancing=True)\n\n maker = rhythmmakertools.RestRhythmMaker()\n\n assert editor.target == maker\n","sub_path":"abjad/experimental/tools/scoremanagertools/editors/RestRhythmMakerEditor/test/test_RestRhythmMakerEditor_run.py","file_name":"test_RestRhythmMakerEditor_run.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"390397607","text":"#__author: \"Jing Xu\"\n#date: 2018/2/28\n\ntry:\n\tf = open(\"hp\",\"r\")\nexcept IOError:\n\tprint(\"could not open file\")\n\ndef safe_float(obj):\n\ttry:\n\t\tretval = float(obj)\n\texcept ValueError:\n\t\tretval = 
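# --- Illustrative sketch: the loops above iterate over a snapshot (list[:]) so
# --- that pop() cannot skip elements; an equivalent approach is to rebuild the
# --- list with a comprehension after the collision pass.
meteors = [{'hit': False}, {'hit': True}, {'hit': False}]
meteors = [m for m in meteors if not m['hit']]
print(len(meteors))  # 2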
\"could not convert non-number to float\"\n\texcept TypeError:\n\t\tretval = \"object type can not be converted to float\"\n\treturn retval\n\nprint(safe_float(\"hp\"))\nprint(safe_float({\"hp\":\"pavilion\"}))\nprint(safe_float(\"200\"))\nprint(safe_float(200))\n\ndef safe_float2(obj):\n\ttry:\n\t\tretval = float(obj)\n\texcept (ValueError, TypeError):\n\t\tretval = \"argument must be a numberic string\"\n\treturn retval\n\nprint(safe_float2({}))\nprint(safe_float2(\"123\"))","sub_path":"Repo_Python/zhihu_interest/learn_error.py","file_name":"learn_error.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"60762421","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\nimport unittest\n\nfrom lab2 import ModelCreator, SetField, BoolField, StringField, ListField, TupleField, IntField, FloatField, DictField\n\n__author__ = 'asaskevich'\n\n\nclass TestCase(unittest.TestCase):\n def test_something(self):\n class Sample(object, metaclass=ModelCreator):\n name = StringField()\n male = BoolField(default=True)\n phones = ListField()\n sets = SetField(default=set())\n tuples = TupleField()\n age = IntField(default='18')\n height = FloatField()\n books = DictField()\n\n man = Sample(name='John', sets=set([1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, ]), tuples=('a', 'b'), height=170.34)\n man.phones = [10000, 20000, 30000, 40000]\n man.books = {'Tolstoy': ['War and Peace']}\n self.assertEqual(man.name, 'John')\n self.assertEqual(man.height, 170.34)\n self.assertEqual(man.age, 18)\n self.assertSequenceEqual(man.phones, [10000, 20000, 30000, 40000])\n\n def test_set_raise(self):\n class Sample(object, metaclass=ModelCreator):\n name = IntField()\n\n try:\n s = Sample()\n s.name = '123aaa'\n except Exception as e:\n self.assertIsInstance(e, TypeError)\n\n def test_cast_default(self):\n try:\n class Sample(object, metaclass=ModelCreator):\n name = IntField(default=[1, 2, 3])\n\n except Exception as e:\n self.assertIsInstance(e, TypeError)\n","sub_path":"kurs_3/sem_1/IGI/lb/Laboratornaya_2/Лабораторная 2/tests/test_modelcreator.py","file_name":"test_modelcreator.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"506912617","text":"#!/usr/bin/env python3\n\n# This file is part of the MicroPython project, http://micropython.org/\n# The MIT License (MIT)\n# Copyright (c) 2020 Damien P. 
George\n\nimport sys, os, time, re, select\nimport argparse\nimport itertools\nimport subprocess\nimport tempfile\n\nsys.path.append(\"../tools\")\nimport pyboard\n\nif os.name == \"nt\":\n CPYTHON3 = os.getenv(\"MICROPY_CPYTHON3\", \"python3.exe\")\n MICROPYTHON = os.getenv(\"MICROPY_MICROPYTHON\", \"../ports/windows/micropython.exe\")\nelse:\n CPYTHON3 = os.getenv(\"MICROPY_CPYTHON3\", \"python3\")\n MICROPYTHON = os.getenv(\"MICROPY_MICROPYTHON\", \"../ports/unix/micropython\")\n\n# For diff'ing test output\nDIFF = os.getenv(\"MICROPY_DIFF\", \"diff -u\")\n\nPYTHON_TRUTH = CPYTHON3\n\nINSTANCE_READ_TIMEOUT_S = 10\n\nAPPEND_CODE_TEMPLATE = \"\"\"\nimport sys\nclass multitest:\n @staticmethod\n def flush():\n try:\n sys.stdout.flush()\n except AttributeError:\n pass\n @staticmethod\n def skip():\n print(\"SKIP\")\n multitest.flush()\n raise SystemExit\n @staticmethod\n def next():\n print(\"NEXT\")\n multitest.flush()\n @staticmethod\n def broadcast(msg):\n print(\"BROADCAST\", msg)\n multitest.flush()\n @staticmethod\n def wait(msg):\n msg = \"BROADCAST \" + msg\n while True:\n if sys.stdin.readline().rstrip() == msg:\n return\n @staticmethod\n def globals(**gs):\n for g in gs:\n print(\"SET {{}} = {{!r}}\".format(g, gs[g]))\n multitest.flush()\n @staticmethod\n def get_network_ip():\n try:\n import network\n ip = network.WLAN().ifconfig()[0]\n except:\n ip = \"127.0.0.1\"\n return ip\n\n{}\n\ninstance{}()\nmultitest.flush()\n\"\"\"\n\n# The btstack implementation on Unix generates some spurious output that we\n# can't control.\nIGNORE_OUTPUT_MATCHES = (\n \"libusb: error \", # It tries to open devices that it doesn't have access to (libusb prints unconditionally).\n \"hci_transport_h2_libusb.c\", # Same issue. We enable LOG_ERROR in btstack.\n \"USB Path: \", # Hardcoded in btstack's libusb transport.\n \"hci_number_completed_packet\", # Warning from btstack.\n)\n\n\nclass PyInstance:\n def __init__(self):\n pass\n\n def close(self):\n pass\n\n def prepare_script_from_file(self, filename, prepend, append):\n with open(filename, \"rb\") as f:\n script = f.read()\n if prepend:\n script = bytes(prepend, \"ascii\") + b\"\\n\" + script\n if append:\n script += b\"\\n\" + bytes(append, \"ascii\")\n return script\n\n def run_file(self, filename, prepend=\"\", append=\"\"):\n return self.run_script(self.prepare_script_from_file(filename, prepend, append))\n\n def start_file(self, filename, prepend=\"\", append=\"\"):\n return self.start_script(self.prepare_script_from_file(filename, prepend, append))\n\n\nclass PyInstanceSubProcess(PyInstance):\n def __init__(self, argv, env=None):\n self.argv = argv\n self.env = {n: v for n, v in (i.split(\"=\") for i in env)} if env else None\n self.popen = None\n self.finished = True\n\n def __str__(self):\n return self.argv[0].rsplit(\"/\")[-1]\n\n def run_script(self, script):\n output = b\"\"\n err = None\n try:\n p = subprocess.run(\n self.argv,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n input=script,\n env=self.env,\n )\n output = p.stdout\n except subprocess.CalledProcessError as er:\n err = er\n return str(output.strip(), \"ascii\"), err\n\n def start_script(self, script):\n self.popen = subprocess.Popen(\n self.argv + [\"-c\", script],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=self.env,\n )\n self.finished = False\n\n def stop(self):\n if self.popen and self.popen.poll() is None:\n self.popen.terminate()\n\n def readline(self):\n sel = select.select([self.popen.stdout.raw], [], [], 0.001)\n if 
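# --- Illustrative sketch: running an interpreter as a subprocess and feeding
# --- it a script on stdin, as PyInstanceSubProcess.run_script does above.
import subprocess, sys

script = b'print("hello from the child")'
p = subprocess.run([sys.executable], input=script,
                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(p.stdout.decode().strip())  # hello from the child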
not sel[0]:\n self.finished = self.popen.poll() is not None\n return None, None\n out = self.popen.stdout.raw.readline()\n if out == b\"\":\n self.finished = self.popen.poll() is not None\n return None, None\n else:\n return str(out.rstrip(), \"ascii\"), None\n\n def write(self, data):\n self.popen.stdin.write(data)\n self.popen.stdin.flush()\n\n def is_finished(self):\n return self.finished\n\n def wait_finished(self):\n self.popen.wait()\n out = self.popen.stdout.read()\n return str(out, \"ascii\"), \"\"\n\n\nclass PyInstancePyboard(PyInstance):\n @staticmethod\n def map_device_shortcut(device):\n if device[0] == \"a\" and device[1:].isdigit():\n return \"/dev/ttyACM\" + device[1:]\n elif device[0] == \"u\" and device[1:].isdigit():\n return \"/dev/ttyUSB\" + device[1:]\n else:\n return device\n\n def __init__(self, device):\n device = self.map_device_shortcut(device)\n self.device = device\n self.pyb = pyboard.Pyboard(device)\n self.pyb.enter_raw_repl()\n self.finished = True\n\n def __str__(self):\n return self.device.rsplit(\"/\")[-1]\n\n def close(self):\n self.pyb.exit_raw_repl()\n self.pyb.close()\n\n def run_script(self, script):\n output = b\"\"\n err = None\n try:\n self.pyb.enter_raw_repl()\n output = self.pyb.exec_(script)\n except pyboard.PyboardError as er:\n err = er\n return str(output.strip(), \"ascii\"), err\n\n def start_script(self, script):\n self.pyb.enter_raw_repl()\n self.pyb.exec_raw_no_follow(script)\n self.finished = False\n\n def stop(self):\n self.pyb.serial.write(b\"\\r\\x03\")\n\n def readline(self):\n if self.finished:\n return None, None\n if self.pyb.serial.inWaiting() == 0:\n return None, None\n out = self.pyb.read_until(1, (b\"\\r\\n\", b\"\\x04\"))\n if out.endswith(b\"\\x04\"):\n self.finished = True\n out = out[:-1]\n err = str(self.pyb.read_until(1, b\"\\x04\"), \"ascii\")\n err = err[:-1]\n if not out and not err:\n return None, None\n else:\n err = None\n return str(out.rstrip(), \"ascii\"), err\n\n def write(self, data):\n self.pyb.serial.write(data)\n\n def is_finished(self):\n return self.finished\n\n def wait_finished(self):\n out, err = self.pyb.follow(10, None)\n return str(out, \"ascii\"), str(err, \"ascii\")\n\n\ndef prepare_test_file_list(test_files):\n test_files2 = []\n for test_file in sorted(test_files):\n num_instances = 0\n with open(test_file) as f:\n for line in f:\n m = re.match(r\"def instance([0-9]+)\\(\\):\", line)\n if m:\n num_instances = max(num_instances, int(m.group(1)) + 1)\n test_files2.append((test_file, num_instances))\n return test_files2\n\n\ndef trace_instance_output(instance_idx, line):\n if cmd_args.trace_output:\n t_ms = round((time.time() - trace_t0) * 1000)\n print(\"{:6} i{} :\".format(t_ms, instance_idx), line)\n sys.stdout.flush()\n\n\ndef run_test_on_instances(test_file, num_instances, instances):\n global trace_t0\n trace_t0 = time.time()\n\n error = False\n skip = False\n injected_globals = \"\"\n output = [[] for _ in range(num_instances)]\n\n if cmd_args.trace_output:\n print(\"TRACE {}:\".format(\"|\".join(str(i) for i in instances)))\n\n # Start all instances running, in order, waiting until they signal they are ready\n for idx in range(num_instances):\n append_code = APPEND_CODE_TEMPLATE.format(injected_globals, idx)\n instance = instances[idx]\n instance.start_file(test_file, append=append_code)\n last_read_time = time.time()\n while True:\n if instance.is_finished():\n break\n out, err = instance.readline()\n if out is None and err is None:\n if time.time() > last_read_time + 
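# --- Illustrative sketch: counting instanceN() functions with the same regex
# --- scan as prepare_test_file_list above; the source is inline here instead
# --- of read from a file.
import re

source = "def instance0():\n    pass\ndef instance1():\n    pass\n"
num_instances = 0
for line in source.splitlines():
    m = re.match(r"def instance([0-9]+)\(\):", line)
    if m:
        num_instances = max(num_instances, int(m.group(1)) + 1)
print(num_instances)  # 2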
INSTANCE_READ_TIMEOUT_S:\n output[idx].append(\"TIMEOUT\")\n error = True\n break\n time.sleep(0.1)\n continue\n last_read_time = time.time()\n if out is not None and not any(m in out for m in IGNORE_OUTPUT_MATCHES):\n trace_instance_output(idx, out)\n if out.startswith(\"SET \"):\n injected_globals += out[4:] + \"\\n\"\n elif out == \"SKIP\":\n skip = True\n break\n elif out == \"NEXT\":\n break\n else:\n output[idx].append(out)\n if err is not None:\n trace_instance_output(idx, err)\n output[idx].append(err)\n error = True\n\n if error or skip:\n break\n\n if not error and not skip:\n # Capture output and wait for all instances to finish running\n last_read_time = [time.time() for _ in range(num_instances)]\n while True:\n num_running = 0\n num_output = 0\n for idx in range(num_instances):\n instance = instances[idx]\n if instance.is_finished():\n continue\n num_running += 1\n out, err = instance.readline()\n if out is None and err is None:\n if time.time() > last_read_time[idx] + INSTANCE_READ_TIMEOUT_S:\n output[idx].append(\"TIMEOUT\")\n error = True\n continue\n num_output += 1\n last_read_time[idx] = time.time()\n if out is not None and not any(m in out for m in IGNORE_OUTPUT_MATCHES):\n trace_instance_output(idx, out)\n if out.startswith(\"BROADCAST \"):\n for instance2 in instances:\n if instance2 is not instance:\n instance2.write(bytes(out, \"ascii\") + b\"\\r\\n\")\n else:\n output[idx].append(out)\n if err is not None:\n trace_instance_output(idx, err)\n output[idx].append(err)\n error = True\n\n if not num_output:\n time.sleep(0.1)\n if not num_running or error:\n break\n\n # Stop all instances\n for idx in range(num_instances):\n instances[idx].stop()\n\n output_str = \"\"\n for idx, lines in enumerate(output):\n output_str += \"--- instance{} ---\\n\".format(idx)\n output_str += \"\\n\".join(lines) + \"\\n\"\n\n return error, skip, output_str\n\n\ndef print_diff(a, b):\n a_fd, a_path = tempfile.mkstemp(text=True)\n b_fd, b_path = tempfile.mkstemp(text=True)\n os.write(a_fd, a.encode())\n os.write(b_fd, b.encode())\n os.close(a_fd)\n os.close(b_fd)\n subprocess.run(DIFF.split(\" \") + [a_path, b_path])\n os.unlink(a_path)\n os.unlink(b_path)\n\n\ndef run_tests(test_files, instances_truth, instances_test):\n skipped_tests = []\n passed_tests = []\n failed_tests = []\n\n for test_file, num_instances in test_files:\n instances_str = \"|\".join(str(instances_test[i]) for i in range(num_instances))\n print(\"{} on {}: \".format(test_file, instances_str), end=\"\")\n if cmd_args.show_output or cmd_args.trace_output:\n print()\n sys.stdout.flush()\n\n # Run test on test instances\n error, skip, output_test = run_test_on_instances(test_file, num_instances, instances_test)\n\n if not skip:\n # Check if truth exists in a file, and read it in\n test_file_expected = test_file + \".exp\"\n if os.path.isfile(test_file_expected):\n with open(test_file_expected) as f:\n output_truth = f.read()\n else:\n # Run test on truth instances to get expected output\n _, _, output_truth = run_test_on_instances(\n test_file, num_instances, instances_truth\n )\n\n if cmd_args.show_output:\n print(\"### TEST ###\")\n print(output_test, end=\"\")\n if not skip:\n print(\"### TRUTH ###\")\n print(output_truth, end=\"\")\n\n # Print result of test\n if skip:\n print(\"skip\")\n skipped_tests.append(test_file)\n elif output_test == output_truth:\n print(\"pass\")\n passed_tests.append(test_file)\n else:\n print(\"FAIL\")\n failed_tests.append(test_file)\n if not cmd_args.show_output:\n print(\"### TEST 
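# --- Illustrative sketch: a pure-Python alternative to shelling out to `diff`
# --- as print_diff does above -- difflib yields the same unified format
# --- without temp files.
import difflib

truth, test = "a\nb\nc\n", "a\nB\nc\n"
diff = difflib.unified_diff(truth.splitlines(keepends=True),
                            test.splitlines(keepends=True),
                            fromfile='truth', tofile='test')
print(''.join(diff), end='')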
###\")\n print(output_test, end=\"\")\n print(\"### TRUTH ###\")\n print(output_truth, end=\"\")\n print(\"### DIFF ###\")\n print_diff(output_truth, output_test)\n\n if cmd_args.show_output:\n print()\n\n print(\"{} tests performed\".format(len(skipped_tests) + len(passed_tests) + len(failed_tests)))\n print(\"{} tests passed\".format(len(passed_tests)))\n\n if skipped_tests:\n print(\"{} tests skipped: {}\".format(len(skipped_tests), \" \".join(skipped_tests)))\n if failed_tests:\n print(\"{} tests failed: {}\".format(len(failed_tests), \" \".join(failed_tests)))\n\n return not failed_tests\n\n\ndef main():\n global cmd_args\n\n cmd_parser = argparse.ArgumentParser(description=\"Run network tests for MicroPython\")\n cmd_parser.add_argument(\n \"-s\", \"--show-output\", action=\"store_true\", help=\"show test output after running\"\n )\n cmd_parser.add_argument(\n \"-t\", \"--trace-output\", action=\"store_true\", help=\"trace test output while running\"\n )\n cmd_parser.add_argument(\n \"-i\", \"--instance\", action=\"append\", default=[], help=\"instance(s) to run the tests on\"\n )\n cmd_parser.add_argument(\n \"-p\",\n \"--permutations\",\n type=int,\n default=1,\n help=\"repeat the test with this many permutations of the instance order\",\n )\n cmd_parser.add_argument(\"files\", nargs=\"+\", help=\"input test files\")\n cmd_args = cmd_parser.parse_args()\n\n # clear search path to make sure tests use only builtin modules and those in extmod\n os.environ[\"MICROPYPATH\"] = os.pathsep + \"../extmod\"\n\n test_files = prepare_test_file_list(cmd_args.files)\n max_instances = max(t[1] for t in test_files)\n\n instances_truth = [PyInstanceSubProcess([PYTHON_TRUTH]) for _ in range(max_instances)]\n\n instances_test = []\n for i in cmd_args.instance:\n # Each instance arg is ,ENV=VAR,ENV=VAR...\n i = i.split(\",\")\n cmd = i[0]\n env = i[1:]\n if cmd.startswith(\"exec:\"):\n instances_test.append(PyInstanceSubProcess([cmd[len(\"exec:\") :]], env))\n elif cmd == \"micropython\":\n instances_test.append(PyInstanceSubProcess([MICROPYTHON], env))\n elif cmd == \"cpython\":\n instances_test.append(PyInstanceSubProcess([CPYTHON3], env))\n elif cmd.startswith(\"pyb:\"):\n instances_test.append(PyInstancePyboard(cmd[len(\"pyb:\") :]))\n else:\n print(\"unknown instance string: {}\".format(cmd), file=sys.stderr)\n sys.exit(1)\n\n for _ in range(max_instances - len(instances_test)):\n instances_test.append(PyInstanceSubProcess([MICROPYTHON]))\n\n all_pass = True\n try:\n for i, instances_test_permutation in enumerate(itertools.permutations(instances_test)):\n if i >= cmd_args.permutations:\n break\n\n all_pass &= run_tests(test_files, instances_truth, instances_test_permutation)\n\n finally:\n for i in instances_truth:\n i.close()\n for i in instances_test:\n i.close()\n\n if not all_pass:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/run-multitests.py","file_name":"run-multitests.py","file_ext":"py","file_size_in_byte":16235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"630192863","text":"#!/usr/bin/env python\n\"\"\"\nGrapical User Interface for FPGA Timing System.\nAuthor: Friedrich Schotte\nDate created: 2018-12-04\nDate last modified: 2019-03-26\n\"\"\"\n__version__ = \"1.3\" # using timing_system.prefixes for choices\n\nfrom logging import debug,info,warn,error\nimport wx\n\nclass Timing_Setup_Panel(wx.Frame):\n title = \"Timing System Setup\"\n icon = \"timing-system\"\n \n def 
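# --- Illustrative sketch: the argparse pattern used in main() above, reduced
# --- to two options; the argument list is passed explicitly so the example is
# --- self-contained.
import argparse

parser = argparse.ArgumentParser(description='Run network tests')
parser.add_argument('-i', '--instance', action='append', default=[])
parser.add_argument('files', nargs='+')
args = parser.parse_args(['-i', 'micropython', 'multi_net/tcp.py'])
print(args.instance, args.files)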
__init__(self,parent=None,name=\"TimingPanel\"):\n wx.Frame.__init__(self,parent=parent,title=self.title)\n self.name = name\n panel = wx.Panel(self)\n\n from Icon import SetIcon\n SetIcon(self,self.icon)\n\n # Controls\n from EditableControls import ComboBox\n style = wx.TE_PROCESS_ENTER\n width = 160\n \n self.Prefix = ComboBox(panel,style=style,size=(width,-1))\n \n self.Address = wx.TextCtrl(panel,style=wx.TE_READONLY,size=(width,-1))\n self.Address.Enabled = False\n \n # Callbacks\n self.Bind (wx.EVT_TEXT_ENTER,self.OnEnterPrefix,self.Prefix)\n self.Bind (wx.EVT_COMBOBOX ,self.OnEnterPrefix,self.Prefix)\n self.Bind (wx.EVT_CLOSE ,self.OnClose)\n\n # Layout\n layout = wx.GridBagSizer(1,1)\n a = wx.ALIGN_CENTRE_VERTICAL\n e = wx.EXPAND\n\n row = 0\n label = wx.StaticText(panel,label=\"EPICS Record:\")\n layout.Add (label,(row,0),flag=a)\n layout.Add (self.Prefix,(row,1),flag=a|e)\n\n row += 1\n label = wx.StaticText(panel,label=\"IP Address (auto detect):\")\n layout.Add (label,(row,0),flag=a)\n layout.Add (self.Address,(row,1),flag=a|e)\n\n # Leave a 5-pixel wide border.\n box = wx.BoxSizer(wx.VERTICAL)\n box.Add (layout,flag=wx.ALL,border=5)\n panel.SetSizer(box)\n panel.Fit()\n self.Fit()\n\n self.Show()\n self.refresh()\n\n def OnEnterPrefix(self,event):\n \"\"\"Called if EPICS record prefix is changed\"\"\"\n from timing_system import timing_system\n timing_system.prefix = self.Prefix.Value\n self.refresh()\n\n def OnRefresh(self,event=None):\n self.refresh()\n\n def refresh(self,event=None):\n \"\"\"Update the controles and indicators with current values\"\"\"\n if self.Shown:\n from timing_system import timing_system\n self.Prefix.Value = timing_system.prefix\n self.Prefix.Items = timing_system.prefixes\n self.Address.Value = timing_system.ip_address\n self.timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER,self.refresh,self.timer)\n self.timer.Start(1000,oneShot=True)\n \n def OnClose(self,event):\n self.Shown = False\n ##self.Destroy() # might crash under Windows\n wx.CallLater(2000,self.Destroy)\n\nSetupPanel = Timing_Setup_Panel # for backward compatibility\n\n\nif __name__ == '__main__':\n from pdb import pm # for debugging\n from tempfile import gettempdir\n logfile = gettempdir()+\"/Timing_Setup_Panel.log\"\n import logging # for debugging\n logging.basicConfig(\n level=logging.DEBUG,\n filename=logfile,\n format=\"%(asctime)s %(levelname)s: %(message)s\",\n )\n\n app = wx.App(redirect=False) \n panel = Timing_Setup_Panel()\n app.MainLoop()\n","sub_path":"Timing_Setup_Panel.py","file_name":"Timing_Setup_Panel.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"22119511","text":"\n\nGCLOUD_USER_ACCOUNT = \"weisburd@broadinstitute.org\"\nGCLOUD_CREDENTIALS_LOCATION = \"gs://weisburd-misc/creds\"\nGCLOUD_PROJECT = \"seqr-project\"\n\nDOCKER_IMAGE = \"weisburd/gagneurlab@sha256:be45788c8696a196bee25be269cb2de97277601bed65cbc3efcadc16acc5a764\"\n#DOCKER_IMAGE = \"weisburd/gagneurlab@sha256:75a09c7ec42185b07206eb79f1d2f532ca712e19c18008779e9ea133153c807a\"\n\n\n# https://i12g-gagneurweb.in.tum.de/public/workshops/RNAseq_ASHG19/input_data/annotations/gencode.v29lift37.annotation.txdb\nCLOUD_STORAGE_BASE_DIR = \"gs://tgg-rnaseq/gagneur\"\nGENCODE_TXDB = f\"{CLOUD_STORAGE_BASE_DIR}/gencode.v26.annotation.txdb\"\n#ALL_METADATA_TSV = f\"{CLOUD_STORAGE_BASE_DIR}/metadata_table_for_all_RDG_and_GTEX_samples.tsv\"\nOUTRIDER_COUNTS_TSV_GZ = 
f\"{CLOUD_STORAGE_BASE_DIR}/outrider/OUTRIDER_input_table_RDG_and_GTEX_counts_for_all_tissues.tsv.gz\"\nBAM_HEADER_PATH = f\"{CLOUD_STORAGE_BASE_DIR}/fraser/bam_header.bam\"\n\nimport gspread\nimport os\nfrom google.oauth2.service_account import Credentials\n\n_GSPREAD_CLIENT = None\n\ndef get_spreasheet(spreadsheet_name):\n global _GSPREAD_CLIENT\n if _GSPREAD_CLIENT is None:\n creds = Credentials.from_service_account_file(\n os.path.expanduser('~/.config/gcloud/seqr-project-0cb2b89f436f.json'),\n scopes=[\n 'https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive.file',\n 'https://www.googleapis.com/auth/drive',\n ]\n )\n\n _GSPREAD_CLIENT = gspread.authorize(creds)\n\n spreadsheet = _GSPREAD_CLIENT.open(spreadsheet_name)\n\n return spreadsheet\n\n\n_OUTRIDER_RESULTS_SPREADSHEET = None\ndef get_OUTRIDER_results_spreadsheet():\n global _OUTRIDER_RESULTS_SPREADSHEET\n _OUTRIDER_RESULTS_SPREADSHEET = get_spreasheet(\"RNA-seq OUTRIDER results\")\n return _OUTRIDER_RESULTS_SPREADSHEET\n\n\n_FRASER_RESULTS_SPREADSHEET = None\ndef get_FRASER_results_spreadsheet():\n global _FRASER_RESULTS_SPREADSHEET\n _FRASER_RESULTS_SPREADSHEET = get_spreasheet(\"RNA-seq FRASER results\")\n return _FRASER_RESULTS_SPREADSHEET\n\n\n_RNASEQ_RESULTS_2020_12_10_SPREADSHEET = None\ndef get_RNASEQ_results_spreadsheet():\n global _RNASEQ_RESULTS_2020_12_10_SPREADSHEET\n _RNASEQ_RESULTS_2020_12_10_SPREADSHEET = get_spreasheet(\"RNA-seq results: FRASER, OUTRIDER\")\n return _RNASEQ_RESULTS_2020_12_10_SPREADSHEET\n\n_RNASEQ_TRUTH_DATA_SPREADSHEET = None\ndef get_rnaseq_truth_data_spreadsheet():\n global _RNASEQ_TRUTH_DATA_SPREADSHEET\n print(\"Loading 'RNA-seq truth data'\")\n _RNASEQ_TRUTH_DATA_SPREADSHEET = get_spreasheet(\"RNA-seq truth data\")\n return _RNASEQ_TRUTH_DATA_SPREADSHEET\n\n","sub_path":"pipelines/gagneurlab/gagneur_utils.py","file_name":"gagneur_utils.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"223931913","text":"#!/usr/bin/env python\n\nimport sys\n\n\"\"\"\nCollapse word vector based on length file\n\"\"\"\n\ndef collapse(dim, origin, len_file):\n lens = []\n with open(len_file) as file:\n for line in file:\n lens.append(int(line.strip()))\n result = []\n with open(origin) as file:\n for leni in lens:\n vec = [0] * dim\n for i in range(0, leni):\n curvec = map(lambda x: int(x), file.readline().strip().split(' '))\n vec = [vec[i]+curvec[i] for i in range(0, dim)]\n result.append(vec)\n return result\n\ndef saveData(dim, data, collapse_output):\n out = open(collapse_output, 'w')\n for d in data:\n for i in range(0, dim):\n out.write(\"%s\" % (d[i]))\n if i==dim-1:\n out.write('\\n')\n else:\n out.write(' ')\n\n\nif __name__ == \"__main__\":\n dim, origin, len_file, collapse_output = sys.argv[1:]\n dim = int(dim)\n saveData(dim, collapse(dim, origin, len_file), collapse_output)\n","sub_path":"bin/collapse.py","file_name":"collapse.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"155092603","text":"from docreader import *\t \nfrom processing import *\n\n\nclass SpamRecognizer:\n\tdef __init__(self, docs):\n\t\tself.docs = docs\n\t\tself.get_spam_words()\n\tdef get_spam_words(self):\n\t\tspams = filter_by_doctype(self.docs, 'spam')\n\t\thams = filter_by_doctype(self.docs, 'ham')\n\t\tdoc_words = count_doc_words(spams, hams)\n\t\tmediana_spam_possibility = 
0.8\n\t\tself.spam_markers = filter_by_min_spam_possibility_gt(doc_words, mediana_spam_possibility)\n\t\tspam_markers_dict = {}\n\t\tfor w in self.spam_markers:\n\t\t\tspam_markers_dict[w.value] = w\n\t\tself.spam_markers_dict = spam_markers_dict\n\tdef classificate(self, content):\n\t\twords = content.split(' ')\n\t\tspam_posiibility = 0\n\t\tfor w in words:\n\t\t\tw = extractOnlyLetters(w)\n\t\t\tif not self.spam_markers_dict.get(w, None) is None:\t\n\t\t\t\tspam_posiibility = spam_posiibility + self.spam_markers_dict[w].spam_posiibility()\n\t\tif(spam_posiibility > 1.1):\n\t\t\treturn 'spam'\n\t\telse:\n\t\t\treturn \"ham\"\n\ndef validate(filter):\n\tdocs = read_all_docs('C:\\workspace\\SpamRecognizer\\Data\\SMSSpamCollection')\n\trecognizer = SpamRecognizer(docs.training_set)\n\tvalidation_set = docs.validation_set\n\tif not filter is None:\n\t\tvalidation_set = list(filter(docs.validation_set)) \n\terror_count = 0\n\tfor d in validation_set:\n\t\tresponse = recognizer.classificate(d.content)\n\t\tif not response == d.doc_type:\n\t\t\terror_count = error_count + 1\n\tprint(\"total validation set: {0}\".format(len(validation_set)))\n\terror_persents = error_count * 100 / len(validation_set)\n\taccuracy = 100 - error_count * 100 / len(validation_set)\n\tmessage = \"errors: {0}; error_persents: {1}; accuracy: {2}\".format(error_count, error_persents, accuracy)\n\tprint(message)\n\nprint('...')\nfor i in range(1,10):\t\n\tprint('validation set')\n\tvalidate(None)\n\ndef hamOnlyFilter(set):\n\treturn filter_by_doctype(set, 'ham')\nprint('...')\nfor i in range(1,10):\t\n\tprint('Ham only validation set')\n\tvalidate(hamOnlyFilter)\n\ndef spamOnlyFilter(set):\n\treturn filter_by_doctype(set, 'spam')\nprint('...')\nfor i in range(1,10):\t\n\tprint('Spam only validation set')\n\tvalidate(spamOnlyFilter)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"299915875","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n##################################################################################################\n# Copyright (c) 2012 Brett Dixon\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n##################################################################################################\n\n\n# check for venv and warn\n# check for dev project: prompt for overwrite\n# copy project dir\n# generate secret_key.txt\n# create static dir\n# run migrate\n# run createsuperuser\n# print further instructions\n\nimport os\nimport argparse\nimport warnings\nimport sys\nimport subprocess\nimport logging\nimport random\n\nimport path\n\nlogging.basicConfig()\n\nBASE = path.Path(os.path.abspath(__file__)).parent\nENV = path.Path(os.getenv('VIRTUAL_ENV', os.getcwd()))\nLOGGER = logging.getLogger('Frog Init')\nLOGGER.setLevel(logging.INFO)\n\n\ndef isVirtualEnv():\n return 'VIRTUAL_ENV' in os.environ\n\n\ndef projectCheck():\n \"\"\"checks to see if the project exists and propmts for overwrite\"\"\"\n project = ENV / 'dev'\n return project.exists()\n\n\ndef writeSecretKey(path):\n dest = os.path.join(path, 'secret_key.txt')\n\n if not os.path.exists(dest):\n LOGGER.info('Generating secret key at {}...'.format(dest))\n with open(dest, 'w+') as fh:\n key = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])\n fh.write(key)\n\n\ndef secretKeyCommand():\n parser = argparse.ArgumentParser()\n parser.add_argument('path', help='Path to write file to')\n\n args, opts = parser.parse_known_args()\n\n writeSecretKey(args.path)\n\n\ndef quickstart():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action='store_true', help='Overwrite everything without prompts')\n\n args, opts = parser.parse_known_args()\n\n if not isVirtualEnv():\n LOGGER.error('Please use a virtual environment')\n sys.exit()\n\n if not args.force:\n if projectCheck():\n LOGGER.error('Project already exists')\n sys.exit()\n\n project = BASE / 'project' / 'dev'\n dest = ENV / 'dev'\n LOGGER.info('{} > {}'.format(project, dest))\n if dest.exists():\n dest.rmtree()\n project.copytree(dest)\n\n try:\n (dest / 'static').mkdir()\n except (OSError, IOError):\n pass\n\n os.chdir(dest)\n\n writeSecretKey(os.getcwd())\n\n LOGGER.info('Running migrations...')\n subprocess.check_call(['python', 'manage.py', 'migrate'])\n LOGGER.info('Loading default data...')\n subprocess.check_call(['python', 'manage.py', 'loaddata', '--app', 'frog', 'initial_data.json'])\n\n LOGGER.info('Generating nginx conf file...')\n conf = dest / 'frog.conf'\n conf.write_text((project.parent / 'frog.conf').text().format(\n static=(dest / 'static').replace('\\\\', '/'),\n env=ENV.replace('\\\\', '/')\n ))\n (project.parent / 'mime.types').copy(dest)\n LOGGER.info(dest / 'frog.conf')\n\n LOGGER.info('Done')\n","sub_path":"frog/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"511866564","text":"## usage: python3 plot.py on.csv off.csv\n\nimport numpy as np\nimport sys\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import spline\nfrom scipy.signal import lfilter\n\n\nfont = {'family' : 'normal',\n # 'weight' : 'bold',\n 'size' : 22}\nfont_label = {'family' : 'normal',\n # 'weight' : 'bold',\n 'size' : 22}\n\nplt.rc('font', **font)\nplt.rc('legend', 
fontsize=22)\n\nconvertfunc = lambda x: float(x[0:-2])\n\nlatency_off_file = np.genfromtxt(sys.argv[1], delimiter=',')\nlatency_menu_file = np.genfromtxt(sys.argv[2], delimiter=',')\nlatency_yawn_file = np.genfromtxt(sys.argv[3], delimiter=',')\npower_off_file = np.genfromtxt(sys.argv[4], delimiter=',', converters={0: convertfunc})\npower_menu_file = np.genfromtxt(sys.argv[5], delimiter=',', converters={0: convertfunc})\npower_yawn_file = np.genfromtxt(sys.argv[6], delimiter=',', converters={0: convertfunc})\n\npower_off = []\npower_menu = []\npower_yawn = []\nfor i in range(0, len(power_off_file), 2):\n\tsum1 = int(power_off_file[i])\n\tsum1+= int(power_off_file[i+1])\n\tpower_off.append(sum1)\nfor i in range(0, len(power_menu_file), 2):\n\tsum1 = int(power_menu_file[i])\n\tsum1+= int(power_menu_file[i+1])\n\tpower_menu.append(sum1)\nfor i in range(0, len(power_yawn_file), 2):\n\tsum1 = int(power_yawn_file[i])\n\tsum1+= int(power_yawn_file[i+1])\n\tpower_yawn.append(sum1)\n\npower_off_file = power_off\npower_menu_file = power_menu\npower_yawn_file = power_yawn\n\nx = [i[0] for i in latency_menu_file]\noff_avg = [i[1] for i in latency_off_file]\nmenu_avg = [i[1] for i in latency_menu_file]\noff_95 = [i[2] for i in latency_off_file]\nmenu_95 = [i[2] for i in latency_menu_file]\nyawn_95 = [i[2] for i in latency_yawn_file]\noff_99 = [i[3] for i in latency_off_file]\nmenu_99 = [i[3] for i in latency_menu_file]\nyawn_99 = [i[3] for i in latency_yawn_file]\noff_999 = [i[4] for i in latency_off_file]\nmenu_999 = [i[4] for i in latency_menu_file]\nyawn_999 = [i[4] for i in latency_yawn_file]\n\n\nfig, (ax1, ax2, ax3, ax4) = plt.subplots(4,1,figsize=(14,11))\n\nfor i in range(len(x)):\n x[i] /= 1000\nx1 = np.arange(1,91,1)\nxnew = np.linspace(x1.min(),x1.max(),300)\npower_smooth = spline(x1,x,xnew)\n\n\nax1.set_ylabel(\"Request Rate\\n (x\" + r'$10^3$' + \" RPS)\", labelpad=20,**font_label)# ax1.set_xlabel(\"Min\")\nax1.grid()\n# ax1.set_xticks(np.arange(0,91,10))\nax1.set_xticklabels([])\nax1.set_yticks(np.arange(0,80,20))\nax1.set_yticklabels(np.arange(0,80,20))\nax1.set_xlim(1,90)\np1 = ax1.plot(xnew, power_smooth, 'C1', linewidth=3)\n\nn = 4 # the larger n is, the smoother curve will be\nb = [1.0 / n] * n\na = 1\noff_99_smooth = lfilter(b,a,off_99)\nmenu_99_smooth = lfilter(b,a,menu_99)\nyawn_99_smooth = lfilter(b,a,yawn_99)\noff_99_smooth2 = spline(x1,off_99_smooth,xnew)\nmenu_99_smooth2 = spline(x1,menu_99_smooth,xnew)\nyawn_99_smooth2 = spline(x1,yawn_99_smooth,xnew)\n\np2 ,p3, p4 = ax2.plot(xnew, off_99_smooth2,'b:', xnew, menu_99_smooth2, 'r', xnew, yawn_99_smooth2, 'g--', linewidth=3)\nax2.set_ylabel(r'$99^{th}$' + 'Latency (us)')\n# ax2.set_xlabel('Min')\nax2.grid()\n# ax2.set_xticks(np.arange(0,91,10))\nax2.set_xticklabels([])\n# ax2.set_yticks(np.arange(80,200,20))\n# ax2.set_xticklabels(np.arange(0,91,10))\nax2.set_xlim(1,90)\n\nn = 4 # the larger n is, the smoother curve will be\nb = [1.0 / n] * n\na = 1\noff_999_smooth = lfilter(b,a,off_999)\nmenu_999_smooth = lfilter(b,a,menu_999)\nyawn_999_smooth = lfilter(b,a,yawn_999)\noff_999_smooth2 = spline(x1,off_999_smooth,xnew)\nmenu_999_smooth2 = spline(x1,menu_999_smooth,xnew)\nyawn_999_smooth2 = spline(x1,yawn_999_smooth,xnew)\n\n# plt.plot(x, yy, linewidth=2, linestyle=\"-\", c=\"b\") # smooth by filter\n\np8 ,p9, p10 = ax3.plot(xnew, off_999_smooth2,'b:', xnew, menu_999_smooth2, 'r', xnew, yawn_999_smooth2, 'g--', linewidth=3)\nax3.set_ylabel(r'$99.9^{th}$' + 'Latency (us)', labelpad=30)\n# ax2.set_xlabel('Min')\nax3.grid()\n# 
ax2.set_xticks(np.arange(0,91,10))\nax3.set_xticklabels([])\n# ax3.set_yticks(np.arange(80,200,20))\n# ax2.set_xticklabels(np.arange(0,91,10))\nax3.set_xlim(1,90)\n\n\nstep2 = 90/(len(power_off_file))\nstep3 = 90/(len(power_menu_file))\nstep4 = 90/(len(power_yawn_file))\nx2 = np.arange(1, 91, step2)\nx3 = np.arange(1, 91, step3)\nx4 = np.arange(1, 91, step4)\n\noff_power_smooth = lfilter(b,a,power_off_file)\nmenu_power_smooth = lfilter(b,a,power_menu_file)\nyawn_power_smooth = lfilter(b,a,power_yawn_file)\n\np5, p6, p7 = ax4.plot(x2, off_power_smooth, 'b:', x3, menu_power_smooth, 'r', x4, yawn_power_smooth, 'g--', linewidth=3)\nax4.set_ylabel('Power (Watt)')\nax4.set_xlabel('Time (min)')\nax4.grid()\ntick = [0,10,20,30,40,50,60,70,80,90]\nax4.set_xticks(np.arange(1,101,10))\nax4.set_xticklabels(tick)\nax4.set_xlim(2,91)\nax4.set_ylim(30,150)\n# ax2.title('Request Latency of different loads')\n# plt.xticks(ind, ('20%', '30%', '40%', '50%'))\n# plt.yticks(np.arange(0, 800, 10))\nax1.legend((p2, p3, p4), ('C-states Disabled','Menu', 'Yawn'), loc='center',\n ncol=3,bbox_to_anchor=(0., 1.1, 1., .110))\n# plt.legend((p1, p2,p3,p4,p5,p6), ('menu-95', 'yawn-95','menu-99th', 'yawn-99th', 'menu-99.9', 'yawn-99.9'))\n# plt.legend((p1, p2), ('On-avg', 'Off-avg'))\nplt.savefig('trace.png', format='png', dpi=300, bbox_inches='tight')\nplt.show()\n","sub_path":"trace/plot_trace_numa.py","file_name":"plot_trace_numa.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"358546205","text":"#!.env/bin/python\n\nimport json\nimport datetime\n\n\nclass Logger:\n def __init__(self, config):\n self.log_level = self.__log_value(config.logging_severity)\n self.build_log = lambda message, severity: self.__build_log(message, config, severity) # noqa: E501\n self.masked_values = config.logging_masked_values\n\n def log(self, message, severity='TRACE'):\n if self.log_level >= self.__log_value(severity):\n log = self.build_log(message, severity)\n formatted_log = json.dumps(log)\n print(formatted_log)\n\n def __build_log(self, log, config, severity):\n new_log = {\n 'message': log,\n 'level': severity,\n 'current_time': str(datetime.datetime.now())\n }\n\n for key, value in config.__dict__.items():\n section, config_name = key.split('_', 1)\n\n if section not in new_log:\n new_log[section] = {}\n\n log_category = new_log[section]\n log_category[config_name] = self.__mask(config_name, value)\n\n return new_log\n\n def __mask(self, key, value):\n if key in self.masked_values:\n return '*' * 8\n else:\n return value\n\n def __log_value(self, level_string):\n levels = {\n 'OFF': -1,\n 'FATAL': 1,\n 'CRITICAL': 2,\n 'ERROR': 3,\n 'WARNING': 4,\n 'INFO': 5,\n 'DEBUG': 6,\n 'TRACE': 7,\n 'ALL': 99\n }\n\n return levels[level_string.upper()]\n","sub_path":"video_rss/logging/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"560681525","text":"from strategy import Strategy\n\nimport numpy as np\nimport ta\n\n\n# Profit per 15sec: 0.002728082958054552\nclass StrategyMacdCci(Strategy):\n def __init__(self):\n Strategy.__init__(\n self,\n use_stop_loss=False,\n use_trailing_stop=True,\n trailing_stop_percent=-3,\n stoploss_percent_value=-4,\n use_roi=True,\n )\n\n self.minimal_roi = {\n # cycles (15sec) : percent return\n \"240\": 0.02,\n \"120\": 0.1,\n \"60\": 0.15,\n \"0\": 0.2,\n }\n\n def 
generateIndicators(self, dataframe):\n print(\"generating indicators\")\n\n # Calculate MACD and signal lines\n bacd_params = (12, 26, 9)\n period_multiplier = 49 # 175 # 25 or 111\n\n exp1 = (\n dataframe[[\"average\"]]\n .ewm(span=bacd_params[0] * period_multiplier, adjust=False)\n .mean()\n )\n exp2 = (\n dataframe[[\"average\"]]\n .ewm(span=bacd_params[1] * period_multiplier, adjust=False)\n .mean()\n )\n macd = exp1 - exp2\n signal = macd.ewm(\n span=bacd_params[2] * round(1 + period_multiplier * 0.25),\n adjust=False,\n ).mean()\n\n dataframe[\"macd\"] = macd\n dataframe[\"signal\"] = signal\n\n dataframe[\"cci\"] = ta.trend.cci(dataframe.high, dataframe.low, dataframe.close)\n return dataframe\n\n def adviseSell(self, dataframe, i, bought_at_index):\n return (dataframe[\"macd\"].iat[i] < dataframe[\"signal\"].iat[i]) and (dataframe[\"cci\"].iat[i] >= 100.0*3)\n\n def adviseBuy(self, dataframe, i):\n return (dataframe[\"macd\"].iat[i] > dataframe[\"signal\"].iat[i]) and (dataframe[\"cci\"].iat[i] <= -50.0*3)\n","sub_path":"night_trader/strategies/strategy_macd_cci.py","file_name":"strategy_macd_cci.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"602813970","text":"from .. import rman_config\nfrom .. import rman_bl_nodes\nimport bpy\nimport json\nimport pprint\nimport os\n\ndef GetConfigurablePanels():\n '''Return the names of RenderForBlender panels that are configurable.\n\n Example:\n import RenderManForBlender.rfb_api as rfb_api\n rfb_api.GetConfigurablePanels()\n\n Returns:\n (dict)\n\n '''\n\n panels = dict()\n for config_name,cfg in rman_config.__RMAN_CONFIG__.items():\n for param_name, ndp in cfg.params.items():\n panel = getattr(ndp, 'panel', '')\n if panel == '':\n continue\n if panel not in panels:\n #panels.append(ndp.panel)\n cls = getattr(bpy.types, panel)\n panels[panel] = { 'bl_label': cls.bl_label }\n print(\"RenderMan Configurable Panels\")\n print(\"------------------------------\")\n for panel, props in panels.items():\n print(\"%s (%s)\" % (panel, props['bl_label']))\n print(\"------------------------------\\n\")\n return panels\n\ndef GetConfigurablePanelProperties(panel):\n '''Return all properties in a given panel that are configurable.\n\n Example:\n import RenderManForBlender.rfb_api as rfb_api\n rfb_api.GetConfigurablePanelProperties('RENDER_PT_renderman_sampling') \n\n Args:\n panel (str) - the name of the panel caller is interested in\n\n Returns:\n (dict)\n '''\n props = dict()\n for config_name,cfg in rman_config.__RMAN_CONFIG__.items():\n for param_name, ndp in cfg.params.items():\n if not hasattr(ndp, 'panel'):\n continue\n if ndp.panel == panel:\n label = ndp.name\n if hasattr(ndp, 'label'):\n label = ndp.label\n props[label] = ndp.name\n print(\"Configurable Properties (%s)\" % panel)\n print(\"------------------------------\")\n for label, prop in props.items():\n print(\"%s (%s)\" % (prop, label))\n print(\"------------------------------\\n\")\n return props\n\ndef GetPanelPropertyAsJson(panel, prop):\n '''Get a configurable panel property as JSON\n\n Example:\n import RenderManForBlender.rfb_api as rfb_api\n rfb_api.GetPanelPropertyAsJson('RENDER_PT_renderman_sampling', 'hider_maxSamples')\n\n Args:\n panel (str) - the name of the panel caller is interested in\n prop (str) - property name caller is interested in\n '''\n\n json_str = ''\n for config_name,cfg in rman_config.__RMAN_CONFIG__.items():\n for param_name, ndp in cfg.params.items():\n if 
not hasattr(ndp, 'panel'):\n continue\n if ndp.panel == panel and ndp.name == prop:\n json_str = json.dumps(ndp.as_dict())\n break\n return json_str\n\ndef GetSkeletonLocaleJson(jsonfile=None):\n '''Get a skeleton JSON locale file\n\n Example:\n import RenderManForBlender.rfb_api as rfb_api\n rfb_api.GetSkeletonLocaleJson()\n\n Args:\n jsonfile (str): path to a file to also write the JSON to\n\n '''\n\n from ..rman_bl_nodes import __RMAN_NODES__\n\n json_str = ''\n jdata = dict()\n jdata['locale'] = '[name of your locale]'\n translations = dict()\n for config_name,cfg in rman_config.__RMAN_CONFIG__.items():\n for param_name, ndp in cfg.params.items():\n label = ndp.name\n label = getattr(ndp, 'label', label)\n translations[label] = {\"context\": \"*\", \"translation\": \"\"} \n help = getattr(ndp, 'help', None)\n if help:\n translations[help] = {\"context\": \"*\", \"translation\": \"\"} \n\n for nm, nodes in __RMAN_NODES__.items():\n for node_desc in nodes:\n description = getattr(node_desc, 'help', None)\n if description:\n translations[help] = {\"context\": \"*\", \"translation\": \"\"} \n\n for ndp in node_desc.params:\n label = ndp.name\n label = getattr(ndp, 'label', label)\n translations[label] = {\"context\": \"*\", \"translation\": \"\"} \n help = getattr(ndp, 'help', None)\n if help:\n translations[help] = {\"context\": \"*\", \"translation\": \"\"} \n\n jdata['translations'] = translations\n json_str = json.dumps(jdata, indent=2)\n\n if jsonfile:\n with open(jsonfile, 'w') as f:\n json.dump(jdata, f, indent=2)\n\n return json_str \n \n","sub_path":"rfb_api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"256216068","text":"import cv2\nimport os\nimport argparse\nimport yaml\nimport sys\nsys.path.append(\"..\")\n\nfrom service.region_detector import region_detect_skimage\nfrom service.s3_connector import s3_connection, upload_image, download_image\nfrom reference_gan.solver import Solver \nfrom reference_gan.data_loader import get_loader\nfrom danboo.segment import segment\n\n\ns3 = s3_connection()\n\n\ndef paint_s3_image(reference_access_key, sketch_access_key, result_access_key):\n \n try:\n # download image from s3\n download_image(s3, './reference.png', reference_access_key)\n download_image(s3, './sketch.png', sketch_access_key)\n # do paint\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', type=str, default='../reference_gan/config.yml', help='specifies config yaml file')\n params = parser.parse_args()\n\n if os.path.exists(params.config):\n config = yaml.load(open(params.config, 'r'), Loader=yaml.FullLoader)\n solver = Solver(config, get_loader(config))\n print('test start')\n solver.test()\n \n else:\n print(\"Please check your config yaml file\")\n \n image = cv2.imread('../reference_gan/colorization_gan4/results/gan_image.jpg')\n #image = cv2.imread('danboo/gan_result.png')\n skeleton, region, flatten = segment(image)\n cv2.imwrite('./result.png', flatten)\n\n # upload result image to s3\n resultUrl = upload_image(s3, './result.png', result_access_key)\n return {\n \"resultUrl\": resultUrl,\n \"success\": True\n }\n\n except Exception as e:\n print(e)\n return {\n \"success\": False\n }\n\n","sub_path":"web_app/service/painter.py","file_name":"painter.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
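The painter service in the record above chains four steps: download the reference and sketch images from S3, colorize them with the reference-GAN Solver configured by config.yml, flatten the output with the danboo segmenter, and upload the result back to S3. A minimal caller sketch follows; only the paint_s3_image signature and its {"success", "resultUrl"} return shape come from the record itself, while the import path and the three S3 object keys are hypothetical placeholders.

# Hypothetical driver for the painter service above; the S3 object keys are
# placeholders, not real bucket contents.
from service.painter import paint_s3_image

result = paint_s3_image(
    reference_access_key="uploads/reference/123.png",  # colored reference image
    sketch_access_key="uploads/sketch/123.png",        # line art to be colorized
    result_access_key="results/123.png",               # destination key for the output
)
if result["success"]:
    print("colorized image uploaded to:", result["resultUrl"])
else:
    print("painting failed; see service logs")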
+{"seq_id":"102409360","text":"#!/usr/bin/python\n\nimport cv2\nimport sys\n\nimg = cv2.imread(\"map_big.png\")\n\ni = 0\ntry:\n while True:\n cv2.line(img, (0, 0), (50, 110), (255, 0, 0), 2)\n cv2.line(img, (50, 110), (200, 100), (255, 255, 0), 2)\n cv2.imshow(\"title\", img)\n k = cv2.waitKey(50)\n i += 1\n\n\nexcept KeyboardInterrupt:\n cv2.destroyAllWindows()\n sys.exit(0)\n","sub_path":"src/release/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"258650956","text":"from rest_framework import status\nfrom rest_framework.test import APITestCase, URLPatternsTestCase\nfrom rest_framework.test import APIRequestFactory\nfrom coin.api.CoinViewSet import CointViewSet\nfrom rest_framework.routers import DefaultRouter\nfrom django.urls import path, include\nfrom coin.model.CoinModel import CoinModel\n\n\nclass CointTests(APITestCase, URLPatternsTestCase):\n router = DefaultRouter()\n\n urlpatterns = [\n path('api/v1/', include(router.urls))\n ]\n\n coins = [\n {\n \"coin\": \"DOLLAR\",\n \"coin_initials\": \"USD\",\n \"amount_coint_bslt\": 1,\n \"price\": 1,\n \"country\": \"United State of America\",\n \"country_initials\": \"USA\",\n \"bslt\": \"USD\"\n },\n\n {\n \"coin\": \"REAL\",\n \"coin_initials\": \"BRL\",\n \"amount_coint_bslt\": 1,\n \"price\": 0.2029,\n \"country\": \"BRAZIL\",\n \"country_initials\": \"BR\",\n \"bslt\": \"USD\"\n },\n\n {\n \"coin\": \"EURO\",\n \"coin_initials\": \"EUR\",\n \"price\": 1.1925,\n \"amount_coint_bslt\": 1,\n \"country\": \"EUROPEN UNION\",\n \"country_initials\": \"EU\",\n \"bslt\": \"USD\"\n },\n\n {\n \"coin\": \"BITCOIN\",\n \"coin_initials\": \"BTC\",\n \"price\": 34713.90,\n \"amount_coint_bslt\": 1,\n \"country\": \"GLOBAL\",\n \"country_initials\": \"GB\",\n \"bslt\": \"USD\"\n },\n\n {\n \"coin\": \"ETHERIUN\",\n \"coin_initials\": \"ETH\",\n \"price\": 1991.50,\n \"amount_coint_bslt\": 1,\n \"country\": \"GLOBAL\",\n \"country_initials\": \"GB\",\n \"bslt\": \"USD\"\n },\n\n {\n \"id\": 6,\n \"price\": 0.1433,\n \"coin\": \"BOLIVIANO\",\n \"coin_initials\": \"BOB\",\n \"amount_coint_bslt\": 1,\n \"country\": \"BOLIVIA\",\n \"country_initials\": \"BL\",\n \"bslt\": \"USD\"\n }\n ]\n def setUp(self) -> None:\n self._factory = APIRequestFactory(enforce_csrf_checks=False)\n self._data = self.coins\n\n def test_create_coin(self):\n view = CointViewSet.as_view({'post': 'create'})\n request = self._factory.post('coin/create/', self._data)\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def teste_update_coin(self):\n self.test_create_coin()\n coin = CoinModel.objects.all()\n factory = APIRequestFactory()\n view = CointViewSet.as_view({'put': 'update_all_coin'})\n request = factory.put('coin/update_coin/', self._data)\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_list(self):\n view = CointViewSet.as_view({'get': 'list'})\n request = self._factory.get('coin/list/?page=1&size=20')\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_delete(self):\n self.test_create_coin()\n coin = CoinModel.objects.all()\n view = CointViewSet.as_view({'delete': 'delete'})\n request = self._factory.delete('coin/delete/', self._data)\n response = view(request, coin.first().id)\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)","sub_path":"coin/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"413358730","text":"import io\nimport json\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nimport unittest\nfrom contextlib import contextmanager\n\nfrom flask import Flask\nfrom nose.tools import nottest\nfrom pyflakes import api\nfrom pyflakes.reporter import Reporter\nfrom slimit import ast\nfrom viceroy.api import build_test_case\nfrom viceroy.constants import VICEROY_STATIC_ROOT\nfrom viceroy.contrib.flask import ViceroyFlaskTestCase\nfrom viceroy.contrib.qunit import QUnitScanner\n\nimport gettextjs\n\n\nGETTEXT_PY_FILE = os.path.abspath(gettextjs.__file__)\nBASE_DIR = os.path.dirname(GETTEXT_PY_FILE)\nJSHINT_RC = os.path.join(\n BASE_DIR,\n '..',\n '..',\n '.jshintrc'\n)\nGETTEXT_JS_FILE = os.path.join(\n BASE_DIR,\n '..',\n 'js',\n 'gettext.js',\n)\nGETTEXT_JS_COMPILED_FILE = os.path.join(\n BASE_DIR,\n '..',\n '..',\n 'dist',\n 'gettext.js',\n)\nTHIS_FILE = os.path.abspath(__file__)\nTHIS_DIR = os.path.dirname(THIS_FILE)\nTESTS_JS_FILE = os.path.join(THIS_DIR, 'tests.js')\nTESTDATA_DIR = os.path.join(\n THIS_DIR,\n 'data',\n)\nLOCALE_PATH = os.path.join(\n TESTDATA_DIR,\n 'locale',\n)\n\n\nclass FixedQUnitScanner(QUnitScanner):\n def visit_FunctionCall(self, node):\n if isinstance(node.identifier, ast.DotAccessor):\n if isinstance(node.identifier.node, ast.Identifier):\n if node.identifier.node.value == 'QUnit':\n if node.identifier.identifier.value in ['test', 'asyncTest']:\n yield self.extract_name(node.args[0])\n\n\n@contextmanager\ndef tempdir():\n dirname = tempfile.mkdtemp()\n try:\n yield dirname\n finally:\n shutil.rmtree(dirname)\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return \"\"\"\n \n \n \n \n \n \n \n \n \n \n
\n
\n \n \n\"\"\"\n\n\n@app.route('/gettext.js')\ndef static_gettext():\n with open(GETTEXT_JS_COMPILED_FILE) as fobj:\n return fobj.read()\n\n\n@app.route('/tests.js')\ndef static_tests():\n with open(TESTS_JS_FILE) as fobj:\n return fobj.read()\n\n\n@app.route('/data/')\ndef static_data(filename):\n with open(os.path.join(TESTDATA_DIR, filename)) as fobj:\n return fobj.read()\n\n\n@app.route('/viceroy/')\ndef static_viceroy(filename):\n with open(os.path.join(VICEROY_STATIC_ROOT, filename)) as fobj:\n return fobj.read()\n\n\n@app.route('/locale//LC_MESSAGES/')\ndef message_catalog(locale, filename):\n path = os.path.join(app.locale_dir, locale, 'LC_MESSAGES', filename)\n with open(path) as fobj:\n return fobj.read()\n\n\nclass IntegrationTests(unittest.TestCase):\n def test_compile_to_json(self):\n with tempdir() as workspace:\n gettextjs.cli([LOCALE_PATH, workspace])\n en_json_path = os.path.join(\n workspace,\n 'en',\n 'LC_MESSAGES',\n 'messages.mo.json'\n )\n with open(en_json_path) as fobj:\n en_data = json.load(fobj)\n self.assertIn('plural', en_data)\n self.assertEqual(en_data['plural'], '(n != 1)')\n self.assertIn('catalog', en_data)\n self.assertEqual(en_data['catalog'], {\n 'simple-string': 'A simple string',\n 'singular-string': [\n 'Singular form!',\n 'Plural form!'\n ]\n })\n\n ja_json_path = os.path.join(\n workspace,\n 'ja',\n 'LC_MESSAGES',\n 'messages.mo.json'\n )\n with open(ja_json_path) as fobj:\n ja_data = json.load(fobj)\n self.assertIn('plural', ja_data)\n self.assertEqual(ja_data['plural'], None)\n self.assertIn('catalog', ja_data)\n self.assertEqual(ja_data['catalog'], {\n 'simple-string': '簡単なストリング',\n 'singular-string': '日本語には複数形がありません。'\n })\n\n def test_compile_to_js(self):\n with tempdir() as workspace:\n gettextjs.cli(['--js', LOCALE_PATH, workspace])\n en_js_path = os.path.join(\n workspace,\n 'en',\n 'LC_MESSAGES',\n 'messages.mo.js'\n )\n with open(en_js_path) as fobj:\n en_content = fobj.read()\n\n self.assertTrue(en_content.startswith('var EN_MESSAGES = '))\n self.assertTrue(en_content.endswith(';'))\n en_data = json.loads(en_content[len('var EN_MESSAGES = '):-1])\n self.assertIn('plural', en_data)\n self.assertEqual(en_data['plural'], '(n != 1)')\n self.assertIn('catalog', en_data)\n self.assertEqual(en_data['catalog'], {\n 'simple-string': 'A simple string',\n 'singular-string': [\n 'Singular form!',\n 'Plural form!'\n ]\n })\n\n ja_js_path = os.path.join(\n workspace,\n 'ja',\n 'LC_MESSAGES',\n 'messages.mo.js'\n )\n with open(ja_js_path) as fobj:\n ja_content = fobj.read()\n self.assertTrue(ja_content.startswith('var JA_MESSAGES = '))\n self.assertTrue(ja_content.endswith(';'))\n ja_data = json.loads(ja_content[len('var JA_MESSAGES = '):-1])\n self.assertIn('plural', ja_data)\n self.assertEqual(ja_data['plural'], None)\n self.assertIn('catalog', ja_data)\n self.assertEqual(ja_data['catalog'], {\n 'simple-string': '簡単なストリング',\n 'singular-string': '日本語には複数形がありません。'\n })\n\n\nclass CodeQualityTests(unittest.TestCase):\n def test_pyflakes(self):\n files = [\n GETTEXT_PY_FILE,\n THIS_FILE,\n ]\n out = io.StringIO()\n reporter = Reporter(out, out)\n errors = sum(map(lambda f: api.checkPath(f, reporter), files))\n self.assertEqual(errors, 0, '\\n' + out.getvalue())\n\n @unittest.skipIf(not shutil.which('jshint'), \"jshint not installed\")\n def test_jshint(self):\n files = [\n GETTEXT_JS_FILE,\n TESTS_JS_FILE\n ]\n process = subprocess.Popen(\n ['jshint', '-c', JSHINT_RC] + files,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n messages, _ = 
process.communicate(timeout=5)\n self.assertEqual(process.returncode, 0, '\\n' + messages.decode('utf-8'))\n\n\nclass JSTestsBase(ViceroyFlaskTestCase):\n viceroy_flask_app = app\n\n @classmethod\n def setUpClass(cls):\n app.locale_dir = tempfile.mkdtemp()\n gettextjs.compile_locale_path(\n LOCALE_PATH,\n app.locale_dir,\n gettextjs.JS_MODE\n )\n gettextjs.compile_locale_path(\n LOCALE_PATH,\n app.locale_dir,\n gettextjs.JSON_MODE\n )\n super().setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(app.locale_dir)\n super().tearDownClass()\n\n\nJSTests = nottest(build_test_case)(\n 'ViceroySuccessTests',\n TESTS_JS_FILE,\n FixedQUnitScanner,\n JSTestsBase\n)\n","sub_path":"src/py/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"568374433","text":"import dynet\nimport numpy\n\n\nclass NeuralNetwork(object):\n def __init__(self, embeded_vector_size, window_size, input_layer_size, hidden_layer_size, output_layer_size,\n learning_rate, embedding_vectors=None):\n self.parameter_collection = dynet.ParameterCollection()\n if embedding_vectors is None:\n self.E = self.parameter_collection.add_lookup_parameters((input_layer_size, embeded_vector_size))\n else:\n self.E = self.parameter_collection.add_lookup_parameters((input_layer_size, embeded_vector_size), init=embedding_vectors)\n self.W1 = self.parameter_collection.add_parameters((hidden_layer_size, embeded_vector_size * (window_size * 2 + 1)))\n self.b1 = self.parameter_collection.add_parameters(hidden_layer_size)\n self.W2 = self.parameter_collection.add_parameters((output_layer_size, hidden_layer_size))\n self.b2 = self.parameter_collection.add_parameters(output_layer_size)\n\n self.trainer = dynet.SimpleSGDTrainer(self.parameter_collection, learning_rate)\n\n def train(self, data, label):\n dynet.renew_cg()\n\n output = self._get_output_expression(data)\n loss = -dynet.log(dynet.pick(output, label))\n\n loss.value()\n loss.backward()\n self.trainer.update()\n\n def _get_output_expression(self, data):\n input_vec = dynet.concatenate([self.E[j] for j in data])\n output = dynet.softmax(self.W2 * dynet.tanh(self.W1 * input_vec + self.b1) + self.b2)\n return output\n\n def classify(self, data):\n dynet.renew_cg()\n return numpy.array(self._get_output_expression(data).value()).argmax()\n\n def get_loss_and_output(self, data, label):\n dynet.renew_cg()\n output = self._get_output_expression(data)\n loss = -dynet.log(dynet.pick(output, label))\n return loss.value(), numpy.array(output.value()).argmax()","sub_path":"neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"255161318","text":"import numpy as np\r\nimport cv2\r\nimport time\r\nimport os\r\nfrom socket import *\r\n\r\ndata=0\r\nhost = \"192.168.43.105\"\r\nport = 13000\r\nbuf = 1024\r\naddr = (host, port)\r\nUDPSock = socket(AF_INET, SOCK_DGRAM)\r\nUDPSock.bind(addr)\r\nwhile 1:\r\n print(\"Waiting to receive messages...\")\r\n (data, addr) = UDPSock.recvfrom(buf)\r\n#print( \"Received message: \" + data)\r\n\r\n if(data==b'1'):\r\n for a in range(1,11):\r\n img=cv2.imread('DbNew/A_'+str(a)+'.jpg')\r\n img=cv2.resize(img,(1350,710))\r\n cv2.imshow('pic '+str(a),img)\r\n cv2.namedWindow('pic '+str(a))\r\n cv2.moveWindow('pic '+str(a),0,0)\r\n k = cv2.waitKey(2000) & 0xff\r\n 
cv2.destroyAllWindows()\r\n if(k==ord('q')):\r\n break\r\n if(data==b'2'):\r\n\r\n for a in range(1,11):\r\n img=cv2.imread('DbNew/B_'+str(a)+'.jpg')\r\n img=cv2.resize(img,(1350,710))\r\n cv2.imshow('pic '+str(a),img)\r\n cv2.namedWindow('pic '+str(a))\r\n cv2.moveWindow('pic '+str(a),0,0)\r\n k = cv2.waitKey(2000) & 0xff\r\n cv2.destroyAllWindows()\r\n if(k==ord('q')):\r\n break\r\n if(k==ord('q')):\r\n break\r\n","sub_path":"PicShow.py","file_name":"PicShow.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"394027129","text":"'''\nTrain supervised object matching task; use dataset downstream for now. This script creates\npairs that are half the same object paired with itself (w/random rigid body transform) and half \nobject paired with different object and then trains a CNN to determine whether the objects\nare the same object or not.\n'''\n\nimport numpy as np\nimport argparse\nimport os\nimport matplotlib.pyplot as plt\nimport itertools\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport pickle\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom random import shuffle\n\nfrom autolab_core import YamlConfig, RigidTransform\nfrom unsupervised_rbt import TensorDataset\nfrom unsupervised_rbt.models import ResNetSiameseNetwork\nfrom perception import DepthImage, RgbdImage\n\ndef generate_data(dataset, num_pairs = 10000):\n \"\"\"\n Generates a pair of depth images. Half will be from the same object, half will be from different objects.\n Chooses a random orientation from 20 different orientations. Binary Labels for if same object or not.\n \"\"\"\n im1s, im2s, labels = [], [], []\n for _ in range(num_pairs):\n dp1_idx = np.random.randint(dataset.num_datapoints)\n dp2_idx, label = dp1_idx, 1 # same object\n \n im1_idx = np.random.randint(20)\n im2_idx = np.random.randint(20)\n \n im1s.append(255 * dataset[dp1_idx]['depth_images'][im1_idx])\n\n if np.random.random() < 0.5: # Makes half of the training data to be different objects\n while dp2_idx == dp1_idx:\n dp2_idx = np.random.randint(dataset.num_datapoints)\n label = 0\n\n im2s.append(255 * dataset[dp2_idx]['depth_images'][im2_idx])\n labels.append(label)\n im1s, im2s, labels = np.array(im1s), np.array(im2s), np.array(labels)\n return np.expand_dims(im1s, 1), np.expand_dims(im2s, 1), labels\n\ndef train(im1s, im2s, labels, batch_size):\n \"\"\"\n Train the model specified in main, then return the loss and classification accuracy on\n 80% of the training data. 
Uses tqdm to visualize progress.\n \"\"\"\n model.train() \n train_loss, correct, total = 0, 0, 0\n \n N_train = int(0.8 * im1s.shape[0])\n n_train_steps = N_train//batch_size\n for step in tqdm(range(n_train_steps)):\n im1_batch = Variable(torch.from_numpy(im1s[step*batch_size : (step+1)*batch_size]).float()).to(device)\n im2_batch = Variable(torch.from_numpy(im2s[step*batch_size : (step+1)*batch_size]).float()).to(device)\n label_batch = Variable(torch.from_numpy(labels[step*batch_size : (step+1)*batch_size]).float()).to(device)\n\n # for i in range(batch_size):\n # title = 'Same Object' if labels[step*batch_size + i] else 'Different Object'\n # plt.title(title)\n # plt.subplot(121)\n # depth_image_show1 = im1s[step*batch_size + i][0]\n # plt.axis('off')\n # plt.imshow(depth_image_show1, cmap='gray')\n # plt.subplot(122)\n # depth_image_show2 = im2s[step*batch_size + i][0]\n # plt.axis('off')\n # plt.imshow(depth_image_show2, cmap='gray')\n # plt.show()\n \n optimizer.zero_grad()\n prob = model(im1_batch, im2_batch)\n loss = criterion(prob, label_batch.long())\n _, predicted = torch.max(prob, 1)\n# output1, output2 = model(im1_batch, im2_batch)\n# loss = criterion(output1, output2, label_batch)\n \n# predicted = (prob > 0.5).float().flatten()\n# correct += (predicted == label_batch).sum().item()\n# total += label_batch.size(0)\n\n correct += (predicted == label_batch.long()).sum().item()\n total += label_batch.size(0)\n \n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n \n class_acc = 100 * correct/total\n return train_loss/n_train_steps, class_acc\n\ndef test(im1s, im2s, labels, batch_size):\n model.eval()\n test_loss, correct, total = 0, 0, 0\n\n N_test = int(0.2 * im1s.shape[0])\n N_train = int(0.8 * im1s.shape[0])\n n_test_steps = N_test // batch_size\n im1s, im2s, labels = im1s[N_train:], im2s[N_train:], labels[N_train:]\n with torch.no_grad():\n for step in tqdm(range(n_test_steps)):\n im1_batch = Variable(torch.from_numpy(im1s[step*batch_size : (step+1)*batch_size]).float()).to(device)\n im2_batch = Variable(torch.from_numpy(im2s[step*batch_size : (step+1)*batch_size]).float()).to(device)\n label_batch = Variable(torch.from_numpy(labels[step*batch_size : (step+1)*batch_size]).float()).to(device)\n \n optimizer.zero_grad()\n prob = model(im1_batch, im2_batch)\n loss = criterion(prob, label_batch.long())\n _, predicted = torch.max(prob, 1)\n# output1, output2 = model(im1_batch, im2_batch)\n# loss = criterion(output1, output2, label_batch)\n# print(\"LABELS\")\n# print(label_batch)\n# print(\"PREDICTED\")\n# print(prob)\n# predicted = (prob > 0.5).float().flatten()\n# correct += (predicted == label_batch).sum().item()\n# total += label_batch.size(0)\n correct += (predicted == label_batch.long()).sum().item()\n total += label_batch.size(0)\n \n test_loss += loss.item()\n \n class_acc = 100 * correct/total\n# class_acc = 0\n return test_loss/n_test_steps, class_acc\n\ndef display_conv_layers(model):\n def imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.cpu().numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n with torch.no_grad():\n imshow(torchvision.utils.make_grid(model.resnet.conv1.weight))\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--test', action='store_true')\n default_config_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '..',\n 'cfg/tools/sup_obj_matching.yaml')\n parser.add_argument('-config', type=str, default=default_config_filename)\n 
parser.add_argument('-dataset', type=str, required=True)\n args = parser.parse_args()\n args.dataset = os.path.join('/nfs/diskstation/projects/unsupervised_rbt', args.dataset)\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n config = YamlConfig(args.config)\n\n if not args.test:\n dataset = TensorDataset.open(args.dataset)\n im1s, im2s, labels = generate_data(dataset)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = ResNetSiameseNetwork(config['pred_dim']).to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters())\n \n if not os.path.exists(args.dataset + \"/splits/train\"):\n print(\"Created Train Split\")\n dataset.make_split(\"train\", train_pct=0.8)\n\n train_losses, test_losses, train_accs, test_accs = [], [], [], []\n for epoch in range(config['num_epochs']):\n train_loss, train_acc = train(im1s, im2s, labels, config['batch_size'])\n test_loss, test_acc = test(im1s, im2s, labels, config['batch_size'])\n train_losses.append(train_loss)\n test_losses.append(test_loss)\n train_accs.append(train_acc)\n test_accs.append(test_acc)\n print(\"Epoch %d, Train Loss = %f, Train Acc = %.2f %%, Test Loss = %f, Test Acc = %.2f %%\" % (epoch, train_loss, train_acc, test_loss, test_acc))\n pickle.dump({\"train_loss\" : train_losses, \"train_acc\" : train_accs, \"test_loss\" : test_losses, \"test_acc\" : test_accs}, open( config['losses_f_name'], \"wb\"))\n torch.save(model.state_dict(), config['model_save_dir'])\n \n else:\n# model = ResNetDownstreamSiameseNetwork(config['pred_dim'])\n\n losses = pickle.load( open( config['losses_f_name'], \"rb\" ) )\n train_returns = np.array(losses[\"train_loss\"])\n test_returns = np.array(losses[\"test_loss\"])\n train_accs = np.array(losses[\"train_acc\"])\n test_accs = np.array(losses[\"test_acc\"])\n \n plt.plot(np.arange(len(train_returns)) + 1, train_returns, label=\"Training Loss\")\n plt.plot(np.arange(len(test_returns)) + 1, test_returns, label=\"Testing Loss\")\n plt.xlabel(\"Training Iteration\")\n plt.ylabel(\"Loss\")\n plt.title(\"Training Curve\")\n plt.legend(loc='best')\n plt.savefig(config['losses_plot_f_name'])\n plt.close()\n \n plt.plot(np.arange(len(train_accs)) + 1, train_accs, label=\"Training Acc\")\n plt.plot(np.arange(len(test_accs)) + 1, test_accs, label=\"Testing Acc\")\n plt.xlabel(\"Training Iteration\")\n plt.ylabel(\"Classification Accuracy\")\n plt.title(\"Training Curve\")\n plt.legend(loc='best')\n plt.savefig(config['accs_plot_f_name'])\n plt.close()\n","sub_path":"tools/CASE/old/sup_obj_matching.py","file_name":"sup_obj_matching.py","file_ext":"py","file_size_in_byte":9084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"537080128","text":"import requests\nimport shutil\nimport os\nimport re\nfrom fpdf import FPDF\n\n\nprint('[*] What chapter to crawl?')\nchapter = input()\npath = 'https://rawkuma.com/one-piece-chapter-{}/'.format(chapter)\npdf = FPDF()\n\nraw = requests.get(path).text\n\nall_image_urls = re.findall(' 0:\n print(\"Verbose mode on\")\n\n # Install the signal handler to catch SIGTERM\n signal.signal(signal.SIGTERM, sigterm)\n\n for dbClient in args.clients:\n workContext = WorkContext(dbClient)\n doWork(workContext, verbose)\n for label in sorted(labelCounts.keys()):\n count = labelCounts[label]\n print(\"%s\\t%s\" % (label, count))\n return 0\n \n except KeyboardInterrupt:\n ### handle keyboard interrupt ###\n return 0\n except Exception as e:\n exc_type, exc_value, 
exc_traceback = sys.exc_info()\n print(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)))\n if not(DEBUG or TESTRUN):\n indent = len(program_name) * \" \"\n sys.stderr.write(program_name + \": \" + repr(e) + \"\\n\")\n sys.stderr.write(indent + \" for help use --help\")\n return 2\n\nif __name__ == \"__main__\":\n if DEBUG:\n sys.argv.append(\"-v\")\n if TESTRUN:\n import doctest\n doctest.testmod()\n if PROFILE:\n import cProfile\n import pstats\n profile_filename = 'countlabels.countlabels_profile.txt'\n cProfile.run('main()', profile_filename)\n statsfile = open(\"profile_stats.txt\", \"wb\")\n p = pstats.Stats(profile_filename, stream=statsfile)\n stats = p.strip_dirs().sort_stats('cumulative')\n stats.print_stats()\n statsfile.close()\n sys.exit(0)\n main()\n","sub_path":"src/countlabels/countlabels.py","file_name":"countlabels.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"87985786","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef weights_init(w):\n \"\"\"\n Initializes the weights of the layer, w.\n \"\"\"\n classname = w.__class__.__name__\n if classname.find('conv') != -1:\n nn.init.normal_(w.weight.data, 0.0, 0.02)\n elif classname.find('bn') != -1:\n nn.init.normal_(w.weight.data, 1.0, 0.02)\n nn.init.constant_(w.bias.data, 0)\n\n\n# Define the Generator Network\nclass Generator(nn.Module):\n def __init__(self, params):\n super().__init__()\n self.layers_list = nn.ModuleList()\n input_dim = params['nz']\n net_structure = params['net_structure']\n activations = activations_to_torch(net_structure['activations'])\n\n ch_in = input_dim\n for i, cnn_dim in enumerate(net_structure['dim']):\n ch_out, kernel_size, stride, padding = cnn_dim\n conv_layer = nn.ConvTranspose2d(ch_in, ch_out, kernel_size, stride, padding=padding, bias=False)\n self.layers_list.append(conv_layer)\n bn_layer = nn.BatchNorm2d(ch_out)\n self.layers_list.append(bn_layer)\n activation = activations[0] if i < len(net_structure['dim']) - 1 else activations[1]\n self.layers_list.append(activation)\n ch_in = ch_out\n\n def forward(self, x):\n a = x\n for layer in self.layers_list:\n a = layer(a)\n return a\n\n\n# Define the Discriminator Network\nclass Discriminator(nn.Module):\n def __init__(self, params):\n super().__init__()\n\n # Input Dimension: (nc) x 64 x 64\n self.conv1 = nn.Conv2d(params['nc'], params['ndf'],\n 4, 2, 1, bias=False)\n\n # Input Dimension: (ndf) x 32 x 32\n self.conv2 = nn.Conv2d(params['ndf'], params['ndf']*2,\n 4, 2, 1, bias=False)\n self.bn2 = nn.BatchNorm2d(params['ndf']*2)\n\n # Input Dimension: (ndf*2) x 16 x 16\n self.conv3 = nn.Conv2d(params['ndf']*2, params['ndf']*4,\n 4, 2, 1, bias=False)\n self.bn3 = nn.BatchNorm2d(params['ndf']*4)\n\n # Input Dimension: (ndf*4) x 8 x 8\n self.conv4 = nn.Conv2d(params['ndf']*4, params['ndf']*8,\n 4, 2, 1, bias=False)\n self.bn4 = nn.BatchNorm2d(params['ndf']*8)\n\n # Input Dimension: (ndf*8) x 4 x 4\n self.conv5 = nn.Conv2d(params['ndf']*8, 1, 4, 1, 0, bias=False)\n\n def forward(self, x):\n x = F.leaky_relu(self.conv1(x), 0.2, True)\n x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2, True)\n x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2, True)\n x = F.leaky_relu(self.bn4(self.conv4(x)), 0.2, True)\n\n x = F.sigmoid(self.conv5(x))\n\n return x\n\n\ndef activations_to_torch(activations):\n for i, i_act in enumerate(activations):\n if i_act[0] == 'tanh':\n activations[i] = nn.Tanh()\n elif 
i_act[0] == 'sigmoid':\n activations[i] = nn.Sigmoid()\n elif i_act[0] == 'relu':\n activations[i] = nn.ReLU()\n elif i_act[0] == 'lrelu':\n activations[i] = nn.LeakyReLU(negative_slope=i_act[1])\n elif i_act[0] is None:\n pass\n else:\n raise SystemExit('Error: Unknown activation function \\'{0}\\''.format(i_act[0]))\n return activations\n","sub_path":"dcgan.py","file_name":"dcgan.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"543506335","text":"import tensorflow.keras\r\nfrom tensorflow.keras.layers import Input, Dense, Reshape, Dropout\r\nfrom tensorflow.keras.models import Model\r\nimport numpy as np\r\nimport os\r\nimport tensorflow.keras.backend as K\r\n\r\n\r\nclass EmbeddingAutoEncoder:\r\n def __init__(self, input_set, D, weights_path, encoding_dim=3, batch_size=50, emb_alpha=0.1):\r\n self.encoding_dim = encoding_dim\r\n self.batch_size = batch_size\r\n self.x = input_set\r\n self.input_shape = len(input_set[0])\r\n self.D = D\r\n self.weights_path = weights_path\r\n # embedding\r\n self.emb_alpha = emb_alpha\r\n print(self.x)\r\n\r\n def _encoder(self):\r\n inputs = Input(shape=self.x[0].shape)\r\n print(self.x[0].shape)\r\n encoded1 = Dense(300, activation='elu')(inputs)\r\n dropout1 = Dropout(0.1)(encoded1)\r\n encoded2 = Dense(100, activation='elu')(dropout1)\r\n dropout2 = Dropout(0.1)(encoded2)\r\n encoded3 = Dense(self.encoding_dim, activation='elu')(dropout2)\r\n model = Model(inputs, encoded3)\r\n self.encoder = model\r\n print(model.summary())\r\n return model\r\n\r\n def _decoder(self):\r\n inputs = Input(shape=(self.encoding_dim,))\r\n decoded1 = Dense(100, activation='elu')(inputs)\r\n dropout1 = Dropout(0.1)(decoded1)\r\n decoded2 = Dense(300, activation='elu')(dropout1)\r\n dropout2 = Dropout(0.1)(decoded2)\r\n decoded3 = Dense(self.input_shape, activation='elu')(dropout2)\r\n reshape = Reshape((int(self.input_shape / 21), 21))(decoded3)\r\n decoded3 = Dense(21, activation='softmax')(reshape)\r\n reshape2 = Reshape(self.x[0].shape)(decoded3)\r\n model = Model(inputs, reshape2)\r\n print(model.summary())\r\n\r\n self.decoder = model\r\n return model\r\n\r\n def encoder_decoder(self):\r\n ec = self._encoder()\r\n dc = self._decoder()\r\n inputs = Input(shape=self.x[0].shape)\r\n ec_out = ec(inputs)\r\n dc_out = dc(ec_out)\r\n # create the model using our input (cdr3 sequences set) and\r\n # two separate outputs -- one for the reconstruction of the\r\n # data and another for the representations, respectively\r\n model = Model(inputs=inputs, outputs=[dc_out, ec_out])\r\n self.model = model\r\n return model\r\n\r\n # 1. y_true 2. y_pred\r\n def vae_loss(self, D, ec_out):\r\n emb = 0\r\n for i in range(self.batch_size):\r\n for j in range(self.batch_size):\r\n if i < j:\r\n # norm 2\r\n dis_z = K.sqrt(K.sum(K.square(ec_out[i] - ec_out[j])))\r\n emb += K.square(dis_z - D[i][j])\r\n emb = self.emb_alpha * emb\r\n return emb\r\n\r\n def generator(self, X, D, batch):\r\n # generate: A. batches of x B. distances matrix within batch\r\n while True:\r\n inds = []\r\n for i in range(batch):\r\n # choose random index in features\r\n index = np.random.choice(X.shape[0], 1)[0]\r\n if i == 0:\r\n batch_X = np.array([X[index]])\r\n else:\r\n batch_X = np.concatenate((batch_X, np.array([X[index]])), axis=0)\r\n inds.append(index)\r\n tmp = D[np.array(inds)]\r\n batch_D = tmp[:, np.array(inds)]\r\n # 1. training data-features 2. 
target data-labels\r\n yield batch_X, [batch_X, batch_D]\r\n\r\n def fit_generator(self, epochs=300):\r\n # self.model = multi_gpu_model(self.model, gpus=3)\r\n adam = tensorflow.keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)\r\n self.model.compile(optimizer=adam, loss=['mse', self.vae_loss])\r\n results = self.model.fit_generator(self.generator(self.x, self.D, self.batch_size),\r\n steps_per_epoch=self.x.shape[0] / self.batch_size,\r\n epochs=epochs, verbose=2)\r\n return results\r\n\r\n def fit_autoencoder(self, epochs=300):\r\n # self.model = multi_gpu_model(self.model, gpus=3)\r\n adam = tensorflow.keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)\r\n self.model.compile(optimizer=adam, loss=['mse', self.vae_loss], metrics=['mae'])\r\n log_dir = './log/'\r\n tb_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0,\r\n write_graph=True, write_images=True)\r\n es_callback = tensorflow.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\r\n patience=20, verbose=0, mode='auto')\r\n self.model.fit(x=self.x, y=[self.x, self.D], validation_split=0.2, verbose=2,\r\n epochs=epochs, batch_size=self.batch_size,\r\n callbacks=[tb_callback, es_callback])\r\n\r\n def save_ae(self):\r\n if not os.path.exists(r'./weights_' + self.weights_path):\r\n os.mkdir(r'./weights_' + self.weights_path)\r\n self.encoder.save(r'./weights_' + self.weights_path + '/embedding_encoder_weights.h5')\r\n self.decoder.save(r'./weights_' + self.weights_path + '/embedding_decoder_weights.h5')\r\n self.model.save(r'./weights_' + self.weights_path + '/embedding_ae_weights.h5')\r\n","sub_path":"embedding_auto_encoder.py","file_name":"embedding_auto_encoder.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"438837902","text":"import getpass\nimport os\nfrom pathlib import Path, PurePosixPath\nimport secrets\nimport urllib.parse\n\nimport appdirs\nimport httpx\nimport msgpack\n\nfrom .utils import (\n ASYNC_EVENT_HOOKS,\n DEFAULT_ACCEPTED_ENCODINGS,\n EVENT_HOOKS,\n handle_error,\n NotAvailableOffline,\n UNSET,\n)\n\n\nDEFAULT_TOKEN_CACHE = os.getenv(\n \"TILED_TOKEN_CACHE\", os.path.join(appdirs.user_config_dir(\"tiled\"), \"tokens\")\n)\n\n\ndef _token_directory(token_cache, netloc, username):\n return Path(\n token_cache,\n urllib.parse.quote_plus(\n netloc.decode()\n ), # Make a valid filename out of hostname:port.\n username,\n )\n\n\ndef login(\n uri_or_profile,\n username=None,\n authentication_uri=None,\n verify=True,\n *,\n token_cache=DEFAULT_TOKEN_CACHE,\n):\n context = _context_from_uri_or_profile(\n uri_or_profile, username, authentication_uri, token_cache, verify\n )\n # This has a side effect of storing the refresh token in the token_cache, if set.\n return context.authenticate()\n\n\ndef _context_from_uri_or_profile(\n uri_or_profile,\n username,\n authentication_uri,\n token_cache,\n verify,\n headers=None,\n):\n headers = headers or {}\n headers.setdefault(\"accept-encoding\", \",\".join(DEFAULT_ACCEPTED_ENCODINGS))\n if uri_or_profile.startswith(\"http://\") or uri_or_profile.startswith(\"https://\"):\n # This looks like a URI.\n uri = uri_or_profile\n client = httpx.Client(\n base_url=uri,\n verify=verify,\n event_hooks=EVENT_HOOKS,\n headers=headers,\n timeout=httpx.Timeout(5.0, read=20.0),\n )\n context = Context(\n client,\n username=username,\n authentication_uri=authentication_uri,\n token_cache=token_cache,\n )\n 
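As a plain-NumPy sanity check of what vae_loss above penalises, squared mismatch between latent pairwise distances and the precomputed target matrix D, over the upper triangle only and scaled by emb_alpha; the toy values here are made up:

import numpy as np

z = np.array([[0.0, 0.0], [3.0, 4.0]])   # two latent points, distance 5.0
D = np.array([[0.0, 5.0], [5.0, 0.0]])   # target distance matrix
emb = 0.0
for i in range(len(z)):
    for j in range(len(z)):
        if i < j:
            dis_z = np.sqrt(np.sum((z[i] - z[j]) ** 2))
            emb += (dis_z - D[i][j]) ** 2
print(0.1 * emb)   # emb_alpha * 0.0 -> loss is zero when distances already match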
else:\n from ..profiles import load_profiles\n\n # Is this a profile name?\n profiles = load_profiles()\n if uri_or_profile in profiles:\n profile_name = uri_or_profile\n filepath, profile_content = profiles[profile_name]\n if \"uri\" in profile_content:\n uri = profile_content[\"uri\"]\n verify = profile_content.get(\"verify\", True)\n headers.update(profile_content.get(\"headers\", {}))\n client = httpx.Client(\n base_url=uri,\n verify=verify,\n event_hooks=EVENT_HOOKS,\n headers=headers,\n timeout=httpx.Timeout(5.0, read=20.0),\n )\n context = Context(\n client,\n username=profile_content.get(\"username\"),\n authentication_uri=profile_content.get(\"authentication_uri\"),\n cache=profile_content.get(\"cache\"),\n offline=profile_content.get(\"offline\", False),\n token_cache=profile_content.get(\"token_cache\", DEFAULT_TOKEN_CACHE),\n )\n elif \"direct\" in profile_content:\n # The profiles specifies that there is no server. We should create\n # an app ourselves and use it directly via ASGI.\n from ..config import construct_serve_tree_kwargs\n\n serve_tree_kwargs = construct_serve_tree_kwargs(\n profile_content.pop(\"direct\", None), source_filepath=filepath\n )\n context = context_from_tree(**serve_tree_kwargs, **profile_content)\n else:\n raise ValueError(\"Invalid profile content\")\n else:\n raise TreeValueError(\n f\"Not sure what to do with tree {uri_or_profile!r}. \"\n \"It does not look like a URI (it does not start with http[s]://) \"\n \"and it does not match any profiles.\"\n )\n return context\n\n\nclass TreeValueError(ValueError):\n pass\n\n\nclass CannotRefreshAuthentication(Exception):\n pass\n\n\nclass Context:\n \"\"\"\n Wrap an httpx.Client with an optional cache and authentication functionality.\n \"\"\"\n\n def __init__(\n self,\n client,\n authentication_uri=None,\n username=None,\n cache=None,\n offline=False,\n token_cache=DEFAULT_TOKEN_CACHE,\n app=None,\n ):\n authentication_uri = authentication_uri or \"/\"\n if not authentication_uri.endswith(\"/\"):\n authentication_uri += \"/\"\n self._client = client\n self._authentication_uri = authentication_uri\n self._cache = cache\n self._username = username\n self._offline = offline\n if (username is not None) and isinstance(token_cache, (str, Path)):\n directory = _token_directory(\n token_cache, self._client.base_url.netloc, username\n )\n token_cache = TokenCache(directory)\n self._token_cache = token_cache\n self._app = app\n\n # Authenticate. If a valid refresh_token is available in the token_cache,\n # it will be used. 
Otherwise, this will prompt for a password.\n if (username is not None) and not offline:\n tokens = self.reauthenticate()\n access_token = tokens[\"access_token\"]\n client.headers[\"Authorization\"] = f\"Bearer {access_token}\"\n\n # Ask the server what its root_path is.\n handshake_request = self._client.build_request(\n \"GET\", \"/\", params={\"root_path\": None}\n )\n handshake_response = self._client.send(handshake_request)\n handle_error(handshake_response)\n data = handshake_response.json()\n base_path = data[\"meta\"][\"root_path\"]\n url = httpx.URL(self._client.base_url)\n base_url = urllib.parse.urlunsplit(\n (url.scheme, url.netloc.decode(), base_path, {}, url.fragment)\n )\n client.base_url = base_url\n client.headers[\"x-base-url\"] = base_url\n path_parts = list(PurePosixPath(url.path).relative_to(base_path).parts)\n if path_parts:\n # Strip \"/metadata\"\n path_parts.pop(0)\n self._path_parts = path_parts\n\n @property\n def offline(self):\n return self._offline\n\n @property\n def app(self):\n return self._app\n\n @offline.setter\n def offline(self, value):\n self._offline = bool(value)\n\n @property\n def path_parts(self):\n return self._path_parts\n\n @property\n def base_url(self):\n return self._client.base_url\n\n @property\n def event_hooks(self):\n \"httpx.Client event hooks. This is exposed for testing.\"\n return self._client.event_hooks\n\n def get_content(self, path, accept=None, timeout=UNSET, stream=False, **kwargs):\n request = self._client.build_request(\"GET\", path, **kwargs)\n if accept:\n request.headers[\"Accept\"] = accept\n url = request.url.raw # URL as tuple\n if self._offline:\n # We must rely on the cache alone.\n reservation = self._cache.get_reservation(url)\n if reservation is None:\n raise NotAvailableOffline(url)\n content = reservation.load_content()\n if content is None:\n # TODO Do we ever get here?\n raise NotAvailableOffline(url)\n return content\n if self._cache is None:\n # No cache, so we can use the client straightforwardly.\n response = self._send(request, stream=stream, timeout=timeout)\n handle_error(response)\n if response.headers.get(\"content-encoding\") == \"blosc\":\n import blosc\n\n return blosc.decompress(response.content)\n return response.content\n # If we get this far, we have an online client and a cache.\n reservation = self._cache.get_reservation(url)\n try:\n if reservation is not None:\n request.headers[\"If-None-Match\"] = reservation.etag\n response = self._send(request, stream=stream, timeout=timeout)\n handle_error(response)\n if response.status_code == 304: # HTTP 304 Not Modified\n # Read from the cache\n content = reservation.load_content()\n elif response.status_code == 200:\n etag = response.headers.get(\"ETag\")\n content = response.content\n if response.headers.get(\"content-encoding\") == \"blosc\":\n import blosc\n\n content = blosc.decompress(content)\n # TODO Respect Cache-control headers (e.g. 
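The ETag branch in get_content above is standard HTTP conditional GET. A stripped-down version of the same round-trip with a bare httpx.Client, against a hypothetical endpoint, looks like:

import httpx

client = httpx.Client(base_url="https://tiled.example.com")   # hypothetical server
first = client.get("/metadata/thing")
etag = first.headers.get("ETag")
headers = {"If-None-Match": etag} if etag else {}
second = client.get("/metadata/thing", headers=headers)
if second.status_code == 304:   # Not Modified: the cached body is still valid
    content = first.content
else:                           # 200: fresh body (and a new ETag to store)
    content = second.content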
\"no-store\")\n if etag is not None:\n # Write to cache.\n self._cache.put_etag_for_url(url, etag)\n self._cache.put_content(etag, content)\n else:\n raise NotImplementedError(\n f\"Unexpected status_code {response.status_code}\"\n )\n finally:\n if reservation is not None:\n reservation.ensure_released()\n return content\n\n def get_json(self, path, stream=False, **kwargs):\n return msgpack.unpackb(\n self.get_content(\n path, accept=\"application/x-msgpack\", stream=stream, **kwargs\n ),\n timestamp=3, # Decode msgpack Timestamp as datetime.datetime object.\n )\n\n def _send(self, request, timeout=UNSET, stream=False, attempts=0):\n \"\"\"\n Handle httpx's timeout API, which uses a special internal sentinel to mean\n \"no timeout\" and therefore must not be passed any value (including None)\n if we want no timeout.\n \"\"\"\n if timeout is UNSET:\n response = self._client.send(request, stream=stream)\n else:\n response = self._client.send(request, stream=stream, timeout=timeout)\n if (response.status_code == 401) and (attempts == 0):\n # Try refreshing the token.\n # TODO Use a more targeted signal to know that refreshing the token will help.\n # Parse the error message? Add a special header from the server?\n if self._username is not None:\n tokens = self.reauthenticate()\n access_token = tokens[\"access_token\"]\n auth_header = f\"Bearer {access_token}\"\n # Patch in the Authorization header for this request...\n request.headers[\"authorization\"] = auth_header\n # And update the default headers for future requests.\n self._client.headers[\"Authorization\"] = auth_header\n return self._send(request, timeout, stream=stream, attempts=1)\n return response\n\n def authenticate(self):\n # Make an initial \"safe\" request to let the server set the CSRF cookie.\n # TODO: Skip this if we already have a valid CSRF cookie for the authentication domain.\n # TODO: The server should support HEAD requests so we can do this more cheaply.\n handshake_request = self._client.build_request(\"GET\", self._authentication_uri)\n # If an Authorization header is set, that's for the Resource server.\n # Do not include it in the request to the Authentication server.\n handshake_request.headers.pop(\"Authorization\", None)\n handshake_response = self._send(handshake_request)\n handle_error(handshake_response)\n username = self._username or input(\"Username: \")\n password = getpass.getpass()\n form_data = {\n \"grant_type\": \"password\",\n \"username\": username,\n \"password\": password,\n }\n token_request = self._client.build_request(\n \"POST\", f\"{self._authentication_uri}token\", data=form_data, headers={}\n )\n token_request.headers.pop(\"Authorization\", None)\n token_response = self._client.send(token_request)\n handle_error(token_response)\n tokens = token_response.json()\n if self._token_cache is not None:\n # We are using a token cache. 
Store the new refresh token.\n self._token_cache[\"refresh_token\"] = tokens[\"refresh_token\"]\n return tokens\n\n def reauthenticate(self, prompt_on_failure=True):\n try:\n return self._refresh()\n except CannotRefreshAuthentication:\n if prompt_on_failure:\n return self.authenticate()\n raise\n\n def _refresh(self):\n # https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie\n # Make an initial \"safe\" request to let the server set the CSRF cookie.\n # TODO: Skip this if we already have a valid CSRF cookie for the authentication domain.\n # TODO: The server should support HEAD requests so we can do this more cheaply.\n handshake_request = self._client.build_request(\"GET\", self._authentication_uri)\n # If an Authorization header is set, that's for the Resource server.\n # Do not include it in the request to the Authentication server.\n handshake_request.headers.pop(\"Authorization\", None)\n handshake_response = self._client.send(handshake_request)\n handle_error(handshake_response)\n if self._token_cache is None:\n # We are not using a token cache.\n raise CannotRefreshAuthentication(\"No token cache was given\")\n # We are using a token_cache.\n try:\n refresh_token = self._token_cache[\"refresh_token\"]\n except KeyError:\n raise CannotRefreshAuthentication(\n \"No refresh token was found in token cache\"\n )\n # There is a refresh token in the cache.\n token_request = self._client.build_request(\n \"POST\",\n f\"{self._authentication_uri}token/refresh\",\n json={\"refresh_token\": refresh_token},\n headers={\"x-csrf\": self._client.cookies[\"tiled_csrf\"]},\n )\n token_request.headers.pop(\"Authorization\", None)\n token_response = self._client.send(token_request)\n if token_response.status_code == 401:\n # Refreshing the token failed.\n # Discard the expired (or otherwise invalid) refresh_token file.\n self._token_cache.pop(\"refresh_token\", None)\n raise CannotRefreshAuthentication(\n \"Server rejected attempt to refresh token\"\n )\n handle_error(token_response)\n tokens = token_response.json()\n # If we get this far, reauthentication worked.\n # Store the new refresh token.\n self._token_cache[\"refresh_token\"] = tokens[\"refresh_token\"]\n return tokens\n\n\ndef context_from_tree(\n tree,\n authentication,\n server_settings,\n *,\n query_registry=None,\n serialization_registry=None,\n compression_registry=None,\n cache=None,\n offline=False,\n token_cache=DEFAULT_TOKEN_CACHE,\n username=None,\n headers=None,\n):\n from ..server.app import serve_tree\n\n # By default make it \"public\" because there is no way to\n # secure access from inside the same process anyway.\n authentication = authentication or {\"allow_anonymous_access\": True}\n server_settings = server_settings or {}\n params = {}\n headers = headers or {}\n headers.setdefault(\"accept-encoding\", \",\".join(DEFAULT_ACCEPTED_ENCODINGS))\n # If a single-user API key will be used, generate the key here instead of\n # letting serve_tree do it for us, so that we can give it to the client\n # below.\n if (\n (authentication.get(\"authenticator\") is None)\n and (not authentication.get(\"allow_anonymous_access\", False))\n and (authentication.get(\"single_user_api_key\") is None)\n ):\n single_user_api_key = os.getenv(\n \"TILED_SINGLE_USER_API_KEY\", secrets.token_hex(32)\n )\n authentication[\"single_user_api_key\"] = single_user_api_key\n params[\"api_key\"] = single_user_api_key\n app = serve_tree(\n tree,\n authentication,\n server_settings,\n 
query_registry=query_registry,\n serialization_registry=serialization_registry,\n compression_registry=compression_registry,\n )\n\n # Only an AsyncClient can be used over ASGI.\n # We wrap all the async methods in a call to asyncio.run(...).\n # Someday we should explore asynchronous Tiled Client objects.\n from ._async_bridge import AsyncClientBridge\n\n async def startup():\n # Note: This is important. The Tiled server routes are defined lazily on\n # startup.\n await app.router.startup()\n\n client = AsyncClientBridge(\n base_url=\"http://local-tiled-app\",\n params=params,\n app=app,\n _startup_hook=startup,\n event_hooks=ASYNC_EVENT_HOOKS,\n headers=headers,\n timeout=httpx.Timeout(5.0, read=20.0),\n )\n # Block for application startup.\n try:\n client.wait_until_ready(10)\n except TimeoutError:\n raise TimeoutError(\"Application startup has timed out.\")\n # TODO How to close the httpx.AsyncClient more cleanly?\n import atexit\n\n atexit.register(client.close)\n return Context(\n client,\n cache=cache,\n offline=offline,\n token_cache=token_cache,\n username=username,\n app=app,\n )\n\n\nclass TokenCache:\n \"A (partial) dict interface backed by files with restrictive permissions\"\n\n def __init__(self, directory):\n self._directory = Path(directory)\n self._directory.mkdir(exist_ok=True, parents=True)\n\n def __getitem__(self, key):\n filepath = self._directory / key\n try:\n with open(filepath, \"r\") as file:\n return file.read()\n except FileNotFoundError:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n if not isinstance(value, str):\n raise ValueError(f\"Expected string value, got {value!r}\")\n filepath = self._directory / key\n filepath.touch(mode=0o600) # Set permissions.\n with open(filepath, \"w\") as file:\n file.write(value)\n\n def __delitem__(self, key):\n filepath = self._directory / key\n filepath.unlink(missing_ok=False)\n\n def pop(self, key, fallback=None):\n filepath = self._directory / key\n try:\n with open(filepath, \"r\") as file:\n content = file.read()\n except FileNotFoundError:\n content = fallback\n filepath.unlink(missing_ok=True)\n return content\n","sub_path":"tiled/client/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":18533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"411595300","text":"#!/usr/bin/env python3\n#\n# Copyright (C) 2018 Linus Jahn \n# Copyright (C) 2019 Hiroshi Miura \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport argparse\nimport sys\n\nfrom aqt.archives import QtArchives\nfrom aqt.installer import QtInstaller\n\n\nclass Cli():\n\n __slots__ = ['parser']\n\n COMBINATION = [\n {'os_name': 'linux', 'target': 'desktop', 'arch': 'gcc_64'},\n {'os_name': 'linux', 'target': 'android', 'arch': 'android_x86'},\n {'os_name': 'linux', 'target': 'android', 'arch': 'android_armv7'},\n {'os_name': 'mac', 'target': 'desktop', 'arch': 'clang_64'},\n {'os_name': 'mac', 'target': 'ios', 'arch': 'ios'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win64_msvc2017_64'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win32_msvc2017'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win64_msvc2015_64'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win32_msvc2015'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win64_mingw73'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win32_mingw73'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win64_mingw53'},\n {'os_name': 'windows', 'target': 'desktop', 'arch': 'win32_mingw53'},\n {'os_name': 'windows', 'target': 'winrt', 'arch': 'win64_msvc2017_winrt_x64'},\n {'os_name': 'windows', 'target': 'winrt', 'arch': 'win64_msvc2017_winrt_x86'},\n {'os_name': 'windows', 'target': 'winrt', 'arch': 'win64_msvc2017_winrt_armv7'},\n {'os_name': 'windows', 'target': 'android', 'arch': 'android_x86'},\n {'os_name': 'windows', 'target': 'android', 'arch': 'android_armv7'},\n ]\n\n def check_arg_combination(self, qt_version, os_name, target, arch):\n for c in self.COMBINATION:\n if c['os_name'] == os_name and c['target'] == target and c['arch'] == arch:\n return True\n return False\n\n def run_install(self, args):\n arch = args.arch\n target = args.target\n os_name = args.host\n output_dir = args.outputdir\n mirror = args.base\n if arch is None:\n if os_name == \"linux\" and target == \"desktop\":\n arch = \"gcc_64\"\n elif os_name == \"mac\" and target == \"desktop\":\n arch = \"clang_64\"\n elif os_name == \"mac\" and target == \"ios\":\n arch = \"ios\"\n if arch is None or arch == \"\":\n print(\"Please supply a target architecture.\")\n self.parser.print_help()\n exit(1)\n qt_version = args.qt_version\n if not self.check_arg_combination(qt_version, os_name, target, arch):\n print(\"Specified target combination is not valid: {} {} {}\".format(os_name, target, arch))\n exit(1)\n if mirror is not None:\n if not (mirror.startswith('http://') or mirror.startswith('https://') or mirror.startswith('ftp://')):\n self.parser.print_help()\n exit(1)\n if output_dir is not None:\n QtInstaller(QtArchives(os_name, qt_version, target, arch, mirror=mirror)).install(target_dir=output_dir)\n else:\n QtInstaller(QtArchives(os_name, qt_version, target, arch, mirror=mirror)).install()\n\n sys.stdout.write(\"\\033[K\")\n print(\"Finished installation\")\n\n def run_list(self, args):\n print('List Qt packages for %s' % args.qt_version)\n\n def show_help(self, args):\n print(\"show help\")\n self.parser.print_help()\n\n def __init__(self):\n parser = argparse.ArgumentParser(prog='aqt', description='Installer for Qt SDK.',\n formatter_class=argparse.RawTextHelpFormatter, add_help=True)\n subparsers = parser.add_subparsers(title='subcommands', description='Valid subcommands',\n help='subcommand for aqt Qt 
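A quick round-trip through the TokenCache defined above; the directory path is arbitrary, and each key lives in its own file created with 0o600 permissions:

cache = TokenCache("/tmp/demo-tokens")
cache["refresh_token"] = "abc123"               # written to /tmp/demo-tokens/refresh_token
assert cache["refresh_token"] == "abc123"
assert cache.pop("refresh_token") == "abc123"   # read the file, then unlink it
assert cache.pop("refresh_token", "absent") == "absent"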
installer')\n install_parser = subparsers.add_parser('install')\n install_parser.set_defaults(func=self.run_install)\n install_parser.add_argument(\"qt_version\", help=\"Qt version in the format of \\\"5.X.Y\\\"\")\n install_parser.add_argument('host', choices=['linux', 'mac', 'windows'], help=\"host os name\")\n install_parser.add_argument('target', choices=['desktop', 'winrt', 'android', 'ios', 'tool'], help=\"target sdk\")\n install_parser.add_argument('arch', nargs='?', help=\"\\ntarget linux/desktop: gcc_64\"\n \"\\ntarget mac/desktop: clang_64\"\n \"\\ntarget mac/ios: ios\"\n \"\\nwindows/desktop: win64_msvc2017_64, win64_msvc2015_64\"\n \"\\n win32_msvc2015, win32_mingw53\"\n \"\\n win64_mingw73, win32_mingw73\"\n \"\\nwindows/winrt: win64_msvc2017_winrt_x64, win64_msvc2017_winrt_x86\"\n \"\\n win64_msvc2017_winrt_armv7\"\n \"\\nandroid: android_x86, android_armv7\")\n install_parser.add_argument('-O', '--outputdir', nargs='?',\n help='Target output directory(default current directory)')\n install_parser.add_argument('-b', '--base', nargs='?',\n help=\"Specify mirror base url such as http://mirrors.ocf.berkeley.edu/qt/, \"\n \"where 'online' folder exist.\")\n list_parser = subparsers.add_parser('list')\n list_parser.set_defaults(func=self.run_list)\n list_parser.add_argument(\"qt_version\", help=\"Qt version in the format of \\\"5.X.Y\\\"\")\n help_parser = subparsers.add_parser('help')\n help_parser.set_defaults(func=self.show_help)\n self.parser = parser\n\n def run(self):\n args = self.parser.parse_args()\n args.func(args)\n","sub_path":"aqt/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"210454973","text":"import tqdm\nimport torch\nimport os\nfrom torch import nn\nfrom load_dataset import *\nfrom Models import BertClassifier\nfrom KoBERT.kobert.pytorch_kobert_adapter import get_pytorch_kobert_model_adapter\nfrom transformers import AdamW\n#from transformers.optimization import WarmupLinearSchedule\nfrom transformers.optimization import get_linear_schedule_with_warmup\nfrom tensorboardX import SummaryWriter\n\nTENSORBOARD_DIR = \"./tensorboard\"\nif not os.path.exists(TENSORBOARD_DIR):\n os.mkdir(TENSORBOARD_DIR)\ntask = \"4wayAdapter128\"\nwriterDIR = os.path.join(TENSORBOARD_DIR, task)\nif not os.path.exists(writerDIR):\n os.mkdir(writerDIR)\nwriter = SummaryWriter(writerDIR)\n\nif not os.path.exists(\"./ckpt/{}\".format(task)):\n os.makedirs(\"./ckpt/{}\".format(task))\n\ndef calc_accuracy(X,Y):\n max_vals, max_indices = torch.max(X, 1)\n train_acc = (max_indices == Y).sum().data.cpu().numpy()/max_indices.size()[0]\n return train_acc\n\ndef prepare_train_adapter(model):\n model.train()\n for name, param in model.named_parameters():\n if 'adapter' in name:\n param.requires_grad = True\n else:\n param.requires_grad = False\n\ndef save_checkpoint(model, save_pth):\n if not os.path.exists(os.path.dirname(save_pth)):\n os.makedirs(os.path.dirname(save_pth))\n torch.save(model.cpu().state_dict(), save_pth)\n model.cuda()\n\n## Setting parameters\nbatch_size = 64\nwarmup_ratio = 0.1\nnum_epochs = 250\nmax_grad_norm = 1\nlog_interval = 200\nlearning_rate = 5e-5\ndr_rate = 0.5\n\ndevice = torch.device(\"cuda:1\")\ntorch.cuda.set_device(device)\n\nbertmodel, vocab = get_pytorch_kobert_model_adapter()\nmodel = BertClassifier.BERTClassifier4way(bertmodel, dr_rate=dr_rate).to(device)\n\nprepare_train_adapter(model)\n\nno_decay = ['bias', 
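The COMBINATION table above drives validation; a minimal check (note that check_arg_combination ignores the Qt version and only matches os/target/arch):

cli = Cli()
assert cli.check_arg_combination('5.12.3', 'linux', 'desktop', 'gcc_64') is True
assert cli.check_arg_combination('5.12.3', 'linux', 'desktop', 'clang_64') is False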
'LayerNorm.weight']\noptimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n]\noptimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)\nloss_fn = nn.CrossEntropyLoss()\n\ntrain_d = load_4way_train(vocab)\nprint(\"finished loading 4way\")\n# print(train_d[0])\n\ntest_d = load_4way_test(vocab)\n# print(test_d)\n\nt_total = len(train_d) * num_epochs\nwarmup_step = int(t_total * warmup_ratio)\n#scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_step, t_total=t_total)\nscheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=t_total)\n\n#sequence_output, pooled_output = model(input_ids, input_mask, token_type_ids)\n#pooled_output.shape\n\nprint(\"num of trainable parameters\")\nmodel_parameters = filter(lambda p: p.requires_grad, model.parameters())\nparams = sum([np.prod(p.size()) for p in model_parameters])\nprint(params)\n\n\nfor e in range(num_epochs):\n train_acc = 0.0\n test_acc = 0.0\n model.train()\n steps = len(train_d) // batch_size\n print(\"total train data : %d\" % len(train_d))\n print(\"total steps %d\" % steps)\n #for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm.tqdm(train_d)):\n for batch_id in tqdm.tqdm(range(steps)):\n batch = train_d[batch_size*batch_id:batch_size*(batch_id+1)]\n token_ids, valid_length, segment_ids, labels = [], [], [], []\n\n for el in range(len(batch)):\n token_id, val_len, segment_id = batch[el][0]\n token_ids.append(token_id)\n valid_length.append(int(val_len))\n segment_ids.append(segment_id)\n\n label = batch[el][1]\n labels.append(label)\n\n # print(\"token_ids: {}, valid_length: {}, segment_ids: {}\".format(token_ids.shape, valid_length.shape, segment_ids.shape))\n token_ids = torch.LongTensor(token_ids)\n valid_length = torch.LongTensor(valid_length)\n segment_ids = torch.LongTensor(segment_ids)\n labels = torch.LongTensor(labels)\n\n optimizer.zero_grad()\n token_ids = token_ids.long().to(device)\n segment_ids = segment_ids.long().to(device)\n valid_length= valid_length\n labels = labels.long().to(device)\n\n out = model(token_ids, valid_length, segment_ids)\n loss = loss_fn(out, labels)\n #loss.requires_grad = True\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n train_acc += calc_accuracy(out, labels)\n if batch_id % log_interval == 0:\n print(\"epoch {} batch id {} loss {} train acc {}\".format(e, batch_id+1, loss.data.cpu().numpy(), train_acc / (batch_id+1)))\n # tensorboard\n writer.add_scalar(\"train/loss\", loss, batch_id + steps*e)\n print(\"epoch {} train acc {}\".format(e, train_acc / (batch_id+1)))\n writer.add_scalar(\"train/accuracy\", train_acc/(batch_id+1), e)\n\n # save model\n if (e%50 == 0):\n model_name = \"{}_ckpt.pth\".format(e)\n print(\"saving the model.. 
{}\".format(model_name))\n save_checkpoint(model, \"./ckpt/{}\".format(model_name))\n\n\n model.eval()\n steps = len(test_d) // batch_size\n for batch_id in tqdm.tqdm(range(steps)):\n batch = test_d[batch_size*batch_id:batch_size*(batch_id+1)]\n token_ids, valid_length, segment_ids, labels = [], [], [], []\n\n for el in range(len(batch)):\n token_id, val_len, segment_id = batch[el][0]\n token_ids.append(token_id)\n valid_length.append(int(val_len))\n segment_ids.append(segment_id)\n\n label = batch[el][1]\n labels.append(label)\n\n token_ids = torch.LongTensor(token_ids).to(device)\n valid_length = torch.LongTensor(valid_length).to(device)\n segment_ids = torch.LongTensor(segment_ids).to(device)\n labels = torch.LongTensor(labels).to(device)\n\n out = model(token_ids, valid_length, segment_ids)\n test_acc += calc_accuracy(out, labels)\n print(\"epoch {} test acc {}\".format(e, test_acc / (batch_id+1)))\n writer.add_scalar(\"test/accuracy\", test_acc/(batch_id+1), e)\n writer.close()\n print(\"done writing\")\n","sub_path":"kobert-adapter/train-4way-adapter.py","file_name":"train-4way-adapter.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"415855342","text":"# from tensorflow.python.client import device_lib\n#\n# print ( device_lib.list_local_devices() )\n\nfrom matplotlib import pyplot as plt\nimport time\nimport os, sys, cv2\n\nf = plt.figure()\nax = f.gca()\nf.show()\n\nfor i in range(100):\n im = cv2.imread('/data/share/nfs/40/latest.jpg')\n ax.imshow(im)\n f.canvas.draw()\n #raw_input('pause : press any key ...')\n time.sleep(0.3)\n","sub_path":"cifar10/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"238183184","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\n# import random\nfrom player import Player\nfrom drugs import Drugs\n\ndef waitscr():\n if os.name == 'nt':\n from msvcrt import getch\n getch()\n else:\n input()\n\ndef osclean():\n if os.name == 'nt':\n return \"cls\"\n else:\n return \"clear\"\n\ndef logo():\n print(\" ____ __ __\")\n print(\" / __ \\_______ ______ _ / / ____ _________/ /\")\n print(\" / / / / ___/ / / / __ `/ / / / __ \\/ ___/ __ / \")\n print(\" / /_/ / / / /_/ / /_/ / / /___/ /_/ / / / /_/ / \")\n print(\"/_____/_/ \\__,_/\\__, / /_____/\\____/_/ \\__,_/ \")\n print(\" /____/ \")\n\nhelplist = ['buy', 'sell', 'inventory', 'travel', 'crime', 'finances', 'skipday (Use this with caution!)', 'exit']\nrun_once = 0\nos.system(osclean())\nplayerobj = Player()\ndrugobj = Drugs(playerobj)\nplayerobj.drugs = drugobj\n\nwhile True:\n\n if run_once == 0:\n playerobj.firstloc()\n run_once = 1\n\n logo()\n print(\"\\nDrugLord v0.1 | 'help' for commands\\n\")\n playerobj.getdata()\n print()\n bashline = input('What to do? 
')\n\n if bashline == \"exit\":\n print('\\nThanks for playing DrugLord v0.1!')\n break\n\n elif bashline == \"help\":\n print('\\nAvailable commands:\\n')\n for element in helplist:\n print(element)\n print('\\nHit any key to continue...')\n waitscr()\n\n elif bashline == \"inventory\":\n if not playerobj.loot:\n print('\\nInventory is empty.')\n waitscr()\n else:\n print('\\nInventory:\\n')\n playerobj.getloot()\n print('\\nHit any key to continue...')\n waitscr()\n\n elif bashline == \"skipday\":\n drugobj.daychange()\n print(\"\\nIt's a new day!\\n\")\n waitscr()\n playerobj.happening()\n playerobj.checklife()\n\n elif bashline == \"travel\":\n print('\\nWhere do yo want to go?\\n')\n playerobj.getlocations()\n print('\\nPrice: $70')\n print()\n travelto = input('Travelling to: ').title()\n if travelto in playerobj.locations:\n if travelto == playerobj.locations[playerobj.checklocation()]:\n print('You are already in ' + travelto)\n waitscr()\n else:\n playerobj.setlocation(travelto)\n drugobj.daychange()\n playerobj.happening()\n playerobj.checklife()\n else:\n print('\\nWhere is that? ')\n waitscr()\n\n elif bashline == \"sell\":\n if not playerobj.loot:\n print(\"\\nDude, you got nothing to sell!\")\n waitscr()\n else:\n print()\n playerobj.getloot(showprice=True)\n print()\n sellinput = input('What do you want to sell? ').title()\n if sellinput in playerobj.lootkeys:\n sellamount = input('How much do you want to sell? ')\n try:\n sellamount = int(sellamount)\n except ValueError:\n sellamount = 0\n print('\\nHas to be a number man!')\n waitscr()\n if sellamount <= playerobj.loot[sellinput]:\n sellprice = drugobj.getdrug(sellinput)\n selltotal = sellprice * sellamount\n playerobj.lootsell(sellinput, sellamount)\n playerobj.payment(selltotal, \"sell\")\n playerobj.emptykey()\n waitscr()\n else:\n print('\\n You do not have ' + str(sellamount) + ' of ' + sellinput)\n waitscr()\n else:\n print('\\nYou need to sell something that you already have.')\n waitscr()\n\n elif bashline == \"buy\":\n print()\n drugobj.getval()\n buyinput = input('What do you need? ').title()\n if buyinput in drugobj.drugparams:\n buyprice = drugobj.getdrug(buyinput)\n amount = input(\"How much \" + buyinput + \" do you need? 
\")\n try:\n amount = int(amount)\n except ValueError:\n amount = 0\n print('\\nHas to be a number man!')\n waitscr()\n if amount:\n total = buyprice * amount\n if int(playerobj.data['Money']) >= int(total):\n playerobj.payment(total, \"buy\")\n playerobj.lootbuy(buyinput, amount)\n print(\"You bought {0} grams of {1} for ${2}\".format(\n amount,\n buyinput,\n total\n ))\n waitscr()\n else:\n print(\"\\nYou ain't got no money, get lost!\")\n waitscr()\n else:\n print(\"\\nWhat?\")\n waitscr()\n\n os.system(osclean())\n","sub_path":"druglordnewest.py","file_name":"druglordnewest.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"291897590","text":"import torch\nimport numpy as np\n\n#segments = get_grid(img_tensor_for_slic, width=10) + 1\ndef get_grid(img_tensor, width=10):\n if len(img_tensor.shape) == 3:\n w = img_tensor.shape[0]\n h = img_tensor.shape[1]\n elif len(img_tensor.shape) == 2:\n w = img_tensor.shape[0]\n h = img_tensor.shape[1]\n grid_map = torch.Tensor(w,h).fill_(0)\n no = 0\n for i in range(int(w/width)):\n for j in range(int(h/width)):\n grid_map[i*width:(i+1)*width, j*width:(j+1)*width] = no\n no = no + 1\n return grid_map.numpy().astype(int)\n\n\ndef get_grid_mini(img_tensor, width=10):\n if len(img_tensor.shape) == 3:\n w = img_tensor.shape[0]\n h = img_tensor.shape[1]\n elif len(img_tensor.shape) == 2:\n w = img_tensor.shape[0]\n h = img_tensor.shape[1]\n grid_map = torch.Tensor(int(w/width),int(h/width)).fill_(0)\n no = 0\n for i in range(int(w/width)):\n for j in range(int(h/width)):\n grid_map[i:(i+1), j:(j+1)] = no\n no = no + 1\n return grid_map.numpy().astype(int)\n\n\ndef get_grid_neighbor(segments_mini, selected_node, unique_segments_cover):\n # selected_node = 1\n x,y = np.where(segments_mini == selected_node)\n neighbor_list = []\n for i in range(3):\n for j in range(3):\n x_now = x - 1 + i\n y_now = y - 1 + j\n if i!=1 and j!=1:\n continue\n if 0 <= x_now < segments_mini.shape[0]:\n if 0 <= y_now < segments_mini.shape[1]:\n if int(segments_mini[x_now,y_now]) in unique_segments_cover:\n neighbor_list.append(int(segments_mini[x_now,y_now]))\n neighbor_list.remove(selected_node)\n return neighbor_list\n","sub_path":"grid_generate.py","file_name":"grid_generate.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"380550022","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_boston\n\nboston = load_boston()\n\ndf = pd.DataFrame(boston.data, columns= boston.feature_names)\ntarget = pd.DataFrame(boston.target, columns=[\"MEDV\"])\ndf['MEDV'] = target['MEDV']\n\n# print(boston.DESCR)\n# print(df.shape)\n# print(target.describe())\n# print(target.count())\n# print(target.head())\n# print(df['MEDV'].describe())\n\n# check for missing data\n# nans = len(df)-df.count()\n# print(nans)\n\n# for column in df:\n# a = df[column].corr(target['MEDV'])\n# print(a)\n\nfig = plt.figure(figsize=(12, 6))\n\n# RM vs Price\nplt.subplot2grid((2, 3), (0, 0))\nplt.scatter(df['RM'], df['MEDV'], c='DarkBlue', alpha =0.4)\nplt.title(\"Corr.=\" + str(round(df['RM'].corr(df['MEDV']), 3)))\nplt.xlabel('RM')\nplt.ylabel('Home price (100k)')\n\n# CRIM vs Price\nplt.subplot2grid((2, 3), (0, 1))\nplt.scatter(df['CRIM'], df['MEDV'], c='DarkBlue', alpha =0.4)\nplt.title(\"Corr.=\" + 
str(round(df['CRIM'].corr(df['MEDV']), 3)))\nplt.xlabel('Per capita crime')\nplt.ylabel('Home price (100k)')\n\n# AGE vs Price\nplt.subplot2grid((2, 3), (0, 2))\nplt.scatter(df['AGE'], df['MEDV'], c='DarkBlue', alpha =0.4)\nplt.title(\"Corr.=\" + str(round(df['AGE'].corr(df['MEDV']), 3)))\nplt.xlabel('Age')\nplt.ylabel('Home price (100k)')\n\n# PTRATIO vs Price\nplt.subplot2grid((2, 3), (1, 0))\nplt.scatter(df['PTRATIO'], df['MEDV'], c='DarkBlue', alpha =0.4)\nplt.title(\"Corr.=\" + str(round(df['PTRATIO'].corr(df['MEDV']), 3)))\nplt.xlabel('pupil-teacher ratio by town')\nplt.ylabel('Home price (100k)')\n\n# TAX vs Price\nplt.subplot2grid((2, 3), (1, 1))\nplt.scatter(df['TAX'], df['MEDV'], c='DarkBlue', alpha =0.4)\nplt.title(\"Corr.=\" + str(round(df['TAX'].corr(df['MEDV']), 3)))\nplt.xlabel('TAX')\nplt.ylabel('Home price (100k)')\n\n# LSTAT vs Price\nplt.subplot2grid((2, 3), (1, 2))\nplt.scatter(df['LSTAT'], df['MEDV'], c='DarkBlue', alpha =0.4)\nplt.title(\"Corr.=\" + str(round(df['LSTAT'].corr(df['MEDV']), 3)))\nplt.xlabel('Working poor proportion')\nplt.ylabel('Home price (100k)')\n\nplt.tight_layout(pad=0.5)\n\n# B vs Price; seven panels do not fit a 2x3 grid (LSTAT previously overdrew TAX at (1, 1)), so B gets its own figure\nplt.figure(figsize=(4, 4))\nplt.scatter(df['B'], df['MEDV'], c='DarkBlue', alpha =0.4)\nplt.title(\"Corr.=\" + str(round(df['B'].corr(df['MEDV']), 3)))\nplt.xlabel('B')\nplt.ylabel('Home price (100k)')\n\nplt.tight_layout(pad=0.5)\nplt.show()","sub_path":"DataVisualization.py","file_name":"DataVisualization.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"609457432","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport glob\nimport keras\nimport random\nimport sklearn\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing import image\nfrom PIL import Image, ImageTk, ImageDraw\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg19 import VGG19\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.densenet import DenseNet201\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing.image import array_to_img, img_to_array, load_img\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n# colors for the bboxes\nCOLORS = ['red', 'blue','pink', 'cyan', 'green', 'black']\n\n# image sizes for the examples\nSIZE = 256, 256\n\nparser = argparse.ArgumentParser(description='Object Detection Neural Network Dataset Labeling')\nparser.add_argument('-b', '--bbox', action='store_true', help='Open the BBox image labeling tool')\nparser.add_argument('-t', '--translate', action='store_true', help='Translate .txt file to .xml file')\nparser.add_argument('-k', '--check', action='store_true', help='Check the images for correct annotation')\nparser.add_argument('-e', '--eval', action='store_true', help='Evaluate the accuracy of the trained neural network')\nparser.add_argument('-a', '--augment', action='store_true', help='Augments images')\nparser.add_argument('-cp','--cnn_predict', action='store_true', help='Predict from CNN')\nparser.add_argument('-r', '--rename', nargs='+', help='Rename a label')\nparser.add_argument('-ct','--cnn_train', nargs='+', help='Train on CNN')\nparser.add_argument('-c', '--convert', nargs='+', help='Convert Bash terminal output to .txt files')\nargs = parser.parse_args()\n\nclass LabelTool():\n\t''' GUI bounding box annotation 
tool '''\n\t'''\n\tMIT License\n\t\n\tCopyright (c) 2017 Shi Qiu\n\t\n\tPermission is hereby granted, free of charge, to any person obtaining a copy\n\tof this software and associated documentation files (the \"Software\"), to\n\tdeal in the Software without restriction, including without limitation the\n\trights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n\tsell copies of the Software, and to permit persons to whom the Software is\n\tfurnished to do so, subject to the following conditions:\n\t\n\tThe above copyright notice and this permission notice shall be included in\n\tall copies or substantial portions of the Software.\n\t\n\tTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\tIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\tFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\tAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\tLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n\tFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n\tIN THE SOFTWARE.\n\t\n\tThis script is modified from https://github.com/xiaqunfeng/BBox-Label-Tool\n\twhich is in turn adopted from https://github.com/puzzledqs/BBox-Label-Tool\n\t'''\n\tdef __init__(self, master, LABELS):\n\t\tself.parent = master\n\t\tself.parent.title('BBox Label Tool')\n\t\tself.frame = Frame(self.parent)\n\t\tself.frame.pack(fill=BOTH, expand=1)\n\t\tself.parent.resizable(width = FALSE, height = FALSE)\n\t\tself.imageDir = ''\n\t\tself.imageList= []\n\t\tself.egDir = ''\n\t\tself.egList = []\n\t\tself.outDir = ''\n\t\tself.cur = 0\n\t\tself.total = 0\n\t\tself.category = 0\n\t\tself.imagename = ''\n\t\tself.labelfilename = ''\n\t\tself.tkimg = None\n\t\tself.currentLabelclass = ''\n\t\tself.cla_can_temp = LABELS\n\t\tself.STATE = {}\n\t\tself.STATE['click'] = 0\n\t\tself.STATE['x'], self.STATE['y'] = 0, 0\n\t\tself.bboxIdList = []\n\t\tself.bboxId = None\n\t\tself.bboxList = []\n\t\tself.hl = None\n\t\tself.vl = None\n\t\tself.srcDirBtn = Button(self.frame, text='Image input folder',\n\t\t\t\t\t\t\t\tcommand=self.selectSrcDir)\n\t\tself.srcDirBtn.grid(row=0, column=0)\n\t\tself.svSourcePath = StringVar()\n\t\tself.entrySrc = Entry(self.frame, textvariable=self.svSourcePath)\n\t\tself.entrySrc.grid(row=0, column=1, sticky=W+E)\n\t\tself.svSourcePath.set(os.path.join(os.getcwd(),'./dataset/Train'))\n\t\tself.ldBtn = Button(self.frame, text=\"Load Dir\", command=self.loadDir)\n\t\tself.ldBtn.grid(row=0, column=2, rowspan=2,\n\t\t\t\t\t\tcolumnspan=2, padx=2, pady=2, ipadx=5, ipady=5)\n\t\tself.desDirBtn = Button(self.frame, text='Label output folder',\n\t\t\t\t\t\t\t\tcommand=self.selectDesDir)\n\t\tself.desDirBtn.grid(row=1, column=0)\n\t\tself.svDestinationPath = StringVar()\n\t\tself.entryDes = Entry(self.frame, textvariable=self.svDestinationPath)\n\t\tself.entryDes.grid(row=1, column=1, sticky=W+E)\n\t\tself.svDestinationPath.set(os.path.join(os.getcwd(),\n\t\t\t\t\t\t\t\t\t'./dataset/BBox_Annotations'))\n\t\tself.mainPanel = Canvas(self.frame, cursor='tcross')\n\t\tself.mainPanel.bind('<Button-1>', self.mouseClick)\n\t\tself.mainPanel.bind('<Motion>', self.mouseMove)\n\t\tself.parent.bind('<Escape>', self.cancelBBox)\n\t\tself.parent.bind('s', self.cancelBBox)\n\t\tself.parent.bind('p', self.prevImage)\n\t\tself.parent.bind('n', self.nextImage)\n\t\tself.mainPanel.grid(row=2, column=1, rowspan=4, sticky=W+N)\n\t\tself.classname = StringVar()\n\t\tself.classcandidate = 
ttk.Combobox(self.frame, state='readonly',\n\t\t\t\t\t\t\t\ttextvariable=self.classname)\n\t\tself.classcandidate.grid(row=2, column=2)\n\t\tself.classcandidate['values'] = self.cla_can_temp\n\t\tself.classcandidate.current(0)\n\t\tself.currentLabelclass = self.classcandidate.get()\n\t\tself.btnclass = Button(self.frame, text='Confirm Class',\n\t\t\t\t\t\t\t\tcommand=self.setClass)\n\t\tself.btnclass.grid(row=2, column=3, sticky=W+E)\n\t\tself.lb1 = Label(self.frame, text='Bounding boxes:')\n\t\tself.lb1.grid(row=3, column=2, sticky=W+N)\n\t\tself.listbox = Listbox(self.frame, width=22, height=12)\n\t\tself.listbox.grid(row=4, column=2, sticky=N+S)\n\t\tself.btnDel = Button(self.frame, text='Delete', command=self.delBBox)\n\t\tself.btnDel.grid(row=4, column=3, sticky=W+E+N)\n\t\tself.btnClear = Button(self.frame, text='Clear All',\n\t\t\t\t\t\t\t\tcommand=self.clearBBox)\n\t\tself.btnClear.grid(row=4, column=3, sticky=W+E+S)\n\t\tself.ctrPanel = Frame(self.frame)\n\t\tself.ctrPanel.grid(row=6, column=1, columnspan=2, sticky=W+E)\n\t\tself.prevBtn = Button(self.ctrPanel, text='<< Prev', width=10,\n\t\t\t\t\t\t\t\tcommand=self.prevImage)\n\t\tself.prevBtn.pack(side = LEFT, padx = 5, pady = 3)\n\t\tself.nextBtn = Button(self.ctrPanel, text='Next >>', width=10,\n\t\t\t\t\t\t\t\tcommand=self.nextImage)\n\t\tself.nextBtn.pack(side=LEFT, padx=5, pady=3)\n\t\tself.progLabel = Label(self.ctrPanel, text='Progress: / ')\n\t\tself.progLabel.pack(side=LEFT, padx=5)\n\t\tself.tmpLabel = Label(self.ctrPanel, text='Go to Image No.')\n\t\tself.tmpLabel.pack(side=LEFT, padx=5)\n\t\tself.idxEntry = Entry(self.ctrPanel, width=5)\n\t\tself.idxEntry.pack(side=LEFT)\n\t\tself.goBtn = Button(self.ctrPanel, text='Go', command=self.gotoImage)\n\t\tself.goBtn.pack(side=LEFT)\n\t\tself.disp = Label(self.ctrPanel, text='')\n\t\tself.disp.pack(side=RIGHT)\n\t\tself.frame.columnconfigure(1, weight=1)\n\t\tself.frame.rowconfigure(4, weight=1)\n\tdef selectSrcDir(self):\n\t\tpath = filedialog.askdirectory(title='Select image source folder',\n\t\t\t\t\t\t\t\tinitialdir=self.svSourcePath.get())\n\t\tself.svSourcePath.set(path)\n\t\treturn\n\tdef selectDesDir(self):\n\t\tpath = filedialog.askdirectory(title='Select label output folder',\n\t\t\t\t\t\t\t\tinitialdir=self.svDestinationPath.get())\n\t\tself.svDestinationPath.set(path)\n\t\treturn\n\tdef loadDir(self):\n\t\tself.parent.focus()\n\t\tself.imageDir = self.svSourcePath.get()\n\t\tif not os.path.isdir(self.imageDir):\n\t\t\tmessagebox.showerror('Error!',\n\t\t\t\t\t\t\t\tmessage='The specified dir does not exist!')\n\t\t\treturn\n\t\textlist = [\t'*.JPEG', '*.jpeg', '*JPG' ,\n\t\t\t\t\t'*.jpg' , '*.PNG' , '*.png',\n\t\t\t\t\t'*.BMP' , '*.bmp']\n\t\tfor e in extlist:\n\t\t\tfilelist = glob.glob(os.path.join(self.imageDir, e))\n\t\t\tself.imageList.extend(filelist)\n\t\tif len(self.imageList) == 0:\n\t\t\tprint('No .JPEG images found in the specified dir!')\n\t\t\treturn\n\t\tself.cur = 1\n\t\tself.total = len(self.imageList)\n\t\tself.outDir = self.svDestinationPath.get()\n\t\tif not os.path.exists(self.outDir): os.mkdir(self.outDir)\n\tdef loadImage(self):\n\t\timagepath = self.imageList[self.cur-1]\n\t\tself.img = Image.open(imagepath)\n\t\tsize = self.img.size\n\t\tself.factor = max(size[0]/700, size[1]/700., 1.)\n\t\tself.img = self.img.resize((int(size[0]/self.factor),\n\t\t\t\t\t\t\t\tint(size[1]/self.factor)))\n\t\tself.tkimg = ImageTk.PhotoImage(self.img)\n\t\tself.mainPanel.config(width = max(self.tkimg.width(), 
10),\n\t\t\t\t\t\t\t\theight=max(self.tkimg.height(), 10))\n\t\tself.mainPanel.create_image(0, 0, image=self.tkimg, anchor=NW)\n\t\tself.progLabel.config(text=\"%04d/%04d\" %(self.cur, self.total))\n\t\tself.clearBBox()\n\t\tfullfilename = os.path.basename(imagepath)\n\t\tself.imagename, _ = os.path.splitext(fullfilename)\n\t\tlabelname = self.imagename + '.txt'\n\t\tself.labelfilename = os.path.join(self.outDir, labelname)\n\t\tbbox_cnt = 0\n\t\tif os.path.exists(self.labelfilename):\n\t\t\twith open(self.labelfilename) as f:\n\t\t\t\tfor (i, line) in enumerate(f):\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tbbox_cnt = int(line.strip())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ttmp = line.split()\n\t\t\t\t\ttmp[0] = int(int(tmp[0])/self.factor)\n\t\t\t\t\ttmp[1] = int(int(tmp[1])/self.factor)\n\t\t\t\t\ttmp[2] = int(int(tmp[2])/self.factor)\n\t\t\t\t\ttmp[3] = int(int(tmp[3])/self.factor)\n\t\t\t\t\tself.bboxList.append(tuple(tmp))\n\t\t\t\t\tcolor_index = (len(self.bboxList)-1) % len(COLORS)\n\t\t\t\t\ttmpId = self.mainPanel.create_rectangle(\n\t\t\t\t\t\t\t\ttmp[0], tmp[1],\n\t\t\t\t\t\t\t\ttmp[2], tmp[3],\n\t\t\t\t\t\t\t\twidth = 2,\n\t\t\t\t\t\t\t\toutline = COLORS[color_index])\n\t\t\t\t\tself.bboxIdList.append(tmpId)\n\t\t\t\t\tself.listbox.insert(END, '%s : (%d, %d) -> (%d, %d)'\n\t\t\t\t\t\t\t\t%(tmp[4], tmp[0], tmp[1], tmp[2], tmp[3]))\n\t\t\t\t\tself.listbox.itemconfig(len(self.bboxIdList)-1,\n\t\t\t\t\t\t\t\tfg=COLORS[color_index])\n\tdef saveImage(self):\n\t\tif self.labelfilename == '': return\n\t\twith open(self.labelfilename, 'w') as f:\n\t\t\tf.write('%d\\n' %len(self.bboxList))\n\t\t\tfor bbox in self.bboxList:\n\t\t\t\tf.write(\"{} {} {} {} {}\\n\".format(int(int(bbox[0])*self.factor),\n\t\t\t\t\t\t\t\tint(int(bbox[1])*self.factor),\n\t\t\t\t\t\t\t\tint(int(bbox[2])*self.factor),\n\t\t\t\t\t\t\t\tint(int(bbox[3])*self.factor),\n\t\t\t\t\t\t\t\tbbox[4]))\n\t\tprint('Image No. 
%d saved' %(self.cur))\n\tdef mouseClick(self, event):\n\t\tif self.STATE['click'] == 0:\n\t\t\tself.STATE['x'], self.STATE['y'] = event.x, event.y\n\t\telse:\n\t\t\tx1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'],event.x)\n\t\t\ty1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'],event.y)\n\t\t\tself.bboxList.append((x1, y1, x2, y2, self.currentLabelclass))\n\t\t\tself.bboxIdList.append(self.bboxId)\n\t\t\tself.bboxId = None\n\t\t\tself.listbox.insert(END, '%s : (%d, %d) -> (%d, %d)'\n\t\t\t\t\t\t\t\t%(self.currentLabelclass, x1, y1, x2, y2))\n\t\t\tself.listbox.itemconfig(len(self.bboxIdList) - 1,\n\t\t\t\t\t\tfg = COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])\n\t\tself.STATE['click'] = 1 - self.STATE['click']\n\tdef mouseMove(self, event):\n\t\tself.disp.config(text='x: %d, y: %d' %(event.x, event.y))\n\t\tif self.tkimg:\n\t\t\tif self.hl:\n\t\t\t\tself.mainPanel.delete(self.hl)\n\t\t\tself.hl = self.mainPanel.create_line(0, event.y,\n\t\t\t\t\t\t\t\tself.tkimg.width(), event.y, width=2)\n\t\t\tif self.vl:\n\t\t\t\tself.mainPanel.delete(self.vl)\n\t\t\tself.vl = self.mainPanel.create_line(event.x, 0,\n\t\t\t\t\t\t\t\tevent.x, self.tkimg.height(), width = 2)\n\t\tif 1 == self.STATE['click']:\n\t\t\tif self.bboxId: self.mainPanel.delete(self.bboxId)\n\t\t\tCOLOR_INDEX = len(self.bboxIdList) % len(COLORS)\n\t\t\tself.bboxId = self.mainPanel.create_rectangle(\n\t\t\t\t\t\t\tself.STATE['x'],\n\t\t\t\t\t\t\tself.STATE['y'],\n\t\t\t\t\t\t\tevent.x, event.y,\n\t\t\t\t\t\t\twidth=2,\n\t\t\t\t\t\t\toutline=COLORS[len(self.bboxList) % len(COLORS)])\n\tdef cancelBBox(self, event):\n\t\tif 1 == self.STATE['click']:\n\t\t\tif self.bboxId:\n\t\t\t\tself.mainPanel.delete(self.bboxId)\n\t\t\t\tself.bboxId = None\n\t\t\t\tself.STATE['click'] = 0\n\tdef delBBox(self):\n\t\tsel = self.listbox.curselection()\n\t\tif len(sel) != 1: return\n\t\tidx = int(sel[0])\n\t\tself.mainPanel.delete(self.bboxIdList[idx])\n\t\tself.bboxIdList.pop(idx)\n\t\tself.bboxList.pop(idx)\n\t\tself.listbox.delete(idx)\n\tdef clearBBox(self):\n\t\tfor idx in range(len(self.bboxIdList)):\n\t\t\tself.mainPanel.delete(self.bboxIdList[idx])\n\t\tself.listbox.delete(0, len(self.bboxList))\n\t\tself.bboxIdList = []\n\t\tself.bboxList = []\n\tdef prevImage(self, event = None):\n\t\tself.saveImage()\n\t\tif self.cur > 1:\n\t\t\tself.cur -= 1\n\t\t\tself.loadImage()\n\tdef nextImage(self, event = None):\n\t\tself.saveImage()\n\t\tif self.cur < self.total:\n\t\t\tself.cur += 1\n\t\t\tself.loadImage()\n\tdef gotoImage(self):\n\t\tidx = int(self.idxEntry.get())\n\t\tif 1 <= idx and idx <= self.total:\n\t\t\tself.saveImage()\n\t\t\tself.cur = idx\n\t\t\tself.loadImage()\n\tdef setClass(self):\n\t\tself.currentLabelclass = self.classcandidate.get()\n\t\tprint('set label class to : {}'.format(self.currentLabelclass))\n\n'''\nMIT License\n\nCopyright (c) 2018 Sari Sabban\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, 
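saveImage above writes one .txt per image: a count line followed by one "xmin ymin xmax ymax label" row per box, in original-image pixel coordinates. A hypothetical reader for that format:

def read_bbox_txt(path):
    boxes = []
    with open(path) as f:
        n = int(f.readline().strip())          # first line: number of boxes
        for _ in range(n):
            xmin, ymin, xmax, ymax, label = f.readline().split()
            boxes.append((int(xmin), int(ymin), int(xmax), int(ymax), label))
    return boxes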
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\ndef translate(texts, images):\n\t''' Translates .txt annotations to .xml annotations '''\n\tsource = 'https://github.com/sarisabban/ProtiClass'\n\tfor thefile in os.listdir(texts):\n\t\tfilename = thefile.split('.')[0]\n\t\twith open('{}/{}.xml'.format(texts, filename), 'w') as f:\n\t\t\tdata = open('{}/{}'.format(texts, thefile), 'r')\n\t\t\timg = Image.open('{}/{}.jpg'.format(images, filename))\n\t\t\tW, H = img.size\n\t\t\tf.write('<annotation>\\n')\n\t\t\tf.write('\\t<filename>{}.jpg</filename>\\n'.format(filename))\n\t\t\tf.write('\\t<source>{}</source>\\n'.format(source))\n\t\t\tf.write('\\t<path>../dataset/Train/{}.jpg</path>\\n'.format(filename))\n\t\t\tf.write('\\t<size>\\n')\n\t\t\tf.write('\\t\\t<width>{}</width>\\n'.format(W))\n\t\t\tf.write('\\t\\t<height>{}</height>\\n'.format(H))\n\t\t\tf.write('\\t\\t<depth>3</depth>\\n')\n\t\t\tf.write('\\t</size>\\n')\n\t\t\tf.write('\\t<segmented>{}</segmented>\\n'.format(next(data).strip()))\n\t\t\tfor line in data:\n\t\t\t\tline = line.split()\n\t\t\t\txmin = line[0]\n\t\t\t\tymin = line[1]\n\t\t\t\txmax = line[2]\n\t\t\t\tymax = line[3]\n\t\t\t\tlabel = line[4]\n\t\t\t\tf.write('\\t<object>\\n')\n\t\t\t\tf.write('\\t\\t<name>{}</name>\\n'.format(label))\n\t\t\t\tf.write('\\t\\t<bndbox>\\n')\n\t\t\t\tf.write('\\t\\t\\t<xmin>{}</xmin>\\n'.format(xmin))\n\t\t\t\tf.write('\\t\\t\\t<ymin>{}</ymin>\\n'.format(ymin))\n\t\t\t\tf.write('\\t\\t\\t<xmax>{}</xmax>\\n'.format(xmax))\n\t\t\t\tf.write('\\t\\t\\t<ymax>{}</ymax>\\n'.format(ymax))\n\t\t\t\tf.write('\\t\\t</bndbox>\\n')\n\t\t\t\tf.write('\\t</object>\\n')\n\t\t\tf.write('</annotation>')\n\t\tprint('[+] Generated file: {}.xml'.format(filename))\n\ndef txt_xml(txt_dir, img_dir):\n\t''' Translate and move .xml files to the relevant directory '''\n\ttranslate(txt_dir, img_dir)\n\tos.makedirs('./dataset/Annotations', exist_ok=True)\n\tprint('\\n[+] Generated Annotations directory')\n\tos.system('mv ./dataset/BBox_Annotations/*.xml ./dataset/Annotations')\n\tprint('\\n[+] Moved files')\n\tprint('-----------------------')\n\tprint('[+] Done')\n\ndef box(text, image):\n\t''' Get box value from .txt file '''\n\timg = Image.open(image)\n\ttext = open(text, 'r')\n\tnext(text)\n\tfor line in text:\n\t\tline = line.split()\n\t\tL = int(line[0])\n\t\tU = int(line[1])\n\t\tR = int(line[2])\n\t\tD = int(line[3])\n\t\tbox = [L, U, R, D]\n\t\tdraw = ImageDraw.Draw(img)\n\t\tdraw.rectangle(box, outline='red')\n\tnewfilename = image.split('.')[1].split('/')[-1]\n\timg.save('./{}_out.jpg'.format(newfilename), 'JPEG')\n\tprint('[+] Saved file ... 
{}'.format(newfilename))\n\ndef check_dir():\n\t'''\n\tCheck that the dataset directory exists\n\tand move files to the relevant directory\n\t'''\n\tcount = 0\n\tos.makedirs('./dataset/Check', exist_ok=True) # create the target directory before moving files into it\n\tfor Afile in os.listdir('./dataset/Train'):\n\t\tAfile = Afile.split('.')[0]\n\t\tfile_img = './dataset/Train/{}.jpg'.format(Afile)\n\t\tfile_txt = './dataset/BBox_Annotations/{}.txt'.format(Afile)\n\t\tbox(file_txt, file_img)\n\t\tcount += 1\n\t\tos.system('mv ./{}_out.jpg ./dataset/Check'.format(Afile))\n\tprint('\\n[+] Total of {} files'.format(count))\n\tprint('-----------------------')\n\tprint('[+] Done')\n\ndef rename(Old, New):\n\t''' Rename a label in the whole dataset '''\n\tdirectory = './dataset/BBox_Annotations'\n\tfor afile in os.listdir(directory):\n\t\tdata_in = open('{}/{}'.format(directory, afile), 'r')\n\t\tnext(data_in)\n\t\tcount = 0\n\t\tlines = []\n\t\tfor line in data_in:\n\t\t\tcount += 1\n\t\t\tline = line.split()\n\t\t\tif line[-1] == Old: line[-1] = New\n\t\t\tcomb = ' '.join(line)+'\\n'\n\t\t\tlines.append(comb)\n\t\tprint(count)\n\t\tprint(lines)\n\t\tdata_out = open('{}/{}'.format(directory, afile), 'w') # write back in place, not into the working directory\n\t\tdata_out.write('{}\\n'.format(str(count)))\n\t\tfor i in lines: data_out.write(i)\n\t\tdata_out.close()\n\ndef BOX(BBOX_line1, BBOX_line2):\n\t''' Compare two bounding boxes '''\n\tline1 = BBOX_line1\n\tline1 = line1.strip().split()\n\txmin1 =int(line1[0])\n\tymin1 =int(line1[1])\n\txmax1 =int(line1[2])\n\tymax1 =int(line1[3])\n\tlabel1= line1[4]\n\tline2 = BBOX_line2\n\tline2 = line2.strip().split()\n\txmin2 =int(line2[0])\n\tymin2 =int(line2[1])\n\txmax2 =int(line2[2])\n\tymax2 =int(line2[3])\n\tlabel2= line2[4]\n\tbb1 = {'x1':xmin1, 'x2':xmax1, 'y1':ymin1, 'y2':ymax1}\n\tbb2 = {'x1':xmin2, 'x2':xmax2, 'y1':ymin2, 'y2':ymax2}\n\tx_left = max(bb1['x1'], bb2['x1'])\n\ty_top = max(bb1['y1'], bb2['y1'])\n\tx_right = min(bb1['x2'], bb2['x2'])\n\ty_bottom = min(bb1['y2'], bb2['y2'])\n\tif x_right < x_left or y_bottom < y_top: return(False)\n\tint_area = (x_right - x_left) * (y_bottom - y_top)\n\tbb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])\n\tbb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])\n\tIOU = round((int_area / float(bb1_area + bb2_area - int_area)), 3)\n\tif IOU > 0.5 and label1 == label2:\n\t\tprint(IOU, label1, label2, 'True')\n\t\treturn(True)\n\telse: return(False)\n\ndef convert(directory):\n\t''' Converts Bash terminal output to .txt file for Cell auto detection '''\n\tItems = []\n\ttemp = None\n\twith open(directory, 'r') as f:\n\t\tcount = 0\n\t\tfor line in f:\n\t\t\tline = line.strip().split()\n\t\t\tif line == []: pass\n\t\t\telif directory in line[0].split('/'):\n\t\t\t\tItems.append(temp)\n\t\t\t\ttemp = []\n\t\t\t\tname = line[0].split('/')[-1]\n\t\t\t\tcount = 0\n\t\t\t\ttemp.append(name.split('.')[0])\n\t\t\telse:\n\t\t\t\tcoord = line[:4]\n\t\t\t\tcoord.append(directory)\n\t\t\t\tcoord = ' '.join(coord)\n\t\t\t\tcount += 1\n\t\t\t\ttemp.append('{}\\n'.format(coord))\n\t\tItems.append(temp)\n\tItems = Items[1:]\n\tfor item in Items:\n\t\tname = '{}.txt'.format(item[0])\n\t\tcoords = item[1:]\n\t\tcoords = ''.join(coords)\n\t\tcount = '{}\\n'.format(len(item)-1)\n\t\twith open(name, 'w') as F:\n\t\t\tF.write(count)\n\t\t\tF.write(coords)\n\t\tprint('Completed {}'.format(name))\n\t\ndef eval(dir_test, dir_pred):\n\t'''\n\tEvaluates the Test set annotations against the network's\n\tpredictions\n\t'''\n\tfor fT, fP in zip(os.listdir(dir_test), os.listdir(dir_pred)):\n\t\tFileT = open('{}/{}'.format(dir_test, fT), 
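BOX above computes intersection-over-union and requires IoU > 0.5 plus a matching label; a worked example with two 100x100 boxes offset by 50 px (intersection 50x50 = 2500, union 10000 + 10000 - 2500 = 17500, IoU = 2500/17500 ≈ 0.143):

print(BOX('0 0 100 100 cell', '50 50 150 150 cell'))   # 0.143 <= 0.5 -> False
print(BOX('0 0 100 100 cell', '10 10 110 110 cell'))   # IoU ~0.681, labels match -> True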
'r')\n\t\tnext(FileT)\n\t\tFileP = open('{}/{}'.format(dir_pred, fP), 'r')\n\t\tfor lineT in FileT:\n\t\t\tFileP.seek(0)\n\t\t\tnext(FileP)\n\t\t\tfor lineP in FileP:\n\t\t\t\tT = lineT.strip()\n\t\t\t\tP = lineP.strip()\n\t\t\t\tBOX(T, P)\n\ndef augment(input_path='./dataset/Train',\n\t\t\toutput_path='./dataset/Augmented',\n\t\t\tcount=10):\n\t''' Augments images and saves them into a new directory '''\n\tos.makedirs(output_path, exist_ok=True)\n\t# Loop variable renamed so it does not shadow PIL's Image class\n\tfor img_file in os.listdir(input_path):\n\t\tgen = ImageDataGenerator(\tfeaturewise_center=True,\n\t\t\t\t\t\t\t\t\tsamplewise_center=True,\n\t\t\t\t\t\t\t\t\tfeaturewise_std_normalization=False,\n\t\t\t\t\t\t\t\t\tsamplewise_std_normalization=False,\n\t\t\t\t\t\t\t\t\tzca_whitening=True,\n\t\t\t\t\t\t\t\t\tzca_epsilon=1e-06,\n\t\t\t\t\t\t\t\t\trotation_range=10,\n\t\t\t\t\t\t\t\t\twidth_shift_range=30,\n\t\t\t\t\t\t\t\t\theight_shift_range=30,\n\t\t\t\t\t\t\t\t\tbrightness_range=None,\n\t\t\t\t\t\t\t\t\tshear_range=0.0,\n\t\t\t\t\t\t\t\t\tzoom_range=0.0,\n\t\t\t\t\t\t\t\t\tchannel_shift_range=0.0,\n\t\t\t\t\t\t\t\t\tfill_mode='nearest',\n\t\t\t\t\t\t\t\t\tcval=0.0,\n\t\t\t\t\t\t\t\t\thorizontal_flip=True,\n\t\t\t\t\t\t\t\t\tvertical_flip=True,\n\t\t\t\t\t\t\t\t\trescale=None,\n\t\t\t\t\t\t\t\t\tpreprocessing_function=None,\n\t\t\t\t\t\t\t\t\tdata_format='channels_last',\n\t\t\t\t\t\t\t\t\tvalidation_split=0.0,\n\t\t\t\t\t\t\t\t\t#interpolation_order=1,\n\t\t\t\t\t\t\t\t\tdtype='float32')\n\t\timg = load_img('{}/{}'.format(input_path, img_file))\n\t\tname = img_file.split('.')[0]\n\t\timage = img_to_array(img)\n\t\t# img_to_array returns (height, width, 3); just prepend a batch dimension\n\t\timage = image.reshape((1,) + image.shape)\n\t\timage = image.astype('float32')\n\t\tgen.fit(image)\n\t\timages_flow = gen.flow(image, batch_size=1)\n\t\tfor i, new_images in enumerate(images_flow):\n\t\t\tnew_image = array_to_img(new_images[0], scale=True)\n\t\t\toutput = '{}/Aug_{}-{}.jpg'.format(output_path, name, i+1)\n\t\t\tprint(output)\n\t\t\tnew_image.save(output)\n\t\t\tif i >= count-1: break\n\ndef CNN(CNN='VGG16', choice='predict', prediction='./dataset/Test/image.jpg'):\n\t''' Train images using one of several CNNs '''\n\tTrain = './dataset/Train'\n\tTests = './dataset/Test'\n\tshape = (224, 224)\n\tepochs = 30\n\tbatches = 16\n\tclasses = []\n\tfor c in os.listdir(Train): classes.append(c)\n\tIDG = keras.preprocessing.image.ImageDataGenerator(validation_split=0.2)\n\ttrain = IDG.flow_from_directory(Train, target_size=shape, color_mode='rgb',\n\t classes=classes, batch_size=batches, shuffle=True, subset='training')\n\ttests = IDG.flow_from_directory(Tests, target_size=shape, color_mode='rgb',\n\t classes=classes, batch_size=batches, shuffle=True)\n\tvalid = IDG.flow_from_directory(Train, target_size=shape, color_mode='rgb',\n\t classes=classes, batch_size=batches, shuffle=True, subset='validation')\n\tinput_shape = train.image_shape\n\t# 'CNN == x or y' is always truthy in Python; use membership tests instead\n\tif CNN in ('VGG16', 'vgg16'):\n\t\tmodel = VGG16(weights=None, input_shape=input_shape,\n\t\t\tclasses=len(classes))\n\telif CNN in ('VGG19', 'vgg19'):\n\t\tmodel = VGG19(weights=None, input_shape=input_shape,\n\t\t\tclasses=len(classes))\n\telif CNN in ('ResNet50', 'resnet50'):\n\t\tmodel = ResNet50(weights=None, input_shape=input_shape,\n\t\t\tclasses=len(classes))\n\telif CNN in ('DenseNet201', 'densenet201'):\n\t\tmodel = DenseNet201(weights=None, 
input_shape=input_shape,\n\t\t\tclasses=len(classes))\n\telse:\n\t\traise ValueError('Unsupported CNN: {}'.format(CNN))\n\tmodel.compile(optimizer=keras.optimizers.SGD(\n\t\tlr=1e-3,\n\t\tdecay=1e-6,\n\t\tmomentum=0.9,\n\t\tnesterov=True),\n\t\tloss='categorical_crossentropy',\n\t\tmetrics=['accuracy'])\n\tEsteps = int(train.samples/train.next()[0].shape[0])\n\tVsteps = int(valid.samples/valid.next()[0].shape[0])\n\tif choice == 'train':\n\t\thistory= model.fit_generator(train,\n\t\t\tsteps_per_epoch=Esteps,\n\t\t\tepochs=epochs,\n\t\t\tvalidation_data=valid,\n\t\t\tvalidation_steps=Vsteps,\n\t\t\tverbose=1)\n\t\tplt.plot(history.history['loss'])\n\t\tplt.plot(history.history['val_loss'])\n\t\tplt.title('Model Loss')\n\t\tplt.ylabel('Loss')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Validation'], loc='upper left')\n\t\tplt.show()\n\t\tplt.plot(history.history['acc'])\n\t\tplt.plot(history.history['val_acc'])\n\t\tplt.title('Model Accuracy')\n\t\tplt.ylabel('Accuracy')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Validation'], loc='upper left')\n\t\tplt.show()\n\t\tY_pred = model.predict_generator(tests, verbose=1)\n\t\ty_pred = np.argmax(Y_pred, axis=1)\n\t\tmatrix = confusion_matrix(tests.classes, y_pred)\n\t\tdf_cm = pd.DataFrame(matrix, index=classes, columns=classes)\n\t\tplt.figure(figsize=(10,7))\n\t\tsn.heatmap(df_cm, annot=True)\n\t\tprint(classification_report(tests.classes,y_pred,target_names=classes))\n\t\tmodel.save_weights('weights.h5')\n\telif choice == 'predict':\n\t\tmodel.load_weights('./weights.h5')\n\t\timg = image.load_img(prediction, target_size=shape)\n\t\tim = image.img_to_array(img)\n\t\tim = np.expand_dims(im, axis=0)\n\t\tif CNN in ('VGG16', 'vgg16'):\n\t\t\tim = keras.applications.vgg16.preprocess_input(im)\n\t\t\tprediction = model.predict(im)\n\t\t\tprint(prediction)\n\t\telif CNN in ('VGG19', 'vgg19'):\n\t\t\tim = keras.applications.vgg19.preprocess_input(im)\n\t\t\tprediction = model.predict(im)\n\t\t\tprint(prediction)\n\t\telif CNN in ('ResNet50', 'resnet50'):\n\t\t\tim = keras.applications.resnet50.preprocess_input(im)\n\t\t\tprediction = model.predict(im)\n\t\t\tprint(prediction)\n\t\t\tprint(keras.applications.resnet50.decode_predictions(prediction))\n\t\telif CNN in ('DenseNet201', 'densenet201'):\n\t\t\t# the DenseNet helpers live in keras.applications.densenet\n\t\t\tim = keras.applications.densenet.preprocess_input(im)\n\t\t\tprediction = model.predict(im)\n\t\t\tprint(prediction)\n\t\t\tprint(keras.applications.densenet.decode_predictions(prediction))\n\ndef main():\n\tif args.bbox:\n\t\tP1 = 'Enter label and press enter to enter a new label.\\n'\n\t\tP2 = 'Type `end` to end label entry and continue to annotation.'\n\t\tprint(P1+P2)\n\t\tprint('-----')\n\t\tLABELS = []\n\t\twhile True:\n\t\t\tL = input('Input Label>')\n\t\t\tif L == 'end' or L == 'End' or L == 'END': break\n\t\t\telse: LABELS.append(L)\n\t\tprint('Labels are:', LABELS)\n\t\troot = Tk()\n\t\ttool = LabelTool(root, LABELS)\n\t\troot.resizable(width=True, height=True)\n\t\troot.mainloop()\n\telif args.translate: txt_xml('./dataset/BBox_Annotations', './dataset/Train')\n\telif args.check: check_dir()\n\telif args.rename: rename(sys.argv[2], sys.argv[3])\n\telif args.eval: eval('Valid', 'Predictions')\n\telif args.augment: augment()\n\telif args.cnn_train: CNN(choice='train', CNN=sys.argv[2])\n\telif args.cnn_predict: CNN(CNN=sys.argv[2], prediction=sys.argv[3])\n\telif args.convert: convert(sys.argv[2])\n\nif __name__ == '__main__': 
main()\n","sub_path":"ProtiClass.py","file_name":"ProtiClass.py","file_ext":"py","file_size_in_byte":25442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"586744759","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 25 00:42:38 2017\r\n\r\n@author: Krishna Govind\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport math as m\r\nimport numpy as np\r\nfrom datetime import datetime\r\nfrom scipy.stats import norm as n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#Function to calculate the Option price using BSM\r\ndef BSMOption(S,K,t,r,sigma,type):\r\n d1 = (m.log(S/K)+(r+(sigma**2/2))*t)/(sigma*m.sqrt(t))\r\n d2=d1-sigma*m.sqrt(t)\r\n \r\n if (type=='c'): \r\n C = S*n.cdf(d1)-(K*m.exp(-r*t)*n.cdf(d2))\r\n return C\r\n else: \r\n P = K*m.exp(-r*t)*n.cdf(-d2)-S*n.cdf(-d1)\r\n return P\r\n\r\n#Calculating the implied Volatility using the Bisection Method\r\ndef impVol(S,K,r,t,type,MP):\r\n \r\n x0 = 0.09\r\n xx = 0.01\r\n tolerance = 10**(-7)\r\n epsilon = 10**(-14)\r\n \r\n maxIterations = 200\r\n SolutionFound = False\r\n \r\n #Anonymous function to calculate the Implied volatility using the Newton Method\r\n f = lambda s:BSMOption(S,K,t,r,s,type)-MP \r\n \r\n for i in range(1,maxIterations+1):\r\n y = f(x0)\r\n yprime = (f(x0)-f(xx))/(x0-xx) \r\n \r\n if (abs(yprime) FHIRAbstractModel:\n try:\n klass = get_fhir_model_class(element_type)\n except KeyError:\n raise LookupError(\n f\"'{element_type}' is not valid FHIRModel (element type) name!\"\n )\n if isinstance(data, (str, bytes)):\n return klass.parse_raw(data, content_type=\"application/json\")\n elif isinstance(data, Path):\n return klass.parse_file(data)\n return klass.parse_obj(data)\n\n\n__all__ = [\"get_fhir_model_class\", \"construct_fhir_element\"]\n","sub_path":"fhir/resources/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"437639168","text":"'''\n@author: fan\n\nspecific *point* invoke structure, copying largely from b_FC\n'''\n\nimport numpy as np\n\nimport parameters.loop_param_combo_list.loops_gen as paramloops\n\n\ndef get_combo_list(combo_type=['e', '20200901'], compesti_specs=None):\n \"\"\"\n For ITG integration problems, do not specify each element to be integrated over as\n as param_combo in param_list. These will be auto-generated for each param_combo\n that is contained in the param_list by\n `solusteady.simu_integrate_loop.gen_integrate_param_list`.\n\n For GE problems, do not specify each interest rate to be looped over during\n bisection as param_combo in param_list. These will be auto-generated for each\n param_combo that is contained in the param_list by\n `soluequi/param_loop_r_loop.py:331` inside function\n `soluequi.param_loop_r_loop.demand_supply_interest`.\n\n For GE + ITG, first proceeds as GE in description above, and then it will detect\n ITG and use the ITG step described above. So GE + ITG solution at a particular\n combination of parameters is one element in the param_list. In another word,\n the GE and ITG related parameters do not need to be specified as varying elements of\n the param_list loop. However, they could be specified as such, if they are,\n should not add ITG substring to combo_type[1] string. 
GE is specified as a\n parameter for the run function directly, `invoke\n\n Parameters\n ----------\n compesti_specs : dict\n see `parameters.combo.gen_compesti_spec` for example for `compesti_specs`.\n \"\"\"\n\n module = combo_type[0]\n sub_type = combo_type[1]\n\n if \"20181025\" in sub_type:\n \"\"\"\n 20180801 re-testing model, borrowing and savings\n\n - for integration, needs dist_type, otherwise even add _ITG_ does not integrate\n - need to specify minmax_type, even when no grid, otherwise graph for parameters does not work.\n\n \"\"\"\n int_rate_counts = 1\n min_int = 1.05\n max_int = 1.05\n A = 0.25\n std = 0.75\n interpolant_type = ['a', 11, {'maxinter': 15}]\n\n combo_list = \\\n [{'param_update_dict': {'grid_type': ['a', 20200801,\n {'std_eps': std, 'std_eps_E': std}],\n 'esti_type': ['a', 20180512,\n {'R_INFORM_SAVE': cur_rate,\n 'R_INFORM_BORR': cur_rate}],\n 'data_type': ['b', 20180512,\n {'A': A - ((std ** 2) / 2), 'Region': 0,\n 'Year': 0}],\n 'model_type': ['a', 1],\n 'dist_type': ['a', 20200801, {'epsA_frac_A': 0.15}],\n 'minmax_type': ['a', 20180801],\n 'interpolant_type': interpolant_type,\n 'support_arg': {}},\n 'title': 'Borrow Save Testing ' + str(int(cur_rate * 100)) + ', A=' + str(\n A) + ',S=' + str(std) + ')',\n 'combo_desc': 'Borrow Save Testing' + str(int(cur_rate * 100)),\n 'file_save_suffix': '_i15r' + str(int(cur_rate * 100)) + 'A' + str(\n int(A * 100)) + 's' + str(\n int(std * 100))}\n for cur_rate in np.linspace(min_int, max_int, num=int_rate_counts)]\n\n main_type_str_list = ['20201025']\n if any([main_type_str in sub_type for main_type_str in main_type_str_list]):\n\n # A. Invocation precision, changing grid tyep and interpolant types\n if any([main_type_str + 'x' in sub_type for main_type_str in main_type_str_list]):\n st_common_subtype = '20201025x'\n elif any([main_type_str + 'd' in sub_type for main_type_str in\n main_type_str_list]):\n st_common_subtype = '20201025d'\n else:\n st_common_subtype = '20201025'\n\n # B. Integrate or not\n if \"_ITG_\" in sub_type:\n dist_t = st_common_subtype\n else:\n dist_t = None\n\n # C. Model type\n if \"_1j7\" in sub_type:\n model_t = '20181011'\n elif \"_1ja7\" in sub_type:\n # 1j7 does not work, 1ja7 approximates 1j7 by making fixed cost very high\n # for saving option\n model_t = '20181013j16'\n elif \"_2j127\" in sub_type:\n model_t = '20181013j016'\n elif \"_5j12347\" in sub_type:\n model_t = '20180613'\n elif \"_7jAll\" in sub_type:\n model_t = '20180701'\n else:\n raise Exception('bad _j12347 etc not in sub_type')\n\n # D. 
Generate list of param combos and create combo_list\n if len(combo_type) >= 3 and combo_type[2] is not None:\n combo_list = paramloops.combo_list_auto(\n combo_type=combo_type,\n compesti_specs=compesti_specs,\n minmax_f='a', minmax_t=minmax_t,\n data_f='a', data_t='20180607',\n esti_f='a', esti_t='20180815',\n model_f='a', model_t=model_t,\n grid_f='a', grid_t=st_common_subtype,\n interpolant_f='a', interpolant_t=st_common_subtype,\n dist_f='a', dist_t=dist_t)\n else:\n # support_arg below will be filled out by other functions with string other\n # calibration and estimation information.\n combo_list = \\\n [{'param_update_dict': {'model_type': ['a', model_t],\n 'grid_type': ['a', st_common_subtype],\n 'esti_type': ['a', st_common_subtype],\n 'data_type': ['b', st_common_subtype],\n 'dist_type': ['a', dist_t],\n 'interpolant_type': ['a', st_common_subtype],\n 'support_arg': {}},\n 'title': '20201025 Single Simu',\n 'combo_desc': 'Base Combo 20201025',\n 'file_save_suffix': ''}]\n\n return combo_list\n","sub_path":"prjforinfcreditvilfw/parameters/paramset/combo_list_e_test_point.py","file_name":"combo_list_e_test_point.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"577970887","text":"from scrapy import Spider\nfrom scrapy import Request\nfrom scrapy.selector import Selector\nfrom dbmeizi.items import DbmeiziItem\nfrom scrapy import log\n\nclass dbmeiziSpider(Spider):\n\tname = \"dbmeiziSpider\"\n\tallowed_domains = [\"dbmeinv.com\"]\n\tstart_urls = [\"http://www.dbmeinv.com\"]\n\t# for i in xrange(1201, 1202):\n\t# \tstr = \"http://www.dbmeinv.com/?pager_offset=%d\" % i\n\t\t# start_urls.append(str)\n\tdef __init__(self):\n\t\tlogfile = open(self.name + '.log', 'w')\n\t\tlog_observer = log.ScrapyFileLogObserver(logfile, level=log.DEBUG)\n\t\tlog_observer.start()\n\n\tdef parse(self, response):\n\t\tself.log('parsing ' + response.url, level = log.INFO)\n\n\t\tResults = Selector(response).xpath('//li[@class=\"span3\"]')\n\t\tfor div in Results:\n\t\t\titem = DbmeiziItem()\n\t\t\t#img_class \n\t\t\timg_class = div.xpath('.//div[@class=\"img_single\"]')\n\t\t\thref = img_class.xpath('.//a')[0]\n\t\t\timg = img_class.xpath('.//img')[0]\n\t\t\titem['title'] = img.xpath('@title').extract()[0]\n\t\t\titem['topiclink'] = href.xpath('@href').extract()[0]\n\t\t\titem['imgsrc'] = img.xpath('@src').extract()[0]\n\t\t\t#starcount\n\t\t\tbottom_class = div.xpath('.//div[@class=\"bottombar\"]')\n\t\t\tstarcount = bottom_class.xpath('.//span[@class=\"starcount\"]/text()').extract()[0]\n\t\t\titem['starcount'] = int(starcount)\n\t\t\tyield item\n\n\t\tfor url in response.xpath('//a/@href').extract():\n\t\t\tif url.startswith('http://'):\n\t\t\t\tself.log('Find url:' + url, log.DEBUG)\n\t\t\telse:\n\t\t\t\tself.log('Find url:' + response.url+url, log.DEBUG)\n\t\t\t# yield Request(url, callback=self.parse)","sub_path":"Spider/scrapy-01/dbmeizi/dbmeizi/spiders/dbmeizi_spider.py","file_name":"dbmeizi_spider.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"493270314","text":"from PIL import Image\n\nsource = 'wire.png'\nimage = Image.open(source)\ndata = image.getdata()\n\nn = 100\n\nsteps = [n, n - 1, n - 1, n - 2]\n\nspiral = [0] * (n * n)\n\ndirection = 0\ncurrent = steps[direction]\n\nx = -1\ny = 0\n\nfor i in range(n * n):\n\tif current == 0:\n\t\tsteps[direction] -= 2\n\t\tdirection = (direction + 1) % 
4\n\t\tcurrent = steps[direction]\n\n\tif direction == 0:\n\t\tx += 1\n\telif direction == 1:\n\t\ty += 1\n\telif direction == 2:\n\t\tx -= 1\n\telif direction == 3:\n\t\ty -= 1\n\n\tspiral[y * n + x] = data[i]\n\tcurrent -= 1\n\n\nanswer = Image.new('RGB', (n, n))\nanswer.putdata(spiral)\nanswer.save('answer.png', 'PNG')","sub_path":"ctf/the-python-challenge/level-14/level-14.py","file_name":"level-14.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"155401815","text":"import mysql.connector as ms\nimport datetime\nimport mm as mm\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\n\n\ndef getStockData(stock_name):\n cnx = ms.connect(user='root', password='mypassword', host='mydb.cwtgu3tqnwx8.us-east-2.rds.amazonaws.com',\n database='mydb')\n mycursor = cnx.cursor()\n\n query = \"SELECT datetime, open, high, low, close FROM day_price WHERE stock_code = '\" + stock_name + \"' ORDER BY datetime DESC\"\n mycursor.execute(query)\n result = mycursor.fetchall()\n\n time_stamps = [i[0] for i in result]\n open_prices = [i[1] for i in result]\n close_prices = [i[4] for i in result]\n high_prices = [i[2] for i in result]\n low_prices = [i[3] for i in result]\n\n cnx.close()\n return (time_stamps, high_prices, low_prices, open_prices, close_prices)\n\n\ndef graph_data(time_stamps, stock_data, stock_name):\n prediction = stock_data[-1]\n print(prediction)\n\n ts = []\n latest = []\n ticks = range(101)\n for i in range(101):\n latest.append(stock_data[-1 * (102 - i)])\n ts.append(time_stamps[101 - (i + 1)].strftime(\"%b-%d %I:%M%p\"))\n\n print(\"length: \", len(latest))\n np_stock_data = np.array(latest)\n plt.plot(ticks, np_stock_data, 'b', linewidth=2)\n print(latest[-1])\n if prediction >= latest[-1]:\n plt.plot(102, prediction, 'g+', linestyle='dashed')\n else:\n plt.plot(102, prediction, 'rx', linestyle='dashed')\n\n plt.xticks(ticks[::20], ts[::20], rotation=45)\n plt.grid(color='k', linestyle='--', linewidth=1, axis='x')\n plt.xlabel('Date')\n plt.ylabel('Price')\n plt.title(stock_name)\n plt.tight_layout()\n plt.savefig('static/assets/img/a.png')\n plt.clf()\n return\n\n\ndef getPrediction(stock_name):\n stock_data = getStockData(stock_name)\n\n if not stock_data[0]:\n print(\"Sorry, the stock you requested is not in our database.\")\n print(\"Please enter another stock.\")\n return []\n roc = mm.getRateOfChange(stock_data) # array\n if not roc:\n roc = [0];\n stoch_os = mm.getStochasticOscillator(stock_data) # array\n asi = mm.getASI(stock_data) # array\n curPrice = mm.getCurPrice(stock_data[4])\n\n data = []\n for i in stock_data[4]:\n data = [i] + data\n\n arima_prediction = mm.getARIMA(data) # double\n fourier_prediction = mm.getFourier(data)\n\n prediction = mm.aggregatePrediction(roc, stoch_os, asi, curPrice, arima_prediction, fourier_prediction)\n\n data.append(prediction[0])\n graph_data(stock_data[0], data, stock_name)\n\n return [roc[0], stoch_os[0], asi[0], curPrice, arima_prediction, fourier_prediction[0], prediction[0], prediction[1], stock_name]\ndef getPrediction2(stock_name):\n stock_data = getStockData(stock_name)\n\n if not stock_data[0]:\n print(\"Sorry, the stock you requested is not in our database.\")\n print(\"Please enter another stock.\")\n return []\n roc = mm.getRateOfChange(stock_data) # array\n if not roc:\n roc = [0];\n stoch_os = mm.getStochasticOscillator(stock_data) # array\n asi = mm.getASI(stock_data) # array\n 
curPrice = mm.getCurPrice(stock_data[4])\n\n data = []\n for i in stock_data[4]:\n data = [i] + data\n\n arima_prediction = mm.getARIMA(data) # double\n fourier_prediction = mm.getFourier(data)\n\n prediction = mm.aggregatePrediction(roc, stoch_os, asi, curPrice, arima_prediction, fourier_prediction)\n\n data.append(prediction[0])\n #graph_data(stock_data[0], data, stock_name)\n\n return [roc[0], stoch_os[0], asi[0], curPrice, arima_prediction, fourier_prediction[0], prediction[0], prediction[1], stock_name]\n","sub_path":"DEMO2/webbb/techcontroller.py","file_name":"techcontroller.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"478103021","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom nose.tools import *\nimport os\nimport sys\nimport time\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom pastee import datastore\nfrom pastee import paste\nfrom pastee import scrubber\n\n\nclass Test_Scrubber:\n '''Test for scrubber.Scrubber.\n\n This performs live tests. See the datastore tests for more information.\n '''\n def setup(self):\n self._ds = datastore.Datastore()\n self._ds.prefix_is('pastee:test')\n self._scrubber = scrubber.Scrubber(self._ds)\n\n def teardown(self):\n '''Clean up.'''\n keys = self._ds.keys() # only keys starting with the testing prefix\n for key in keys:\n self._ds.delete(key)\n\n def test_scrub(self):\n '''Scrub many pastes'''\n ttl = 3600\n content = u'This is the content'\n ip_address = '4.2.2.2'\n lexer_alias = 'py'\n\n # Create an unexpired paste.\n active = paste.Paste(self._ds)\n active.ttl_is(ttl)\n active.content_is(content)\n active.ip_address_is(ip_address)\n active.lexer_alias_is(lexer_alias)\n active.save_state_is(paste.Paste.SaveStates.CLEAN)\n\n # Create two expired pastes.\n expired_a = paste.Paste(self._ds)\n expired_a.ttl_is(0)\n expired_a.content_is(content)\n expired_a.ip_address_is(ip_address)\n expired_a.lexer_alias_is(lexer_alias)\n expired_a.save_state_is(paste.Paste.SaveStates.CLEAN)\n\n expired_b = paste.Paste(self._ds)\n expired_b.ttl_is(0)\n expired_b.content_is(content)\n expired_b.ip_address_is(ip_address)\n expired_b.lexer_alias_is(lexer_alias)\n expired_b.save_state_is(paste.Paste.SaveStates.CLEAN)\n\n # Before scrubbing, all pastes are considered unexpired.\n expected_ids = (active.id(), expired_a.id(), expired_b.id())\n actual_ids = self._scrubber.active_paste_ids()\n assert_equal(sorted(actual_ids), sorted(expected_ids))\n\n # Try scrubbing all pastes.\n all_ids = expected_ids\n for id in all_ids:\n pst = paste.Paste(self._ds, id=id)\n self._scrubber.scrub(pst)\n\n # Only the expired pastes should have been scrubbed. 
Further, all sensitive\n # information should have been removed.\n reloaded_a = paste.Paste(self._ds, id=expired_a.id())\n assert_equal(reloaded_a.ttl(), 0)\n assert_equal(reloaded_a.content(), None)\n assert_equal(reloaded_a.ip_address(), None)\n assert_equal(reloaded_a.lexer_alias(), lexer_alias)\n\n reloaded_b = paste.Paste(self._ds, id=expired_b.id())\n assert_equal(reloaded_b.ttl(), 0)\n assert_equal(reloaded_b.content(), None)\n assert_equal(reloaded_b.ip_address(), None)\n assert_equal(reloaded_b.lexer_alias(), lexer_alias)\n\n # The active paste should have been untouched.\n reloaded_active = paste.Paste(self._ds, id=active.id())\n assert_equal(reloaded_active.ttl(), ttl)\n assert_equal(reloaded_active.content(), content)\n assert_equal(reloaded_active.ip_address(), ip_address)\n assert_equal(reloaded_active.lexer_alias(), lexer_alias)\n\n # After scrubbing, only the active paste should remain 'unexpired'.\n expected_ids = (active.id(),)\n actual_ids = self._scrubber.active_paste_ids()\n assert_equal(sorted(actual_ids), sorted(expected_ids))\n","sub_path":"backend/tests/scrubber_test.py","file_name":"scrubber_test.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"229908199","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\n\nimport cv2\nimport numpy as np\n\n\ndef python_color2sepia(imagefile, outfile=None):\n \"\"\"\n Sepia image filter.\n\n Turn a colorful image of choice into a nostalgic sepia image with a pure\n Python implementation. The new image can be saved to a specified location.\n By default the new image is saved in the same destination as the original\n with the transformation added to the filename.\n\n Arguments\n ---------\n imagefile : str\n Image filename (with path included) of image with shape (H, W, c) to\n transform\n outfile : str, optional, default None\n Image filename (with path included) if transformed image with shape\n (H, W, c) should be saved to a particular location\n \"\"\"\n bgr_image = cv2.imread(imagefile)\n H, W = bgr_image.shape[:2]\n sepia_image = np.empty_like(bgr_image)\n for i in range(H):\n for j in range(W):\n B = bgr_image[i, j, 0] * 0.131 + \\\n bgr_image[i, j, 1] * 0.534 + bgr_image[i, j, 2] * 0.272\n G = bgr_image[i, j, 0] * 0.168 + \\\n bgr_image[i, j, 1] * 0.686 + bgr_image[i, j, 2] * 0.349\n R = bgr_image[i, j, 0] * 0.189 + \\\n bgr_image[i, j, 1] * 0.769 + bgr_image[i, j, 2] * 0.393\n if B > 255:\n sepia_image[i, j, 0] = 255\n else:\n sepia_image[i, j, 0] = B\n if G > 255:\n sepia_image[i, j, 1] = 255\n else:\n sepia_image[i, j, 1] = G\n if R > 255:\n sepia_image[i, j, 2] = 255\n else:\n sepia_image[i, j, 2] = R\n\n sepia_image = sepia_image.astype(\"uint8\")\n\n if outfile is None:\n filename, file_extension = os.path.splitext(imagefile)\n cv2.imwrite(filename + \"_sepia\" + file_extension, sepia_image)\n else:\n cv2.imwrite(outfile, sepia_image)\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.path.insert(0, '../profiling/')\n from manual_timing_ import timer_results\n\n # Visual verification of implementation\n python_color2sepia(\"../images/rain.jpg\")\n\n # Profile time\n imagefile = \"../images/laperm_kitten.jpg\" # shape: (1537, 2305, 3)\n n_experiments = 5\n funcs = [python_color2sepia]\n args = [imagefile]\n\n timer_results(\n n_experiments, \"./reports/python_report_color2sepia.txt\", funcs, 
args)\n","sub_path":"assignment4/sepia_filter/python_color2sepia.py","file_name":"python_color2sepia.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"470398955","text":"# Autor: Ricardo Medeiros da Costa Junior\n# Titulo: Exercicio 2\n# Data: 22/03/2016\n# Objetivo: Dados 4 notas, calcular a media aritmetica\n# Entrada: nota1, nota2, nota3, nota4 (numeros reais)\n# Saida: A media aritmetica\n# Obs.: Verificar se os quatro numeros sao numeros reais e depois mostrar a media aritmetica deles\n\ndef obter_entrada(qtd_notas):\n notas = []\n for i in range(qtd_notas):\n nota = float(input(\"Digite a \" + str(i+1) + \" nota: \"))\n if (nota < 0):\n raise ValueError\n notas.append(nota)\n return notas\n\ndef calcular(notas):\n somatorio = 0.0\n for nota in notas:\n somatorio += nota\n return (somatorio / len(notas))\n\ndef main():\n try:\n print(\"A media aritmetica e:\", calcular(obter_entrada(qtd_notas=4)))\n \n except ValueError:\n print(\"Numero invalido. Programa sera reiniciado. POR FAVOR! \\n\"\n \"Informe um real positivo.\")\n main()\n\nmain()\n","sub_path":"bancos_dados_para_biologia/t3/python/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"19470004","text":"thatlist = []\nbca = open('bca_questions.txt', 'r')\nmeme = open('meme_questions.txt', 'r')\nnumber = open('number_questions.txt', 'r')\npython = open('python_questions.txt', 'r')\nrandom = open('random_questions.txt', 'r')\nfilelist = [bca, meme, number, python, random]\ndbca = {}\ndmeme = {}\ndnumber = {}\ndpython = {}\ndrandom = {}\nthatlist = [dbca, dmeme, dnumber, dpython, drandom]\nfor file in filelist:\n for line in file:\n line = line.split(';')\n if file == bca:\n dbca[line[0]] = line[1]\n elif file == meme:\n dmeme[line[0]] = line[1]\n elif file == number:\n dnumber[line[0]] = line[1]\n elif file == python:\n dpython[line[0]] = line[1]\n else:\n drandom[line[0].strip()] = line[1].strip()\n","sub_path":"Dictionaries.py","file_name":"Dictionaries.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"640936740","text":"# -*-coding:utf-8-*-\nimport numpy as np\nimport nnvm.compiler\nimport nnvm.testing\nimport tvm\nfrom tvm.contrib import graph_runtime\n\n# 定义神经网络\nbatch_size = 1\nnum_class = 1000\nimage_shape = (3, 224, 224)\ndata_shape = (batch_size,) + image_shape\nout_shape = (batch_size, num_class)\n# net:Symbol\nnet, params = nnvm.testing.resnet.get_workload(num_layers=18, batch_size=batch_size,\n image_shape=image_shape)\n# 输出网络的定义\nprint(net.debug_str())\n\n# 编译\nopt_level = 3\ntarget = tvm.target.cuda()\n# graph:Graph lib:Module params:dict()\nwith nnvm.compiler.build_config(opt_level=opt_level):\n graph, lib, params = nnvm.compiler.build(net, target, shape={'data': data_shape},\n params=params)\n\n# 运行生成的库\nctx = tvm.gpu()\ndata = np.random.uniform(-1, 1, size=data_shape).astype(np.float32)\n# 创建一个module\n# module:GraphModule\nmodule = graph_runtime.create(graph, lib, ctx)\nmodule.set_input('data', data)\nmodule.set_input(**params)\n\nmodule.run()\n# out:tvm.NDarray\nout = module.get_output(0, tvm.nd.empty(out_shape))\nprint(out.asnumpy().flatten()[0:10])\n\n# save and load compiled module\nfrom tvm.contrib import util\n\ntemp = util.tempdir()\n\n# path_lib = temp.relpath('deploy_lib.so')\n# 
lib.export_library(path_lib)\n# with open(temp.relpath('deploy_graph.json'),'w')as fo:\n# fo.write(graph.json())\n# with open(temp.relpath('deploy_params.params'), 'wb') as fo:\n# fo.write(nnvm.compiler.save_param_dict(params))\n# print(temp.listdir())\n\n\nlib.export_library('./model/deploy_lib.so')\nwith open('./model/deploy_graph.json', 'w')as fo:\n fo.write(graph.json())\nwith open('./model/deploy_params.params', 'wb') as fo:\n fo.write(nnvm.compiler.save_param_dict(params))\n\nloaded_json = open('./model/deploy_graph.json').read()\nloaded_lib = tvm.module.load('./model/deploy_lib.so')\nload_params = bytearray(open('./model/deploy_params.params', 'rb').read())\ninput_data = tvm.nd.array(np.random.uniform(size=data_shape).astype(np.float32))\nmodule = graph_runtime.create(loaded_json, loaded_lib, tvm.gpu())\nmodule.load_params(load_params)\nmodule.run(data=input_data)\nout = module.get_output(0, out=tvm.nd.empty(out_shape))\nprint(out.asnumpy().flatten()[0:10])\n","sub_path":"tvm_handson/tutorial/compliling_deep_learning_model.py","file_name":"compliling_deep_learning_model.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"372000594","text":"import argparse\n\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data\nimport torch.utils.data.distributed\nfrom option import Option\nfrom pruning import ResModelPrune, SeqModelPrune\nfrom torch import nn\nfrom trainer import NetworkWiseTrainer\n\nfrom dcp.checkpoint import CheckPoint\nfrom dcp.dataloader import *\nfrom dcp.mask_conv import MaskConv2d, MaskLinear\nfrom dcp.model_builder import get_model\nfrom dcp.models.preresnet import PreBasicBlock\nfrom dcp.models.resnet import BasicBlock, Bottleneck\nfrom dcp.utils.logger import get_logger\nfrom dcp.utils.tensorboard_logger import TensorboardLogger\nfrom dcp.utils.write_log import write_log, write_settings\nfrom dcp.utils.model_analyse import ModelAnalyse\n\nfrom dcp.models.insightface_resnet import IRBlock\nfrom dcp.arcface import Arcface\n\nfrom dcp.models.mobilefacenet import Bottleneck_mobilefacenet\n\nclass Experiment(object):\n \"\"\"\n Run experiments with pre-defined pipeline\n \"\"\"\n\n def __init__(self, options=None, conf_path=None):\n self.settings = options or Option(conf_path)\n self.checkpoint = None\n self.train_loader = None\n self.val_loader = None\n self.pruned_model = None\n self.aux_fc = None\n self.network_wise_trainer = None\n self.optimizer_state = None\n\n os.environ['CUDA_VISIBLE_DEVICES'] = self.settings.gpu\n\n self.settings.set_save_path()\n write_settings(self.settings)\n self.logger = get_logger(self.settings.save_path, \"finetune\")\n self.tensorboard_logger = TensorboardLogger(self.settings.save_path)\n self.logger.info(\"|===>Result will be saved at {}\".format(self.settings.save_path))\n self.epoch = 0\n self.test_input = None\n\n self.prepare()\n\n def write_settings(self):\n \"\"\"\n Save expriment settings to a file\n \"\"\"\n\n with open(os.path.join(self.settings.save_path, \"settings.log\"), \"w\") as f:\n for k, v in self.settings.__dict__.items():\n f.write(str(k) + \": \" + str(v) + \"\\n\")\n\n def prepare(self):\n \"\"\"\n Preparing experiments\n \"\"\"\n\n self._set_gpu()\n self._set_dataloader()\n self._set_model()\n self._set_checkpoint()\n self._set_trainier()\n\n def _set_gpu(self):\n \"\"\"\n Initialize the seed of random number generator\n \"\"\"\n\n # set torch seed\n # init random seed\n torch.manual_seed(self.settings.seed)\n 
torch.cuda.manual_seed(self.settings.seed)\n torch.cuda.set_device(0)\n cudnn.benchmark = True\n\n def _set_dataloader(self):\n \"\"\"\n Create train loader and validation loader for channel pruning\n \"\"\"\n\n if 'cifar' in self.settings.dataset:\n self.train_loader, self.val_loader = get_cifar_dataloader(self.settings.dataset,\n self.settings.batch_size,\n self.settings.n_threads,\n self.settings.data_path,\n self.logger)\n elif self.settings.dataset == 'imagenet':\n self.train_loader, self.val_loader = get_imagenet_dataloader(self.settings.dataset,\n self.settings.batch_size,\n self.settings.n_threads,\n self.settings.data_path,\n self.logger)\n\n elif self.settings.dataset in ['ms1m_v2', 'iccv_ms1m']:\n self.train_loader, self.val_loader, class_num = get_ms1m_dataloader(self.settings.dataset,\n self.settings.batch_size,\n self.settings.n_threads,\n self.settings.data_path,\n self.logger)\n self.settings.n_classes = class_num # class number\n self.logger.info('self.settings.n_classes={:d}'.format(self.settings.n_classes))\n\n def replace_layer_with_mask_conv_resnet(self):\n \"\"\"\n Replace the conv layer in resnet with mask_conv for ResNet\n \"\"\"\n\n for module in self.pruned_model.modules():\n if isinstance(module, (PreBasicBlock, BasicBlock, Bottleneck, IRBlock)):\n # replace conv2\n temp_conv = MaskConv2d(\n in_channels=module.conv2.in_channels,\n out_channels=module.conv2.out_channels,\n kernel_size=module.conv2.kernel_size,\n stride=module.conv2.stride,\n padding=module.conv2.padding,\n bias=(module.conv2.bias is not None))\n\n temp_conv.weight.data.copy_(module.conv2.weight.data)\n if module.conv2.bias is not None:\n temp_conv.bias.data.copy_(module.conv2.bias.data)\n module.conv2 = temp_conv\n\n if isinstance(module, Bottleneck):\n # replace conv3\n temp_conv = MaskConv2d(\n in_channels=module.conv3.in_channels,\n out_channels=module.conv3.out_channels,\n kernel_size=module.conv3.kernel_size,\n stride=module.conv3.stride,\n padding=module.conv3.padding,\n bias=(module.conv3.bias is not None))\n\n temp_conv.weight.data.copy_(module.conv3.weight.data)\n if module.conv3.bias is not None:\n temp_conv.bias.data.copy_(module.conv3.bias.data)\n module.conv3 = temp_conv\n self.logger.info(\"pruned model replace to mask_conv finished!!!\")\n\n elif isinstance(module, (Bottleneck_mobilefacenet)):\n # replace conv1\n # temp_conv = MaskConv2d(\n # in_channels=module.conv1.in_channels,\n # out_channels=module.conv1.out_channels,\n # kernel_size=module.conv1.kernel_size,\n # stride=module.conv1.stride,\n # padding=module.conv1.padding,\n # bias=(module.conv1.bias is not None))\n #\n # temp_conv.weight.data.copy_(module.conv1.weight.data)\n # if module.conv1.bias is not None:\n # temp_conv.bias.data.copy_(module.conv1.bias.data)\n # module.conv1 = temp_conv\n\n # replace conv3\n temp_conv = MaskConv2d(\n in_channels=module.conv3.in_channels,\n out_channels=module.conv3.out_channels,\n kernel_size=module.conv3.kernel_size,\n stride=module.conv3.stride,\n padding=module.conv3.padding,\n bias=(module.conv3.bias is not None))\n\n temp_conv.weight.data.copy_(module.conv3.weight.data)\n if module.conv3.bias is not None:\n temp_conv.bias.data.copy_(module.conv3.bias.data)\n module.conv3 = temp_conv\n self.logger.info(\"pruned model replace to mask_conv finished!!!\")\n\n def replace_layer_with_mask_linear(self):\n for module in self.pruned_model.modules():\n # replace maskLinear\n if self.settings.net_type in [\"LResnetxE-IR\", \"mobilefacenet_v1\"] and isinstance(module, nn.Linear):\n temp_conv = 
MaskLinear(\n in_features=module.in_features,\n out_features=module.out_features,\n bias=(module.bias is not None))\n temp_conv.weight.data.copy_(module.weight.data)\n if module.bias is not None:\n temp_conv.bias.data.copy_(module.bias.data)\n\n if self.settings.net_type in [\"LResnetxE-IR\"]:\n self.pruned_model.fc = temp_conv\n elif self.settings.net_type in [\"mobilefacenet_v1\"]:\n self.pruned_model.linear = temp_conv\n self.logger.info(\"pruned model replace to mask_linear finished!!!\")\n\n def replace_layer_with_mask_conv_vgg(self):\n \"\"\"\n Replace the conv layer in resnet with mask_conv for VGG\n \"\"\"\n new_net = None\n for layer in self.pruned_model.features.modules():\n if isinstance(layer, nn.Conv2d):\n if new_net is None:\n new_net = nn.Sequential(layer)\n continue\n temp_conv = MaskConv2d(\n in_channels=layer.in_channels,\n out_channels=layer.out_channels,\n kernel_size=layer.kernel_size,\n stride=layer.stride,\n padding=layer.padding,\n bias=(layer.bias is not None))\n temp_conv.weight.data.copy_(layer.weight.data)\n if layer.bias is not None:\n temp_conv.bias.data.copy_(layer.bias.data)\n\n new_net.add_module(str(len(new_net)), temp_conv)\n\n elif isinstance(layer, (nn.ReLU, nn.BatchNorm2d, nn.MaxPool2d, nn.AvgPool2d)):\n if new_net is None:\n new_net = nn.Sequential(layer)\n else:\n new_net.add_module(str(len(new_net)), layer)\n # print self.model.features\n # print new_net\n self.pruned_model.features = new_net\n\n def replace_layer_with_mask_conv(self):\n \"\"\"\n Replace the conv layer in resnet with mask_conv\n \"\"\"\n\n if self.settings.net_type in [\"preresnet\", \"resnet\", \"LResnetxE-IR\", \"mobilefacenet_v1\"]:\n self.replace_layer_with_mask_conv_resnet()\n\n elif self.settings.net_type in [\"vgg\"]:\n self.replace_layer_with_mask_conv_vgg()\n\n def _set_model(self):\n \"\"\"\n Get model\n \"\"\"\n\n self.pruned_model = get_model(self.settings,\n self.settings.dataset,\n self.settings.net_type,\n self.settings.depth,\n self.settings.n_classes)\n # self.logger.info(\"before replace: {}\".format(self.pruned_model))\n\n self.replace_layer_with_mask_conv()\n # self.replace_layer_with_mask_linear()\n\n self.logger.info(\"after replace: {}\".format(self.pruned_model))\n self.aux_fc = Arcface(self.settings.embed_size, self.settings.n_classes)\n\n def _set_checkpoint(self):\n \"\"\"\n Load pre-trained model or resume checkpoint\n \"\"\"\n\n assert self.pruned_model is not None, \"please create model first\"\n\n self.checkpoint = CheckPoint(self.settings.save_path, self.logger)\n self._load_pretrained()\n self._load_resume()\n\n def _load_pretrained(self):\n \"\"\"\n Load pre-trained model\n \"\"\"\n\n if self.settings.pretrained is not None:\n check_point_params = torch.load(self.settings.pretrained)\n model_state = check_point_params[\"pruned_model\"]\n self.pruned_model = self.checkpoint.load_state(self.pruned_model, model_state)\n self.logger.info(\"load pruned_model state finished!!\")\n\n # xz codes\n aux_fc_state = check_point_params[\"aux_fc\"]\n self.aux_fc.load_state_dict(aux_fc_state[-1])\n self.logger.info(\"load aux_fc[-1] state finished!!\")\n\n self.logger.info(\"|===>load restrain file: {}\".format(self.settings.pretrained))\n\n def _load_resume(self):\n \"\"\"\n Load resume checkpoint\n \"\"\"\n # To do\n if self.settings.resume is not None:\n check_point_params = torch.load(self.settings.resume)\n\n pruned_model_state = check_point_params[\"model\"]\n aux_fc_state = check_point_params[\"aux_fc\"]\n self.optimizer_state = 
check_point_params[\"optimizer\"]\n self.epoch = check_point_params[\"epoch\"]+1\n val_acc = check_point_params[\"val_acc\"]\n\n self.pruned_model = self.checkpoint.load_state(self.pruned_model, pruned_model_state)\n\n self.logger.info(\"|===>load resume file: {}\".format(self.settings.resume))\n\n def _set_trainier(self):\n \"\"\"\n Initialize network-wise trainer\n \"\"\"\n self.network_wise_trainer = NetworkWiseTrainer(pruned_model=self.pruned_model,\n aux_fc = self.aux_fc,\n train_loader=self.train_loader,\n val_loader=self.val_loader,\n settings=self.settings,\n logger=self.logger,\n tensorboard_logger=self.tensorboard_logger,\n run_count=self.epoch)\n\n def pruning(self):\n \"\"\"\n Prune channels\n \"\"\"\n\n if self.settings.dataset in [\"ms1m_v2\"]:\n self.test_input = torch.randn(1, 3, 112, 112).cuda()\n\n self.logger.info(\"Before pruning:\")\n self.logger.info(self.pruned_model)\n\n self.network_wise_trainer.face_before_val(0)\n model_analyse = ModelAnalyse(self.pruned_model, self.logger, self.settings)\n params_num = model_analyse.params_count()\n zero_num = model_analyse.zero_count()\n zero_rate = zero_num * 1.0 / params_num\n self.logger.info(\"zero rate is: {}\".format(zero_rate))\n model_analyse.flops_compute(self.test_input)\n\n if self.settings.net_type in [\"preresnet\", \"resnet\", \"LResnetxE-IR\", \"mobilefacenet_v1\"]:\n model_prune = ResModelPrune(model=self.pruned_model,\n net_type=self.settings.net_type,\n depth=self.settings.depth)\n elif self.settings.net_type == \"vgg\":\n model_prune = SeqModelPrune(model=self.pruned_model, net_type=self.settings.net_type)\n else:\n assert False, \"unsupport net_type: {}\".format(self.settings.net_type)\n\n model_prune.run()\n # After channel pruning\n self.network_wise_trainer.update_model(model_prune.model, self.optimizer_state)\n\n self.logger.info(\"After pruning:\")\n self.logger.info(self.pruned_model)\n\n self.network_wise_trainer.face_before_val(0)\n model_analyse = ModelAnalyse(self.pruned_model, self.logger, self.settings)\n params_num = model_analyse.params_count(pruned=True)\n # params_num = model_analyse.params_count()\n zero_num = model_analyse.zero_count()\n zero_rate = zero_num * 1.0 / params_num\n self.logger.info(\"zero rate is: {}\".format(zero_rate))\n model_analyse.flops_compute(self.test_input, pruned=True)\n\n # xz codes\n # save dcp_model with aux_fc_state\n self.checkpoint.save_aux_model(model=self.pruned_model, aux_fc=self.aux_fc)\n self.checkpoint.save_face_model(model=self.pruned_model, best_flag=True)\n\n def fine_tuning(self):\n \"\"\"\n Conduct network-wise fine-tuning after channel selection\n \"\"\"\n\n best_top1_acc = 0\n\n start_epoch = 0\n if self.epoch != 0:\n start_epoch = self.epoch + 1\n self.epoch = 0\n\n for epoch in range(start_epoch, self.settings.n_epochs):\n train_error, train_loss, train5_error = self.network_wise_trainer.face_train(epoch)\n val_acc = self.network_wise_trainer.face_val(epoch)\n\n # write and print result\n log_str = \"{:d}\\t{:.4f}\\t{:.4f}\\t{:.4f}\".format(epoch, train_error, train_loss, val_acc)\n write_log(self.settings.save_path, 'log.txt', log_str)\n\n best_flag = False\n if best_top1_acc <= val_acc:\n best_top1_acc = val_acc\n best_flag = True\n\n if best_flag:\n self.checkpoint.save_face_model(self.network_wise_trainer.pruned_model, best_flag)\n\n self.logger.info(\"|===>Best Result is: Top1 acc: {:f}\\n\".format(best_top1_acc))\n if epoch <= self.settings.n_epochs/2:\n if epoch%2 ==0:\n 
self.checkpoint.save_face_checkpoint(self.network_wise_trainer.pruned_model, self.network_wise_trainer.aux_fc,\n self.network_wise_trainer.optimizer, epoch, val_acc,\n self.network_wise_trainer.scheduler)\n else:\n self.checkpoint.save_face_checkpoint(self.network_wise_trainer.pruned_model, self.network_wise_trainer.aux_fc,\n self.network_wise_trainer.optimizer, epoch, val_acc,\n self.network_wise_trainer.scheduler)\n\ndef main():\n parser = argparse.ArgumentParser(description='Baseline')\n parser.add_argument('conf_path', type=str, metavar='conf_path',\n help='path to the configuration file')\n parser.add_argument('--model_path', type=str, metavar='model_path',\n help='model path of the pruned model')\n args = parser.parse_args()\n\n option = Option(args.conf_path)\n if args.model_path:\n option.pretrained = args.model_path\n\n experiment = Experiment(option)\n experiment.pruning()\n # experiment._load_resume()\n experiment.fine_tuning()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"quan_table/dcp/finetune/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"625902265","text":"#!/usr/bin/python2\n# coding=utf-8\nimport os\nimport time\n\n\ndef verify(poll, timeout, file_address):\n end_time = time.time() + timeout\n while True:\n value = os.path.exists(file_address)\n if value:\n return value\n time.sleep(poll)\n if time.time() > end_time:\n print('Timed out; file not found')\n break","sub_path":"common/method.py","file_name":"method.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"630290061","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.2.8'\n\nsetup(name='slc.cleanwordpastedtext',\n version=version,\n description=\"Clean up the HTML formatting problems introduced by pasting content from MSWord into Plone's RichText fields.\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"CHANGES.txt\")).read() + \"\\n\" +\n open(os.path.join(\"docs\", \"CONTRIBUTORS.txt\")).read(),\n\n # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Framework :: Plone\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords='plone syslab simplon microsoft word',\n author='Syslab.com GmbH',\n author_email='info@syslab.com',\n url='http://svn.plone.org/svn/plone/plone.example',\n license='GPL',\n packages=find_packages('src', exclude=['ez_setup']),\n package_dir={'': 'src'},\n namespace_packages=['slc'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'z3c.form',\n 'htmllaundry',\n 'archetypes.schemaextender',\n ],\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","sub_path":"pypi_install_script/slc.cleanwordpastedtext-1.2.8.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"57922790","text":"from django.urls import path\nfrom django.conf.urls import url\n\n\nfrom . 
import views\n\n\napp_name = 'projects'\nurlpatterns = [\n path('', views.index, name='index'),\n\n path('p//', views.detail, name='detail'),\n\n path('p//infos', views.project_info, name='project_info'),\n\n path('p//tasks', views.project_tasks, name='project_tasks'),\n\n path('p//t/', views.project_task, name='project_task'),\n\n path('u//', views.profile, name='profile'),\n\n path('projects/', views.projects, name='projects'), \n \n path('projects/new', views.new_project, name='new_project'), \n \n path('p//edit', views.edit_project, name='edit_project'),\n \n path('projects/delete/', views.delete_project, name='delete_project'),\n \n path('g//', views.group_overview, name='group_overview'),\n\n]\n","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"413393095","text":"import vtk\nimport math\nimport random\n\n#Class for the Sphere\nclass Point:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n# source\n## Trunk\ncylinder = vtk.vtkCylinderSource()\ncylinder.SetRadius(0.2)\ncylinder.SetHeight(1.3)\ncylinder.SetResolution(50)\ncylinder.Update()\n\n## Leaves\nsphere = vtk.vtkSphereSource()\nsphere.SetRadius(0.3)\n\n # Make the surface smooth.\nsphere.SetPhiResolution(500)\nsphere.SetThetaResolution(500)\nsphere.Update()\n# mapper\n## Trunk\nmapperTrunk = vtk.vtkPolyDataMapper()\nmapperTrunk.SetInputData(cylinder.GetOutput())\n\n## Leaves\nmapperLeaf = vtk.vtkPolyDataMapper()\nmapperLeaf.SetInputData(sphere.GetOutput())\n\n#actor\n## Trunk\nactorTrunk = vtk.vtkActor()\nactorTrunk.SetMapper(mapperTrunk)\nactorTrunk.GetProperty().SetColor(86/255, 47/255, 47/255)\n#actorTrunk.RotateX(20.0)\nactorTrunk.SetPosition(0.0,0.0,0.0)#(x,y,z)\n\n## Leaves\n# set the number of spheres\nquantitySphere = 25\ntopbyNivel = int(math.log(quantitySphere,4)) # tree height, 4^n\ndia = 0.4\npoint = Point(0.0,0.8+(dia * topbyNivel),0.0) # maximum height\n\nactorLeaf = vtk.vtkActor()\nactorLeaf.SetMapper(mapperLeaf)\nactorLeaf.GetProperty().SetColor(0.0, 1.0, 0.0)\nactorLeaf.SetPosition(point.x,point.y,point.z)\n\narraySphere = []\nquantitySphere -=1\n#for quantitySphere in range(quantitySphere-1):\nindexArray = 0\nfor quantityNivel in range(1,topbyNivel+2): # one extra level for the remainder\n qSphereSave = 4**quantityNivel\n if (quantitySphere >= qSphereSave):\n quantitySphere -= qSphereSave\n if (len(arraySphere) == 0):\n pointXPos = Point(0.3,(0.8+(dia * topbyNivel)-dia),0.0)\n pointXNeg = Point(-0.3,(0.8+(dia * topbyNivel)-dia),0.0) \n pointZPos = Point(0.0,(0.8+(dia * topbyNivel)-dia),0.3) \n pointZNeg = Point(0.0,(0.8+(dia * topbyNivel)-dia),-0.3) \n arraySphere.append(pointXPos)\n arraySphere.append(pointXNeg)\n arraySphere.append(pointZPos)\n arraySphere.append(pointZNeg)\n \n else:\n for i in range(indexArray,len(arraySphere)):\n pointXPos = Point(arraySphere[i].x+0.3,arraySphere[i].y-dia,arraySphere[i].z)\n pointXNeg = Point(arraySphere[i].x-0.3,arraySphere[i].y-dia,arraySphere[i].z)\n pointZPos = Point(arraySphere[i].x,arraySphere[i].y-dia,arraySphere[i].z+0.3)\n pointZNeg = Point(arraySphere[i].x,arraySphere[i].y-dia,arraySphere[i].z-0.3)\n arraySphere.append(pointXPos)\n arraySphere.append(pointXNeg)\n arraySphere.append(pointZPos)\n arraySphere.append(pointZNeg)\n indexArray = 4**(quantityNivel-1)\n \n else:\n \n if(len(arraySphere) == 0):\n print('hello')\n else:\n for i in range(quantitySphere):\n leafRandom=random.randint(indexArray, 
len(arraySphere)-1)\n position=random.randint(1, 4)\n if (position == 1):\n pointXPos = Point(arraySphere[leafRandom].x+0.1,arraySphere[leafRandom].y-dia+0.2,arraySphere[leafRandom].z)\n arraySphere.append(pointXPos)\n elif (position == 2):\n pointXNeg = Point(arraySphere[leafRandom].x-0.1,arraySphere[leafRandom].y-dia+0.2,arraySphere[leafRandom].z)\n arraySphere.append(pointXNeg)\n elif (position == 3):\n pointZPos = Point(arraySphere[leafRandom].x,arraySphere[leafRandom].y-dia+0.2,arraySphere[leafRandom].z+0.1)\n arraySphere.append(pointZPos)\n elif (position == 4):\n pointZNeg = Point(arraySphere[leafRandom].x,arraySphere[leafRandom].y-dia+0.2,arraySphere[leafRandom].z-0.1)\n arraySphere.append(pointZNeg)\n\n#axes\ntransform = vtk.vtkTransform()\ntransform.Translate(0.0, 0.0, 0.0) \naxes = vtk.vtkAxesActor()\naxes.SetUserTransform(transform)\n\n#renderer\nrenderer = vtk.vtkRenderer()\nrenderer.SetBackground(0.0, 0.0, 0.0)\nrenderer.AddActor(actorTrunk)\nrenderer.AddActor(actorLeaf)\nfor pointLeaf in arraySphere:\n actorLeaf_aux = vtk.vtkActor()\n actorLeaf_aux.SetMapper(mapperLeaf)\n actorLeaf_aux.GetProperty().SetColor(0.0, 1.0, 0.0)\n actorLeaf_aux.SetPosition(pointLeaf.x,pointLeaf.y,pointLeaf.z)\n\n renderer.AddActor(actorLeaf_aux)\nrenderer.AddActor(axes)\n\n#renderWindow\nrender_window = vtk.vtkRenderWindow()\nrender_window.SetWindowName(\"Simple VTK scene\")\nrender_window.SetSize(800, 800)\nrender_window.AddRenderer(renderer)\n\n#interactor\ninteractor = vtk.vtkRenderWindowInteractor()\ninteractor.SetRenderWindow(render_window)\n\n# Initialize the interactor and start the rendering loop\ninteractor.Initialize()\nrender_window.Render()\ninteractor.Start()\n","sub_path":"parcial_1/Lab_1/Components/arbolDinamico.py","file_name":"arbolDinamico.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"580216430","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 29 16:06:09 2020\n\n@author: A\n\"\"\"\n#%%\n'''\n============== Read me ================\n\nStore the address of the YouTube channel you want to crawl in the url variable, then run the program.\n1. Go to https://www.youtube.com\n2. Click the channel you want.\n3. Click the channel's Videos button.\n4. Copy the address of the channel's Videos tab.\n\n5. Store the copied Videos-tab address in url.\n6. Run the program (the crawler) - a Chrome driver is launched automatically, so do not force-quit the driver\n6-1. Crawling takes 2+ seconds per video, so it can take quite a while; run it when you have time to spare.\n (You can work on other things while the Chrome driver is running, as long as you do not close it.)\n7. Check the saved xlsx file - video length data to be added\n\n'''\n#%%\ndef str_to_date(string):\n \n yyyymmdd = string\n \n yyyy= yyyymmdd.split('-')[0]\n mm= yyyymmdd.split('-')[1]\n dd= yyyymmdd.split('-')[2]\n \n # zero-pad by string length, so an already padded '07' is not padded again\n if len(mm) < 2 :\n mm = '0'+mm\n \n if len(dd) < 2 :\n dd = '0'+dd \n \n return yyyy + '-' + mm + '-' + dd \n#%%\n\nimport numpy as np\nimport pandas as pd\nimport time\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\n\ndriver = webdriver.Chrome('chromedriver.exe')\n\n# Open the page that lists all of the creator's videos\nurl = 'https://www.youtube.com/c/%EC%9D%B4%EC%8A%A4%ED%83%80TV/videos'\ndriver.get(url)\n\n#2. 
Scroll to the bottom of the page for crawling\nSCROLL_PAUSE_TIME = 0.5 # pause time after each scroll\n\nbody = driver.find_element_by_tag_name('body') # grab the body tag\n\n# Scroll to the very bottom of the videos page\nwhile True:\n last_height = driver.execute_script('return document.documentElement.scrollHeight')\n # get the current page height and store it in last_height\n for i in range(10):\n body.send_keys(Keys.END)\n # send the END key to the body (scrolls down)\n time.sleep(SCROLL_PAUSE_TIME)\n new_height = driver.execute_script('return document.documentElement.scrollHeight')\n if new_height == last_height:\n break\n \npage = driver.page_source\nsoup = BeautifulSoup(page, 'lxml')\n\n# Crawl the channel name\nusername = soup.find('div', {'id' : 'text-container'}).text\nusername = username.strip()\n\n# Crawl titles and urls\nall_videos = soup.find_all(id='dismissable')\n\nlist_title = []\nlist_url = []\nlist_video_length = []\nfor video in all_videos:\n title = video.find(id='video-title')\n if len(title.text.strip())>0: # strip whitespace and append only non-empty titles \n list_title.append(title.text)\n \n #find('a',{'id':'thumbnail'})['href']\n url = video.find(id='video-title')['href'] # url append\n list_url.append(url)\n\n video_length = video.find('span',{'class' : 'style-scope ytd-thumbnail-overlay-time-status-renderer'})\n list_video_length.append(video_length.text.strip())\n\n '''\n if(len(list_url)) >= 600: # cap the number of urls (for time's sake)\n break\n '''\n\n# Crawl the upload date of each video\nlist_upload_date = []\nlist_view_count = []\nfor url in list_url:\n video_url = 'https://www.youtube.com' + url\n\n driver.get(video_url)\n #driver.maximize_window()\n body = driver.find_element_by_tag_name('body')\n time.sleep(1.5)\n \n page = driver.page_source\n soup = BeautifulSoup(page, 'lxml')\n\n # Extract the upload date and view count\n '''\n Extract the date with a regular expression \n e.g. values like 'Premiered 2017.10.10' can come in -> pull out only the date part with a regex\n '''\n upload_date = soup.find('div', {'id' : 'date'}).find('yt-formatted-string', 'style-scope ytd-video-primary-info-renderer').text\n upload_date = upload_date.replace('. ', '-') # fix the date format\n upload_date = upload_date.replace('.', '') # fix the date format\n upload_date = str_to_date(upload_date)\n list_upload_date.append(upload_date)\n \n view_cnt = soup.find('span', 'view-count style-scope yt-view-count-renderer').text\n view_cnt = int(''.join(ele for ele in view_cnt if ele.isdigit() or ele == '.')) # keep only the digits from the string\n list_view_count.append(view_cnt)\ndriver.close() \n\n# create xlsx file\ndf_video_info = pd.DataFrame({'upload_date':[]})\ndf_video_info['title'] = list_title\ndf_video_info['running time'] = list_video_length\ndf_video_info['upload_date'] = list_upload_date\ndf_video_info['view count'] = list_view_count\n\n\ndf_upload_cnt = pd.DataFrame()\ndf_upload_cnt = df_video_info.groupby(\"upload_date\").count()\ndf_upload_cnt = df_upload_cnt.iloc[: , : 1]\ndf_upload_cnt.columns = [\"daily video count\"]\n\nxlxs_dir = username + '_upload_cnt.xlsx'\n# create the xlsx file\n# Write two DataFrames to Excel using to_excel(). Need to specify an ExcelWriter object first.\n# To write two or more DataFrames into one Excel file as separate sheets, create a pd.ExcelWriter() object first and then write each DataFrame with its own sheet_name. 
\nwith pd.ExcelWriter(xlxs_dir) as writer:\n df_video_info.to_excel(writer, sheet_name = 'video info')\n df_upload_cnt.to_excel(writer, sheet_name = 'upload count') \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"src/youtube_upload_date_crawling.py","file_name":"youtube_upload_date_crawling.py","file_ext":"py","file_size_in_byte":6058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"434629159","text":"from helper.wasteDisposal import WasteDisposal\nimport json\nimport telegram\n\nwith open('config.json') as data_file:\n data = json.load(data_file)\n zip = data[\"zip\"]\n chatId = data[\"chatId\"]\n token = data[\"token\"]\n\nBot = telegram.Bot(token=token)\n\ndisposals = [WasteDisposal(\"paper\", \"Papier\", zip), WasteDisposal(\"cardboard\", \"Karton\", zip)]\n\nfor disposal in disposals:\n if disposal.is_today():\n Bot.send_message(chatId, f\"{disposal.displayname}sammlig isch hütt!\")\n if disposal.is_tomorrow():\n Bot.send_message(chatId, f\"{disposal.displayname}sammlig isch morn!\")\n","sub_path":"altpapierBot.py","file_name":"altpapierBot.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"359830546","text":"#!/usr/bin/env python3\n\nimport json\nimport logging\nimport re\nimport subprocess\nimport os\nimport yaml\n\nimport messagebuilder\nimport configfetcher\nimport rqueue\nfrom enum import Enum\nfrom wblogging import LoggingSetupParser\n\nIGNORED_USERS = ['L10n-bot', 'Libraryupgrader']\nIGNORED_POSITIVE_VOTES = ['jenkins-bot', 'PipelineBot', 'SonarQube Bot']\nJENKINS_USER = 'jenkins-bot'\n\nlogger = logging.getLogger('wikibugs.wb2-grrrrit')\n\n\nclass IncludeOwner(Enum):\n IF_NOT_USER = 1\n ALWAYS = 2\n\n\ndef trim_repo(repo: str) -> str:\n if repo.startswith('mediawiki/') or repo.startswith('operations/'):\n repo = repo.split('/', 1)[-1]\n return repo\n\n\ndef extract_bug(commit_msg: str):\n search = re.search(r'Bug: T(\\d+)', commit_msg)\n if search:\n return 'T' + search.group(1)\n\n\ndef should_ignore_CI_comment(ret, event):\n \"\"\"\n jenkins-bot and Pipeline bot get a special treatment: their comments are only reported\n in case an approval _is present_, and _changes_ to a _negative value_.\n The goal is to filter out post-commit events, code coverage comments, etc.\n\n :param event: The event to handle\n :return: Whether the event should be ignored or not.\n \"\"\"\n if ret['user'] not in IGNORED_POSITIVE_VOTES:\n return False\n\n if 'approvals' not in event:\n return True\n\n if not any('oldValue' in approval and int(approval['value']) < 0\n for approval in event['approvals']):\n return True\n\n return False\n\n\n# T239928\ndef change_is_WIP(event: dict) -> bool:\n return event.get('change', {}).get('wip', False)\n\n\ndef process_event(event: dict):\n user = event.get('uploader', {}).get('name') or event.get('author', {}).get('name')\n if user in IGNORED_USERS or change_is_WIP(event):\n return None\n\n ret = None\n if event['type'] == 'patchset-created':\n ps = 'PS' + str(event['patchSet']['number'])\n ret = process_simple(event, ps, 'uploader', IncludeOwner.IF_NOT_USER)\n elif event['type'] == 'comment-added':\n ret = process_simple(event, 'CR', 'author')\n\n if should_ignore_CI_comment(ret, event):\n return None\n\n comment = ''\n original_comment = event.get('comment')\n inline = 0\n\n if original_comment:\n inline_match = re.search(r'\\((\\d+) comments?\\)', original_comment)\n if inline_match:\n try:\n inline 
= int(inline_match.group(1))\n except ValueError:\n pass\n # cheat! Get rid of (# comments) from the text\n original_comment = original_comment.replace(inline_match.group(0), '')\n comment = \"\\n\".join(original_comment.split('\\n')[1:]).strip().split('\\n')[0].strip()\n if comment:\n comment = '\"' + comment[:138] + '\"'\n else:\n comment = event['change']['subject'][:140]\n ret['message'] = comment\n if ret['user'] == JENKINS_USER:\n ret['message'] = event['change']['subject']\n ret['inline'] = inline\n ret['approvals'] = {}\n\n if 'approvals' in event:\n for approval in event['approvals']:\n value = int(approval['value'])\n old_value = int(approval.get('oldValue', 0))\n\n if value == old_value:\n # if the review value didn't change, don't mention the score\n continue\n\n if approval['type'] == 'Verified' and value != 0:\n ret['approvals']['V'] = value\n if ret['user'] == JENKINS_USER and value == -1:\n ret['user'] = 'CI reject'\n elif approval['type'] == 'Code-Review' and value != 0:\n ret['approvals']['C'] = value\n\n elif event['type'] == 'change-merged':\n ret = process_simple(event, 'Merged', 'submitter')\n if ret['user'] == JENKINS_USER and ret['owner'] in IGNORED_USERS:\n return None\n elif ret['user'] != JENKINS_USER:\n # Ignore any merges by anyone that is not jenkins-bot\n # This is always preceded by a C:2 by them, so we need not spam\n return None\n elif event['type'] == 'change-restored':\n ret = process_simple(event, 'Restored', 'restorer')\n elif event['type'] == 'change-abandoned':\n ret = process_simple(event, 'Abandoned', 'abandoner')\n\n return ret\n\n\ndef process_simple(event: dict, type_: str, user_property: str,\n include_owner: IncludeOwner = IncludeOwner.ALWAYS) -> dict:\n ret = {\n 'type': type_,\n 'user': event[user_property]['name'],\n 'message': event['change']['subject'],\n 'repo': event['change']['project'],\n 'branch': event['change']['branch'],\n # Use short URL form (T175929#6250620)\n 'url': \"https://gerrit.wikimedia.org/r/{}\".format(event['change']['number']),\n 'task': extract_bug(event['change']['commitMessage']),\n }\n\n owner = event['change']['owner']['name']\n if (\n (include_owner == IncludeOwner.ALWAYS) or\n (include_owner == IncludeOwner.IF_NOT_USER and ret['user'] != owner)\n ):\n ret['owner'] = owner\n\n return ret\n\n\ndef build_message(processed: dict) -> str:\n helper = messagebuilder.IRCMessageBuilder().ircformat\n text = '({})'.format(helper(processed['type'], foreground='green'))\n text += ' {}:'.format(helper(processed['user'], foreground='teal', style='bold'))\n if 'approvals' in processed and processed['approvals']:\n def format_approval(value: int) -> str:\n if value == 1:\n return helper(\"+1\", foreground='green')\n elif value == 2:\n return helper(\"+2\", foreground='green', style='bold')\n elif value == -1:\n return helper(\"-1\", foreground='red')\n else: # -2\n return helper(str(value), foreground='red', style='bold')\n\n text += ' ['\n has_c = 'C' in processed['approvals']\n has_v = 'V' in processed['approvals']\n if has_v:\n text += 'V: {}'.format(format_approval(processed['approvals']['V']))\n if has_v and has_c:\n text += ' '\n if has_c:\n text += 'C: {}'.format(format_approval(processed['approvals']['C']))\n text += ']'\n text += ' {}'.format(processed['message'])\n if 'inline' in processed and processed['inline']:\n text += ' ({} {})'.format(\n helper(str(processed['inline']), foreground='green', style='bold'),\n 'comments' if processed['inline'] > 1 else 'comment'\n )\n text += ' 
[{}]'.format(trim_repo(processed['repo']))\n if processed['branch'] not in ('master', 'main', 'production'):\n text += ' ({})'.format(processed['branch'])\n text += ' -'\n text += ' {}'.format(helper(processed['url'], foreground='teal'))\n if processed['task']:\n text += ' (https://phabricator.wikimedia.org/{})'.format(processed['task'])\n if 'owner' in processed:\n text += ' (owner: {})'.format(helper(processed['owner'], foreground='teal', style='bold'))\n\n return text\n\n\ndef channel_filter(repo: str, branch: str) -> set:\n # TODO use the channelfilter module\n with open(os.path.join(os.path.dirname(__file__), 'gerrit-channels.yaml')) as f:\n data = yaml.safe_load(f)\n channels = set()\n for channel in data['channels']:\n repos = data['channels'][channel]\n for repo_re, filters in repos.items():\n if re.match(repo_re, repo):\n if filters:\n if not re.match(filters['branch'], branch):\n continue\n channels.add(channel)\n\n if not channels:\n channels.add(data['default-channel'])\n\n channels.add(data['firehose-channel'])\n return channels\n\n\ndef main():\n conf = configfetcher.ConfigFetcher()\n queue = rqueue.RedisQueue(\n conf.get('REDIS_QUEUE_NAME'),\n conf.get('REDIS_HOST')\n )\n ssh = subprocess.Popen(\n ['ssh', 'suchabot@gerrit.wikimedia.org',\n '-i', os.path.join(os.path.dirname(__file__), 'id_rsa'),\n '-p', '29418',\n 'gerrit', 'stream-events'\n ],\n shell=False,\n bufsize=1, # line buffered\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n logger.info('Opened SSH connection')\n\n for line in ssh.stdout:\n logger.info(\"%s: %s\", \"stream-events\", line.decode())\n parsed = json.loads(line.decode())\n processed = process_event(parsed)\n if processed:\n logger.info(\"%s: %s\", \"processed\", json.dumps(processed))\n try:\n msg = build_message(processed)\n channels = list(channel_filter(processed['repo'], processed['branch']))\n queue.put({'raw': True, 'msg': msg, 'channels': channels})\n logger.info(\"%s: '%s' to [%s]\", \"message\", msg, \", \".join(channels))\n except KeyboardInterrupt:\n raise\n except:\n logger.exception('Error queuing message')\n ssh.stdout.flush()\n\n\nif __name__ == '__main__':\n parser = LoggingSetupParser(\n description='Sends events from Gerrit to IRC'\n )\n parser.parse_args()\n\n while True:\n try:\n main()\n except KeyboardInterrupt:\n raise\n except:\n logger.exception('Error, probably SSH connection dropped.')\n","sub_path":"grrrrit.py","file_name":"grrrrit.py","file_ext":"py","file_size_in_byte":9449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"582474275","text":"\"\"\"\nURLpatterns for the debug toolbar.\n\nThe debug toolbar middleware will monkey-patch them into the default urlconf\nif they aren't explicitly included.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('debug_toolbar.views', # noqa\n url(r'^render_panel/$', 'render_panel', name='render_panel'),\n url(r'^sql_select/$', 'sql_select', name='sql_select'),\n url(r'^sql_explain/$', 'sql_explain', name='sql_explain'),\n url(r'^sql_profile/$', 'sql_profile', name='sql_profile'),\n url(r'^template_source/$', 'template_source', name='template_source'),\n)\n","sub_path":"debug_toolbar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"80679018","text":"import random\nfrom Point import Point\nfrom ObstacleField import *\nimport 
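The channel_filter function in grrrrit.py above fixes the expected shape of gerrit-channels.yaml: a `channels` mapping from IRC channel to repo regexes, each regex optionally carrying a `branch` filter, plus `default-channel` and `firehose-channel` fallbacks. A minimal file consistent with that parsing logic (the channel names and patterns here are illustrative, not the real Wikimedia configuration):

channels:
  "#mediawiki-core":
    "mediawiki/core": null
    "mediawiki/extensions/.*":
      branch: "^(master|main)$"
default-channel: "#mediawiki-feed"
firehose-channel: "#mediawiki-firehose"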
PathFinder\n\nclass ObstacleGenerator:\n\n def __init__(self):\n\n self.lengths = [12, 12, 24, 24, 36] #list of obstacle lengths, # of lengths = # of obstacles\n self.maxx = 96 #x dimension\n self.maxy = 72 #y dimension\n self.points = [] #[[x1,y1],[x2,y2]....] set of occupied points\n self.pointobjects = [] #List of occupied point objects\n self.array = [] # 0 1 array describing course\n self.invertarray = []\n self.cornerarray = []\n\n self.exitoffset = 6\n self.entranceoffset = 34\n self.entrancewidth = 24\n self.exitwidth = 24\n self.message = -1\n\n def insertBorders(self):\n self.cornerarray.append([(0, self.maxy), (self.maxx, self.maxy - 1)]) # top wall\n self.cornerarray.append([(0, 0), (self.maxx, 1)]) # bottom wall\n self.cornerarray.append([(0, 0), (1, self.entranceoffset)]) # bottom entrance\n self.cornerarray.append([(0, self.maxy), (1, self.entranceoffset + self.entrancewidth)]) # top entrance\n self.cornerarray.append([(self.maxx, 0), (self.maxx - 1, self.exitoffset)]) #\n self.cornerarray.append([(self.maxx, self.maxy), (self.maxx - 1, self.exitoffset + self.exitwidth)])\n\n #Check if obstacles overlap eachother or outer walls\n def checkCollisions(self, obstaclepoints, points):\n\n #check if any point overlaps with any existing obstacle\n for obstaclepoint in obstaclepoints:\n for point in points:\n if (point == obstaclepoint):\n return True\n\n #check if any point is out of bounds\n if ((obstaclepoint[0] < 0) or (obstaclepoint[0] >= self.maxx)):\n return True\n elif ((obstaclepoint[1] < 0) or (obstaclepoint[1] >= self.maxy)):\n return True\n\n #if there are no problems, return False, no collisions\n return False\n\n #convert to array for Cadens path checker\n def convertToArray(self):\n\n #populate empty course array with 0\n self.array = [[0 for x in range(self.maxx)] for y in range(self.maxy)]\n\n #populate obstaclepoints in array as 1\n for point in self.points:\n self.array[point[1]][point[0]] = 1\n\n for row in range(self.maxy):\n if ((row < self.exitoffset) or row >= (self.exitoffset + self.exitwidth)):\n self.array[row].insert(0,1)\n\n else:\n self.array[row].insert(0,0)\n\n if ((row < self.entranceoffset) or row >= (self.entranceoffset + self.entrancewidth)):\n self.array[row].append(1)\n\n else:\n self.array[row].append(0)\n\n self.invertarray = [[0 for a in range(len(self.array))] for b in range(len(self.array[0]))]\n\n for y in range(len(self.array)):\n for x in range(len(self.array[0])):\n self.invertarray[x][y] = self.array[y][x]\n\n '''for i in self.array:\n print(i)\n\n print()\n for i in self.invertarray:\n print(i)'''\n\n\n #Generate Obstacles\n def generate(self):\n\n pathcheck = False\n pathfinder = PathFinder.PathFinder()\n\n #Continue until a valid path is found\n while (not pathcheck):\n self.points = []\n self.pointobjects = []\n self.cornerarray = []\n #For each obstacle in lengths\n for length in self.lengths:\n\n collision = True\n\n #Continue until obstacle doesn't collide with anything\n while (collision == True):\n initpoint = (random.randint(0,self.maxx-1),random.randint(0,self.maxy-1)) #Generate a random point\n direction = random.randint(0,3) # 0,1,2,3 :: E,N,W,S #Generate a random direction\n\n obstaclepoints = []\n\n if (direction == 0): #East #Generate points starting at initial point\n #and continue in the direction until the end of\n for i in range(length): #the length of the obstacle\n obstaclepoints.append( (initpoint[0] + i, initpoint[1]) )\n obstaclepoints.append( (initpoint[0] + i, initpoint[1] + 1) )\n\n elif (direction == 1): 
#North\n\n                        for i in range(length):\n                            obstaclepoints.append( (initpoint[0], initpoint[1] + i) )    #Generate one length\n                            obstaclepoints.append( (initpoint[0] + 1, initpoint[1] + i) ) #generate a second length to make it 2 thick\n\n                    elif (direction == 2): #West\n\n                        for i in range(length):\n                            obstaclepoints.append( (initpoint[0] - i, initpoint[1]) )\n                            obstaclepoints.append( (initpoint[0] - i, initpoint[1] + 1) )\n\n                    elif (direction == 3): #South\n\n                        for i in range(length):\n                            obstaclepoints.append( (initpoint[0], initpoint[1] - i) )\n                            obstaclepoints.append( (initpoint[0] + 1, initpoint[1] - i) )\n\n\n\n                    collision = self.checkCollisions(obstaclepoints, self.points) #Check if points collide with other obstacles or walls\n\n                    if (collision == False): #If no collision add the obstacle to master list\n                        self.points = self.points + obstaclepoints\n                        (a,b) = (obstaclepoints[0], obstaclepoints[len(obstaclepoints)-1])\n                        a = (a[0], self.maxy - a[1]) # make the bottom left be 0,0 for pyglet\n                        b = (b[0], self.maxy - b[1])\n                        self.cornerarray.append([a, b])\n\n\n            #convert to array for the pathchecker\n            self.convertToArray()\n\n            pathfinder.setField(self.invertarray)\n            pathfinder.debug = False\n            pathfinder.setSleep(0)\n            pathcheck = pathfinder.checkField()\n\n            #pathcheck = True\n\n            #pathfinder.setSleep(1)\n            #pathfinder.checkField()\n\n        self.points.sort()\n        #print(self.points)\n        self.insertBorders()\n\n        #Convert points list into a list of point objects for obstacle field\n        for point in self.points:\n            self.pointobjects.append(Point((point[0]+1),(point[1]+1)))\n\n\n#    for i in range(1000):\n#        course.generate()\n#        field = ObstacleField(72, 96, 24, 34, 24, 6, course.pointobjects)\n        #print(i)\n       # field.toString()\n","sub_path":"Test/ObstacleGenerator.py","file_name":"ObstacleGenerator.py","file_ext":"py","file_size_in_byte":6944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"437249388","text":"import urllib.request\nimport xml.etree.ElementTree as ET\nimport pandas as pd\n\n\n\"\"\"Get the voting records for the proposition (PEC 241/2016) from the Camara dos Deputados API.\"\"\"\nvotoprop_url = (\"http://www.camara.leg.br/SitCamaraWS/Proposicoes.asmx/\"\n                \"ObterVotacaoProposicao?tipo=PEC&numero=241&ano=2016\")\n\nvotoprop = []\nvotopar = []\n\nurl = urllib.request.urlopen(votoprop_url)\ndata = ET.fromstring(url.read())\ncount = 0\nfor item in data:\n    for vot in item.findall('Votacao'):\n        count += 1\n        for vots in vot.findall('votos'):\n            for dep in vots.findall('Deputado'):\n                deputado = dep.get('Nome')\n                voto = dep.get('Voto')\n                votoprop.append([deputado, voto, count])\n        for orie in vot.findall('orientacaoBancada'):\n            for ban in orie.findall('bancada'):\n                partido = ban.get('Sigla')\n                # read the orientation from the bancada element itself;\n                # dep.get('Voto') here was a copy-paste slip from the loop above\n                orientacao = ban.get('orientacao')\n                votopar.append([partido, orientacao, count])\n\nvotoprop_df = pd.DataFrame(votoprop, columns=[\"deputado\", \"voto\", \"votacao\"])\n\nvotoprop_df = votoprop_df.pivot(index='deputado', columns='votacao', values='voto')\nvotoprop_df = votoprop_df.fillna('5')\nvotoprop_df.to_csv('votoprop.csv', sep=',', encoding=\"latin1\")\n\n\nvotopar_df = pd.DataFrame(votopar, columns=[\"partido\", \"orientacao\", \"votacao\"])\n\nvotopar_df = votopar_df.pivot(index='partido', columns='votacao', values='orientacao')\nvotopar_df = votopar_df.fillna('5')\nvotopar_df.to_csv('votopar.csv', sep=',', 
encoding=\"latin1\")\n\n","sub_path":"obter_historico_votacoes.py","file_name":"obter_historico_votacoes.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"286173463","text":"#Importing required libraries.\nif 1==1:\n import rospy\n import rospkg\n import sys\n import time\n import os\n import json\n from os import curdir, path as pa\n import random\n from matplotlib import pyplot as plt\n import numpy as np\n from std_msgs.msg import String, Time\n from geometry_msgs.msg import Pose\n rospack =rospkg.RosPack()\n import xml.etree.ElementTree as ET\n import random\n from collections import defaultdict\n #Import python module with functions necessary for interfacing with kautham\n import kautham_py.kautham_python_interface as kautham\n\n #Importing files(Modules.)\n from DataHandler import nodes_handler\n from DataHandler import taskfile_writer\n from SceneGenerator import scene_generator\n from KauthamDataEncoder import encoder\n\n #Global variables\n directory=''\n Robot_move_control= ''\n Robot_pos=[]\n taskfile=''\n graspedobject= False\n\n\n#This Function will open the kautham Problem.\ndef openKauthamProblem(dimension):\n #Setting problem files\n ROSpackage_path = rospack.get_path(\"kautham\")\n modelFolder = ROSpackage_path + \"/demos/models/\"\n #check for arguments\n if len(sys.argv)<4:\n print(\"Hello\")\n if dimension==2:\n kauthamProblemFile = ROSpackage_path + \"/demos/OMPL_geo_demos/boxes_world_R2/OMPL_RRTconnect_boxes_world_R2.xml\"\n elif dimension==3:\n kauthamProblemFile= ROSpackage_path + \"/demos/OMPL_geo_demos/boxes_world_R3/OMPL_RRTconnect_boxes_world_R3.xml\"\n elif dimension==4:\n #kauthamProblemFile= ROSpackage_path + \"/demos/OMPL_geo_demos/Synergies/artificialVFRRT.xml\"\n kauthamProblemFile=\"/home/momun/catkin_ws/kautham/demos/OMPL_geo_demos/Synergies/artificialVFRRT.xml\"\n kauthamproblem = os.path.basename(kauthamProblemFile)\n else:\n kauthamProblemFile= ROSpackage_path + \"/\" + sys.argv[1]\n kauthamproblem = os.path.basename(sys.argv[1])\n\n rospy.loginfo (\"Starting Kautham Python Client\")\n rospy.init_node(\"kautham_python_client\")\n rospy.loginfo_once(kauthamProblemFile)\n ##Solving the motion planning problem\n #Open kautham problem\n kautham.kOpenProblem(modelFolder,kauthamProblemFile)\n return kauthamproblem\n\n\n#This is The Kautham Client which will generate the data for dateset.\ndef kauthamClient(dimension,no_of_paths,flag_path,format,no_of_env,flag_taskfile,no_of_obstacles,size_of_obstacles,rn):\n\n #For Loop for Number Of Environments To Be Created.\n for x in range(1,no_of_env+1):\n\n #Opening Kautham Problem by calling the function.\n kauthamproblem=openKauthamProblem(4)\n\n #Variable used to name the Data set File.\n file_name_counter= 1\n folder_or_file=\"folder\"\n current_dirr=os.getcwd()\n #Creating the required Folders To Save the Datasets.\n if flag_taskfile:\n\n taskfile_writer.folderOrFileCreater(current_dirr,\"TaskFiles\",folder_or_file)\n #Creating inital and goal state Folder To save Init and goal States.\n taskfile_writer.folderOrFileCreater(current_dirr+\"/TaskFiles\",\"Initial_Goal_States\",folder_or_file)\n\n if flag_path:\n #Creating Data Set Folder to Save DataSet.\n taskfile_writer.folderOrFileCreater(current_dirr,\"DataSet\",folder_or_file)\n #Creating Folder To save cloud Files.\n taskfile_writer.folderOrFileCreater(current_dirr,\"DataSet/obs_cloud\",folder_or_file)\n \n #Folder For Saving the centre points of the 
obstacles.\n taskfile_writer.folderOrFileCreater(current_dirr,\"DataSet/obstacle_controls\",folder_or_file)\n\n #Loading Obstacle.dat File\n #Loading Obstacle.Perm File\n obs_dat,obs_perm=scene_generator.readPathObstacleFile(no_of_obstacles,current_dirr)\n\n #Variable To Count Init and Goal FileNumber initial_goal_states/init_goal(c).txt.\n initial_goal_file_counter=1\n #Calculating Obstacle Controls Using Perm and obs.dat file\n \n\n #Obstacles Controls Using Obs_Dat and Perm File.\n #unormalized_obstacle_control,obstacle_control=scene_generator.createObstacleControls(no_of_obstacles,obs_dat,obs_perm,rn)\n #Obstacles Controls using random.random()\n unormalized_obstacle_control,obstacle_control=scene_generator.randomObstacleControls(no_of_obstacles,rn)\n obstacle_controls=obstacle_control.flatten()\n print(obstacle_control)\n\n\n #Calling Obs_Point_Cloud Function.\n name='obs_cloud' +str(x-1)+\".npy\"\n obs_dat_file=taskfile_writer.folderOrFileCreater(current_dirr+\"/DataSet/obs_cloud\",name,\"file\")\n #Calling Point Cloud Encoder Function.\n encoder.pointCloudEncoder(obs_dat_file,obstacle_control,no_of_obstacles,size_of_obstacles,rn)\n \n #Saving the Centre Locatations of the obstacles\n name='obstacle_control' +str(x-1)+\".npy\"\n obstacle_control_file=taskfile_writer.folderOrFileCreater(current_dirr+\"/DataSet/obstacle_controls\",name,\"file\")\n for control in obstacle_controls:\n obstacle_control_file.write(f\"{control}\\n\")\n obstacle_control_file.close()\n print(obstacle_controls)\n\n #Setting Obstacles Configuration.\n kautham.kSetObstaclesConfig(obstacle_controls)\n\n if flag_path:\n #Creating SubFolders in DataSet Folder to save DataSet .npy Format.\n folder_name=\"e\"+str(x-1)\n taskfile_writer.folderOrFileCreater(current_dirr+\"/DataSet\",folder_name,folder_or_file)\n\n if flag_taskfile:\n #Creating SubFolders in TaksFiles Folder to save TaskFiles .xml.\n folder_name=\"Taskfiles\"+str(x)\n taskfile_writer.folderOrFileCreater(current_dirr+\"/TaskFiles\",folder_name,folder_or_file)\n\n #Creating SubFolders in z_initialANDgoal_states Folder to save initial and Goal states .txt.\n folder_name=\"initial_goal_states\"+str(x)\n taskfile_writer.folderOrFileCreater(current_dirr+\"/TaskFiles/Initial_Goal_States\",folder_name,folder_or_file)\n\n #Variable To Count FileNumber of DataSet .npy DataSet/e1/2D_dataSet(file_name_counter).txt.\n file_name_counter=1\n\n # For Loop to Generate Number of Paths Per Env.\n for z in range(no_of_paths):\n #For loop for Number of Times Init and Goal are initiled False Randomly.\n for y in range(10):\n\n #For 2D Prob 2D Init and goal i.e x and y.\n if dimension==\"r2\":\n init = [random.random(), random.random()]\n goal = [random.random(), random.random()]\n #For 3D Prob 3D Init and goal i.e x and y,z.\n elif dimension==\"r3\":\n init = [random.random(),random.random(),random.random()]\n goal = [random.random(),random.random(),random.random()]\n\n if kautham.kSetQuery(init,goal):\n print(\"Query valid (init=\", init, \" goal = \", goal, \"). 
Calling getPath\")\n path = kautham.kGetPath(0) #do not print the path\n\n if path:\n #Printing Computational Time Of the Planner.\n print(\"ComputationTime:\",kautham.kGetPlannerComputationTime())\n\n if flag_taskfile:\n\n if not flag_path:\n format=\".txt\"\n #Writing Initial and Goal State of Every Path Computed in init_goal.txt.\n taskfileinit = taskfile_writer.folderOrFileCreater(current_dirr+'/TaskFiles/Initial_Goal_States/initial_goal_states'+str(x),\"initial_goal\"+str(initial_goal_file_counter)+format,\"file\")\n\n if dimension==\"r2\":\n taskfileinit.write(str(init[0])+\" \"+str(init[1])+\"\\n\")\n taskfileinit.write(str(goal[0])+\" \"+str(goal[1])+\"\\n\")\n if dimension==\"r3\":\n taskfileinit.write(str(init[0])+\" \"+str(init[1])+\" \"+str(init[2])+\"\\n\")\n taskfileinit.write(str(goal[0])+\" \"+str(goal[1])+\" \"+str(goal[2])+\"\\n\")\n\n #Variable To Count Init and Goal FileNumber z_initialANDgoal_states/initial_goal_states/init_goal(c).txt.\n initial_goal_file_counter+=1\n\n taskfileinit.close()\n\n #Variable To count Folder Number of .xml Files.\n folder_number=x\n\n if flag_taskfile:\n taskfile =taskfile_writer.folderOrFileCreater(current_dirr+\"/TaskFiles/Taskfiles\"+str(x)+\"/\",'taskfile'+ str(file_name_counter)+'.xml',\"file\")\n\n file_name='path' +str(file_name_counter-1)+format\n py_taskfile =taskfile_writer.folderOrFileCreater(current_dirr+\"/DataSet/e\"+str(folder_number-1),file_name,\"file\")\n taskfile_writer.writeTaskfile(path, str(x)+\"_\"+kauthamproblem,flag_path,dimension,taskfile,py_taskfile)\n\n if flag_path:\n if not flag_taskfile:\n\n #Naming the dataset file.\n file_name='path' +str(file_name_counter)+format\n py_taskfile =taskfile_writer.folderOrFileCreater(current_dirr+\"/DataSet/e\"+str(folder_number-1),file_name,\"file\")\n\n k = sorted(list(path.keys()))[-1][1]+1\n for i in range(int(len(path.keys())/k)-1):\n tex=''\n for j in range(0,k):\n tex=tex + str(path[i,j]) + \" \"\n\n #Calling DataSet Writing Function.\n taskfile_writer.writePathMPNETFormate(tex,py_taskfile,dimension)\n\n py_taskfile.close()\n\n #Variable To Count FileNumber of DataSet .npy DataSet/e1/2D_dataSet(file_name_counter).txt.\n file_name_counter+=1\n\n break\n else:\n #Restarting Kautham.\n\n #nodes_handler.kauthamRosNode(\"start\")\n #openKauthamProblem(4)\n print(\"You Should Restart Kautham-Rosnode if it is crashed.\")\n else:\n print(\"Query not valid (init=\", init, \" goal = \", goal, \"). 
Skipping getPath call\")\n\n kautham.kCloseProblem()\n # Kautham Node To Free up Storage.\n print(\"Restarting Kautham-Node.\")\n nodes_handler.kauthamRosNode(\"kill\")\n print(\"Kautham Node Killed Succefully.\")\n time.sleep(2)\n nodes_handler.kauthamRosNode(\"start\")\n print(\"Kautham Node Restarted Succesfully.\")\n time.sleep(3)\n\n #Close kautham problem\n #kautham.kCloseProblem()\n\n\nif __name__ == '__main__':\n try:\n nodes_handler.kauthamRosNode(\"roscore\")\n nodes_handler.kauthamRosNode(\"start\")\n\n #Variable for While loop Condition\n run_code=True\n\n #While Loop for continue or exit the code.\n while run_code:\n\n #Loading Configurations From Json File\n config= dict()\n path=sys.argv\n if len(path)<2:\n print(\"Please Pass Config File Path:\")\n exit()\n else:\n filepath=path[1]\n if os.path.isfile(filepath):\n\n with open(filepath) as filename:\n\n #dictionary with configurations from json file.\n config=json.load(filename)\n else:\n print(\"Please Enter a valid Config File Path:\")\n exit()\n\n\n default_or_not=\"c\"\n #User Defined Settings.\n if default_or_not==\"c\":\n\n Generate_tf=config[\"Should_Save_Taskfile\"]\n #Flag for Taskfiles Generation.\n Flag_tf=False\n\n if Generate_tf==\"y\":\n Flag_tf=True\n\n #Asking user that weather to generate txt or numpy file or not.\n #Generate_txt=input(\"Do you Want to Generate DataSet file or not?(y/n):\").lower()\n Generate_txt=config[\"Should_Save_DataSet\"]\n #Flag should be true if we want to generate .txt or numpy file or not.\n Flag=False\n\n #Setting Flag true if user input y.\n if Generate_txt==\"y\":\n #Asking for Format of DataSet file.\n #format=input(\"Enter The Format:i.e .txt or .npy:\")\n format=config[\"Format_Dataset_File\"]\n Flag=True\n\n #Running for 2D.\n\n #Assigning Values From The JSON File\n rn=config[\"Dimension\"]\n no_of_env=config[\"Number_Of_Env\"]\n no_of_paths=config[\"Number_of_Paths\"]\n no_of_obstacles=config[\"Number_of_Obstacles\"]\n should_visualize_path_and_PointCloud=config[\"Should_Visualize_Path_and_ObstacleCloud\"]\n size_of_obstacles=config[\"Size_of_Obstacles\"]\n\n if rn==2:\n\n #Main Function Imported From Functions.py\n # 1st Parameter is for r2=2D and r3=3D\n # 2nd is for Number of Paths to be computed.\n # 3rd is for weather to create Dataset or not.\n # 4th is for format of the Dataset File i.e txt or numpy etc.\n # 5th is for Number of Enviroments\n\n kauthamClient(\"r2\", no_of_paths,Flag,format,no_of_env,Flag_tf,no_of_obstacles,size_of_obstacles,rn)\n\n #Running for 3D.\n elif rn==3:\n kauthamClient(\"r3\", no_of_paths,Flag,format,no_of_env,Flag_tf,no_of_obstacles,size_of_obstacles,rn)\n #Running for both 2D and 3D.\n elif rn==1:\n kauthamClient(\"r2\", no_of_paths, Flag,format,no_of_env,Flag_tf,no_of_obstacles,size_of_obstacles,rn)\n kauthamClient(\"r3\", no_of_paths, Flag,format,no_of_env,Flag_tf,no_of_obstacles,size_of_obstacles,rn)\n\n #Asking to run again or exit\n choice=input(\"\\n\\nDo You Want To Run Again(y/n):\")\n if choice==\"n\":\n run_code=False\n\n except rospy.ROSInterruptException:\n pass\n","sub_path":"Training_Data_Generator/data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":14598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"492435593","text":"# This file will use the Tweepy Cursor API to reply to mentions, follow users that follow us, and a backup like and retweet\n# imports tweepy, time, and the create_api function from config.py\nimport tweepy\nimport 
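The __main__ block of data_handler.py above reads its settings from a JSON file passed as the first argument. A config.json consistent with the keys that block accesses might look like this (all values are placeholders; "Dimension" is 2 for R2, 3 for R3, or 1 for both):

{
    "Should_Save_Taskfile": "y",
    "Should_Save_DataSet": "y",
    "Format_Dataset_File": ".npy",
    "Dimension": 2,
    "Number_Of_Env": 10,
    "Number_of_Paths": 100,
    "Number_of_Obstacles": 7,
    "Size_of_Obstacles": 5,
    "Should_Visualize_Path_and_ObstacleCloud": "n"
}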
time\nfrom config import create_api\n\n# Define a follow_followers function that accepts api, checks for followers that are not yet followed, then follows them\n\n\n# Define a check_mentions function that accepts api, keywords, and since_id, follow and reply to the user if user has mentioned us\ndef check_mentions(api, keywords, since_id):\n    new_since_id = since_id\n    for tweet in tweepy.Cursor(api.mentions_timeline,\n                               since_id=since_id).items(100):\n        new_since_id = max(tweet.id, new_since_id)\n        if tweet.in_reply_to_status_id is not None:\n            continue\n        if any(keyword in tweet.text.lower() for keyword in keywords):\n            if not tweet.user.following:\n                tweet.user.follow()\n\n            api.update_status(status=\"\\\"while(!( succeed = try_again() ) )\\\" \\nZero To Mastery, ZTMBot to the rescue! \\nhttps://zerotomastery.io/\",\n                              in_reply_to_status_id=tweet.id)\n    return new_since_id\n\n\n# Define a fav_retweet function that accepts api, builds a search query from the terms and uses the tweepy.Cursor object to iterate over up to 100 matching tweets\ndef fav_retweet(api):\n    '''\n    This function searches for tweets matching the search criteria,\n    likes each tweet that has not been liked yet and\n    retweets each tweet that has not been retweeted yet\n    '''\n    search = [\"#ZTM\", \"#Zerotomastery\"]\n    # api.search expects a query string, not a list, so OR the terms together\n    for tweet in tweepy.Cursor(api.search, q=\" OR \".join(search)).items(100):\n        try:\n            # favorited/retweeted are status flags; calling favorite()/retweet()\n            # as a test (as the original code did) would act on the tweet instead\n            if not tweet.favorited:\n                tweet.favorite()\n                print(\"I have liked the tweet\")\n            if not tweet.retweeted:\n                tweet.retweet()\n                print('Retweeted the tweet')\n        except tweepy.TweepError as e:\n            print(e.reason)\n        except StopIteration:\n            break\n\n\n# Define a main function to connect to the api and create a since_id counter, call all above functions\ndef main():\n    api = create_api()\n    since_id = 1\n    keywords = [\"#ZTM\", \"#Zerotomastery\",\n                \"#ztm\", \"zerotomastery\", \"ZeroToMastery\"]\n    while True:\n        since_id = check_mentions(api, keywords, since_id)\n        fav_retweet(api)\n        time.sleep(60)\n\n\n# if run as a script, call the main function\nif __name__ == \"__main__\":\n    main()\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"550250462","text":"# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom bentoml.exceptions import BentoMLException\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_s3_url(url):\n    \"\"\"\n    Check if url is an s3, s3n, or s3a url\n    \"\"\"\n    try:\n        return urlparse(url).scheme in [\"s3\", \"s3n\", \"s3a\"]\n    except ValueError:\n        return False\n\n\ndef upload_directory_to_s3(\n    upload_directory_path, region, bucket_name, s3_path_prefix=''\n):\n    s3_client = boto3.client('s3', region)\n    try:\n        for root, _, file_names in 
os.walk(upload_directory_path):\n relative_path_to_upload_dir = os.path.relpath(root, upload_directory_path)\n if relative_path_to_upload_dir == '.':\n relative_path_to_upload_dir = ''\n for file_name in file_names:\n key = os.path.join(\n s3_path_prefix, relative_path_to_upload_dir, file_name\n )\n logger.debug(\n 'Uploading %s to s3 %s', file_name, bucket_name + '/' + key\n )\n s3_client.upload_file(os.path.join(root, file_name), bucket_name, key)\n except ClientError as error:\n raise BentoMLException(\n \"Failed to upload directory to s3 bucket {}, error: {}\".format(\n bucket_name, str(error)\n )\n )\n\n\ndef create_s3_bucket_if_not_exists(bucket_name, region):\n s3_client = boto3.client('s3', region)\n try:\n s3_client.get_bucket_acl(Bucket=bucket_name)\n logger.debug(\"Found bucket %s in region %s already exist\", bucket_name, region)\n except ClientError as error:\n if error.response and error.response['Error']['Code'] == 'NoSuchBucket':\n logger.debug('Creating s3 bucket: %s in region %s', bucket_name, region)\n\n # NOTE: boto3 will raise ClientError(InvalidLocationConstraint) if\n # `LocationConstraint` is set to `us-east-1` region.\n # https://github.com/boto/boto3/issues/125.\n # This issue still show up in boto3 1.13.4(May 6th 2020)\n try:\n s3_client.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={'LocationConstraint': region},\n )\n except ClientError as s3_error:\n if (\n s3_error.response\n and s3_error.response['Error']['Code']\n == 'InvalidLocationConstraint'\n ):\n logger.debug(\n 'Special s3 region: %s, will attempt create bucket without '\n '`LocationConstraint`',\n region,\n )\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n raise s3_error\n else:\n raise error\n\n\ndef is_s3_bucket_exist(bucket_name, region):\n s3_client = boto3.client('s3', region)\n try:\n s3_client.get_bucket_acl(Bucket=bucket_name)\n return True\n except ClientError as error:\n if error.response and error.response['Error']['Code'] == 'NoSuchBucket':\n return False\n else:\n raise error\n\n\ndef download_directory_from_s3(download_dest_directory, s3_bucket, path_prefix):\n \"\"\" Download directory from s3 bucket to given directory.\n Args:\n download_dest_directory: String\n s3_bucket: String\n path_prefix: String\n\n Returns: None\n \"\"\"\n s3_client = boto3.client('s3')\n try:\n list_content_result = s3_client.list_objects(\n Bucket=s3_bucket, Prefix=path_prefix\n )\n for content in list_content_result['Contents']:\n file_name = content['Key'].split('/')[-1]\n file_path = os.path.join(download_dest_directory, file_name)\n if not os.path.isfile(file_path):\n s3_client.download_file(s3_bucket, content['Key'], file_path)\n else:\n logger.error('File %s already exists', file_path)\n except ClientError as e:\n logger.error('Error getting object from bucket %s, %s', s3_bucket, e)\n raise e\n","sub_path":"bentoml/utils/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243574033","text":"from django.shortcuts import render\nfrom .models import departments, inventory\nfrom .models import Issue_list, count_division, storage_location\nfrom accounts.models import StudentProfile, Facultyprofile\nfrom .decorators import student_only, faculty_only\nimport json\n\n\n# Create your views here.\n\n\n@student_only\ndef StudentDashboard(request):\n # show items\n if request.method == 'GET':\n Branch = departments.objects.filter(\n 
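A short usage sketch for the S3 helpers in bentoml/utils/s3.py above (the bucket, region and paths are made-up placeholders, and the import path simply mirrors the file's location):

from bentoml.utils.s3 import (
    is_s3_url,
    create_s3_bucket_if_not_exists,
    upload_directory_to_s3,
)

assert is_s3_url("s3://my-bento-bucket/models")   # s3, s3n and s3a schemes all pass
create_s3_bucket_if_not_exists("my-bento-bucket", "us-west-2")
upload_directory_to_s3(
    "/tmp/saved_bento",              # local directory to walk
    "us-west-2",                     # region for the boto3 client
    "my-bento-bucket",
    s3_path_prefix="bentos/v1",      # key prefix inside the bucket
)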
department=(StudentProfile.objects.get(\n                username=request.session['id'])).Branch)\n        items = inventory.objects.filter(\n            item_department=Branch[0].id)\n\n        return render(request,\n                      \"inventory/studentdashboarddemo.html\",\n                      {'data': items}\n                      )\n\n    # # Issue request\n    if request.method == 'POST':\n        # set session['lab'] or send it via the request\n        lab = request.session.get('lab')\n        # use the 'objects' manager with get(): '.object' was a typo, and\n        # filter() would return a queryset, which has no 'incharge' attribute\n        faculty = storage_location.objects.get(location=lab).incharge\n        student = StudentProfile.objects.get(username=request.session[\"id\"])\n        # plain Django exposes the raw request payload as request.body\n        json_data = json.loads(request.body)\n        # create() builds and saves the row; the manager has no save() method\n        Issue_list.objects.create(\n            item_list=json_data,\n            student=student,\n            faculty=faculty\n        )\n        # 'items' from the GET branch does not exist here, so render without it\n        return render(request, \"inventory/studentdashboard.html\")\n    # Issue items\n    # if request.method == 'POST':\n    #     json_data = json.loads(request.body)\n    #     for item in json_data:\n\n\n@faculty_only\ndef FacultyDashboard(request):\n    \"\"\"\n    Shows the issue requests to the faculty concerned\n    \"\"\"\n    if request.method == 'GET':\n        profile = Facultyprofile.objects.get(id=request.session['id'])\n        data = Issue_list.objects.filter(faculty=profile)\n        return render(request,\n                      \"inventory/facultydashboard.html\",\n                      {'data': data}\n                      )\n\n\n@faculty_only\ndef IssueConfirm(request):\n    \"\"\"\n    Approve the issue request\n    made by the student\n    \"\"\"\n    if request.method == \"POST\":\n        # if request['json']:\n        json_data = json.loads(request.body)\n        # update the issue list: approve this faculty's pending requests\n        # (the original Issue_list.objects.get().update(...) call is invalid,\n        # since update() exists on querysets, not on model instances)\n        profile = Facultyprofile.objects.get(id=request.session['id'])\n        Issue_list.objects.filter(faculty=profile).update(status=True)\n        # update items in the model\n        for item in json_data:\n            # updates the count of every item in inventory; assumes\n            # count_division exposes a class-level update helper and that\n            # each item in the payload carries an \"id\"\n            count_division.update(item_name=item[\"id\"])\n    if request.method == 'GET':\n        # profile = Facultyprofile.objects.get(id=request.session['id'])\n        return render(request, \"inventory/facultydashboard.html\")\n","sub_path":"inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"105245834","text":"'''\nCreated on Feb 18, 2019\n\n@author: Jake\n'''\nfrom digi.xbee.devices import XBeeDevice, RemoteXBeeDevice\nimport RobotInstruction\nfrom builtins import input\nfrom digi.xbee.models.address import XBee16BitAddress, XBee64BitAddress\n\n\nCOMport = 'COM9'\nbaud_rate = 9600\nremote_address = '0008'\ntransmitDevice = XBeeDevice(COMport,baud_rate)\nremoteDevice = RemoteXBeeDevice(transmitDevice, XBee64BitAddress.from_hex_string(remote_address), XBee16BitAddress.from_hex_string(remote_address))\n\n\ndef main():\n    transmitDevice.close()\n    \n    print('transmitting to: ')\n    print(remoteDevice.get_16bit_addr())\n    cont = 'y'\n    while(cont != 'q'):\n        try:\n            transmitDevice.open()\n            instruction = getCommand()\n            \n            #print(hex(instruction))\n            sendInstructions(instruction)\n        except Exception as e:\n            print(\"failed to send data\")\n            print(e)\n            transmitDevice.close()\n        transmitDevice.close()\n        cont = input('Continue?')\n    transmitDevice.close()\n    \ndef getCommand():\n    cmd = input(\"Specify a Command: \")\n    dist = input(\"Distance: \")\n    return RobotInstruction.create_instruction_literal(cmd,int(dist)) #create instruction for actual program\n\ndef sendInstructions(data):\n    transmitDevice.send_data(remoteDevice, data) #sends the data to the remote device\n    \nif __name__ == '__main__':\n    main()","sub_path":"src/XBeeTransmit.py","file_name":"XBeeTransmit.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"607719721","text":"\n\n# 4.8 FIRST COMMON 
ANCESTOR\nclass TreeNode:\n    def __init__(self, data):\n        self.data = data\n        self.left = None\n        self.right = None\n\n\ndef get_common_ancestor(root, p, q):\n    # Error check that nodes p, q exist on tree\n    if not covers(root, p) or not covers(root, q):\n        return None\n    return ancestor_helper(root, p, q)\n\n\ndef ancestor_helper(root, p, q):\n    # If node is found\n    if root is None or root == p or root == q:\n        return root\n\n    # Checks if nodes are in the same subtree; if not, we found the ancestor node\n    p_on_left = covers(root.left, p)\n    q_on_left = covers(root.left, q)\n    if p_on_left != q_on_left:\n        return root\n\n    # Recurse into the child subtree that contains both nodes; wrapping it in a\n    # new TreeNode (as the original code did) would detach it from the real tree\n    child_side = root.left if p_on_left else root.right\n    return ancestor_helper(child_side, p, q)\n\n\ndef covers(root, p):\n    if root is None:\n        return False\n    if root == p:\n        return True\n    return covers(root.left, p) or covers(root.right, p)\n","sub_path":"Trees/CTCI/first_common_ancestor.py","file_name":"first_common_ancestor.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"166677612","text":"# DROP TABLES\n\nsongplay_table_drop = \"DROP TABLE IF EXISTS songplays;\"\nuser_table_drop = \"DROP TABLE IF EXISTS users;\"\nsong_table_drop = \"DROP TABLE IF EXISTS songs;\"\nartist_table_drop = \"DROP TABLE IF EXISTS artists;\"\ntime_table_drop = \"DROP TABLE IF EXISTS time;\"\n\n# CREATE TABLES\n\nsongplay_table_create = (\n    \"CREATE TABLE IF NOT EXISTS songplays (\\\n    songplay_id SERIAL PRIMARY KEY, \\\n    start_time BIGINT NOT NULL,\\\n    user_id INTEGER, \\\n    level VARCHAR(10), \\\n    song_id VARCHAR(30),\\\n    artist_id VARCHAR(30),\\\n    session_id INTEGER,\\\n    location VARCHAR(200)\\\n    )\\\n    \"\n)\n\n# songplay_table_create FK CONSTRAINTS\n# CONSTRAINT fk_user_id FOREIGN KEY(user_id) REFERENCES users(user_id),\\\n# CONSTRAINT fk_song_id FOREIGN KEY(song_id) REFERENCES songs(song_id),\\\n# CONSTRAINT fk_artist_id FOREIGN KEY(artist_id) REFERENCES artists(artist_id),\\\n# CONSTRAINT fk_time_id FOREIGN KEY(start_time) REFERENCES time(start_time)\\\n\nuser_table_create = (\n    \"CREATE TABLE IF NOT EXISTS users (\\\n    user_id INTEGER PRIMARY KEY, \\\n    first_name VARCHAR(100), \\\n    last_name VARCHAR(100),\\\n    gender VARCHAR(1), \\\n    level VARCHAR(10)\\\n    )\\\n    \")\n\nsong_table_create = (\n    \"CREATE TABLE IF NOT EXISTS songs (\\\n    song_id VARCHAR(30) PRIMARY KEY, \\\n    title VARCHAR(200), \\\n    artist_id VARCHAR(30), \\\n    year INTEGER, \\\n    duration NUMERIC\\\n    )\"\n)\n\nartist_table_create = (\n    \"CREATE TABLE IF NOT EXISTS artists (\\\n    artist_id VARCHAR(50) PRIMARY KEY, \\\n    name VARCHAR (300),\\\n    location VARCHAR(300),\\\n    latitude VARCHAR(300),\\\n    longitude VARCHAR(300)\\\n    )\"\n)\n\ntime_table_create = (\n    \"CREATE TABLE IF NOT EXISTS time(\\\n    start_time BIGINT PRIMARY KEY,\\\n    hour INTEGER,\\\n    day INTEGER,\\\n    week INTEGER,\\\n    month INTEGER,\\\n    year INTEGER,\\\n    weekday INTEGER\\\n    )\"\n)\n\n# INSERT RECORDS\n\nsongplay_table_insert = (\n    \"INSERT INTO songplays (start_time, user_id, level, song_id, artist_id, session_id, location) \\\n    VALUES (%s, %s, %s, %s, %s, %s, %s) \\\n    ON CONFLICT DO NOTHING\"\n)\n\nuser_table_insert = (\n    \"INSERT INTO users (user_id, first_name, last_name, gender, level) \\\n    VALUES (%s, %s, %s, %s, %s)\\\n    ON CONFLICT (user_id) DO UPDATE SET level=EXCLUDED.level\"\n)\n\nsong_table_insert = (\n    \"INSERT INTO songs (song_id, title, artist_id, year, duration) \\\n    VALUES (%s, %s, %s, %s, %s)\\\n    ON CONFLICT DO 
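A quick usage sketch for first_common_ancestor.py above, on a small hand-built tree (the node values are arbitrary):

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)

p, q = root.left.left, root.left.right       # nodes 4 and 5
print(get_common_ancestor(root, p, q).data)  # -> 2, their shared parent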
NOTHING\"\n)\n\nartist_table_insert = (\n \"INSERT INTO artists (artist_id, name, location, latitude, longitude) \\\n VALUES (%s, %s, %s, %s, %s)\\\n ON CONFLICT DO NOTHING\"\n)\n\n\ntime_table_insert = (\n \"INSERT INTO time (start_time, hour, day, week, month, year, weekday)\\\n VALUES (%s, %s, %s, %s, %s, %s, %s)\\\n ON CONFLICT DO NOTHING\"\n)\n\n# FIND SONGS\n\nsong_select = (\n \"SELECT * FROM songs s JOIN artists a USING (artist_id) WHERE s.title = %s AND a.name = %s AND s.duration = %s\"\n)\n\n# QUERY LISTS\n\ncreate_table_queries = [user_table_create,\n artist_table_create, time_table_create, song_table_create, songplay_table_create]\ndrop_table_queries = [songplay_table_drop, user_table_drop,\n song_table_drop, artist_table_drop, time_table_drop]\n","sub_path":"sql_queries.py","file_name":"sql_queries.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"580000994","text":"#\n# Copyright (C) 2017 Murata Manufacturing Co.,Ltd.\n#\n\n##\n# @brief AP main function.\n# @author E2N3\n# @date 2018.11.09\n\n# -*- coding: utf-8 -*-\n\nimport json\nimport sys\nimport threading\nfrom Debug import Debug_GetObj\nfrom CLS_Define import COM_DEF\nfrom tx_snd import snd_rsp_cmd\n\n\n##\n# @brief Identify the command ID and call the method of the AP_FUNC class.\n# @param cls_ap_func AP function class (class object)\n# @param l_com_hdr_info command header parameter\n# @param d_tlv_param tlv parameter \\n\n# [\"SecurityType\"] Security type\n# @retval d_ap_rply response data \\n\n# [\"Result\"] value of the result \\n\n# - Success : COM_DEF.i_RET_SUCCESS \\n\n# - Failure : Value other than COM_DEF.i_RET_SUCCESS\ndef call_apfunc(cls_ap_func, l_com_hdr_info, d_tlv_param):\n\n d_ap_rply = {}\n\n # Get debug info\n Dbg = Debug_GetObj(COM_DEF.i_MODULE_AP)\n\n Dbg.log(COM_DEF.TRACE, \"[S] call_apfunc\")\n\n # get command id\n i_cmd_id = l_com_hdr_info[0][2]\n\n Dbg.log(COM_DEF.DEBUG,\n \"[0x%04x] COMMAND : 0x%04x\"\n % (l_com_hdr_info[0][1], i_cmd_id))\n\n if COM_DEF.i_CMD_Attach == i_cmd_id:\n d_ap_rply = cls_ap_func.attach(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetCurrentTime == i_cmd_id:\n d_ap_rply = cls_ap_func.date(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetSsid == i_cmd_id:\n d_ap_rply = cls_ap_func.ssid(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetSecurity == i_cmd_id:\n if \"SecurityType\" in d_tlv_param:\n if COM_DEF.i_SecurityType_Open == d_tlv_param[\"SecurityType\"]:\n d_ap_rply = cls_ap_func.open(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_SecurityType_Wep == d_tlv_param[\"SecurityType\"]:\n d_ap_rply = cls_ap_func.wep(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_SecurityType_Wpa == d_tlv_param[\"SecurityType\"]:\n d_ap_rply = cls_ap_func.wpa(l_com_hdr_info, d_tlv_param)\n else:\n Dbg.log(COM_DEF.ERROR,\n \"Security Type Err !! : \" +\n str(d_tlv_param[\"SecurityType\"]))\n d_ap_rply[\"Result\"] = COM_DEF.i_RET_TLV_ABNORMAL\n else:\n Dbg.log(COM_DEF.ERROR,\n \"Security Type parameter is nothing !! 
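The statements in sql_queries.py above use psycopg2-style %s placeholders, so a caller binds values as a tuple; a minimal, hypothetical driver (the connection string and values are made up):

import psycopg2
from sql_queries import song_table_insert

conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
cur.execute(song_table_insert, ("SOABC12345", "Some Title", "ARXYZ67890", 2018, 215.5))
conn.commit()
conn.close()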
\")\n d_ap_rply[\"Result\"] = COM_DEF.i_RET_TLV_ABNORMAL\n\n elif COM_DEF.i_CMD_SetChannel == i_cmd_id:\n d_ap_rply = cls_ap_func.channel(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetCountryCode == i_cmd_id:\n d_ap_rply = cls_ap_func.country(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetStealthMode == i_cmd_id:\n d_ap_rply = cls_ap_func.stealth(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetRadioOutput == i_cmd_id:\n d_ap_rply = cls_ap_func.controlbss(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_GetStaList == i_cmd_id:\n d_ap_rply = cls_ap_func.stalist(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetConnectionLimit == i_cmd_id:\n d_ap_rply = cls_ap_func.limit(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_Set11n == i_cmd_id:\n d_ap_rply = cls_ap_func.control11n(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_Detach == i_cmd_id:\n d_ap_rply = cls_ap_func.detach(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_TestReady == i_cmd_id:\n d_ap_rply = cls_ap_func.test_ready(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_StartDhcpd == i_cmd_id:\n d_ap_rply = cls_ap_func.dhcpd(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_SetIpInfo == i_cmd_id:\n d_ap_rply = cls_ap_func.setipinfo(l_com_hdr_info, d_tlv_param)\n elif COM_DEF.i_CMD_GetIpInfo == i_cmd_id:\n d_ap_rply = cls_ap_func.getipinfo(l_com_hdr_info, d_tlv_param)\n else:\n Dbg.log(COM_DEF.ERROR,\n \"[0x%04x] command 0x%04x not supported\"\n % (l_com_hdr_info[0][1], i_cmd_id))\n d_ap_rply[\"Result\"] = COM_DEF.i_RET_COMHDR_ABNORMAL\n\n Dbg.log(COM_DEF.TRACE, \"[E] call_apfunc\")\n\n return d_ap_rply\n\n\n##\n# @brief It receives the queue notification from the common reception thread\n# and calls the AP_FUNC class method. \\n\n# Receive the result and send the response.\n# @param que_main queue used by this module and main_ctrl_thread\n# (queue class object)\n# @param cls_soc socket used for sending response command to MC\n# (clas object)\n# @param s_device_type device type\n# @param s_host MC IP Address\n# @param s_model_name AP model name. 
(AP folder name)\n# @retval None\ndef ap_ctrl_thread(que_main, cls_soc, s_device_type, s_host, s_model_name):\n\n d_ap_rply = {}\n\n # Get debug info\n Dbg = Debug_GetObj(COM_DEF.i_MODULE_AP)\n\n Dbg.log(COM_DEF.TRACE, \"[S] ap_ctrl_thread\")\n\n # read environment file\n s_env_file = \"./device/AP/env.json\"\n fp = open(s_env_file, 'r')\n d_env_data = json.load(fp)\n s_ap_name = d_env_data[\"DeviceName\"]\n fp.close()\n\n Dbg.log(COM_DEF.INFO, \"Device Name : \" + s_ap_name)\n\n sys.path.append(\"./device/AP/\" + s_ap_name + \"/\")\n from Control import AP_FUNC\n\n # Get API function\n cls_ap_func = AP_FUNC(cls_soc, s_host, s_model_name)\n\n while(1):\n Dbg.log(COM_DEF.INFO, \"wait queue...\")\n\n # wait to queue\n l_decode_data = que_main.get()\n\n Dbg.log(COM_DEF.TRACE, \"queue get data\")\n\n # get comhdr param\n l_com_hdr_info = l_decode_data[0][0]\n # get tlv param\n d_tlv_param = l_decode_data[0][1]\n\n d_ap_rply = call_apfunc(cls_ap_func, l_com_hdr_info, d_tlv_param)\n\n # send response command\n snd_rsp_cmd(l_com_hdr_info, d_ap_rply, cls_soc,\n COM_DEF.i_MODULE_AP, s_device_type)\n\n # while end\n\n Dbg.log(COM_DEF.TRACE, \"[E] ap_ctrl_thread\")\n\n\n##\n# @brief Start module main process.\n# @param que_main queue used by this module and main_ctrl_thread\n# (queue class object)\n# @param cls_soc socket used for sending response command to MC\n# (clas object)\n# @param s_device_type device type\n# @param d_DevConfigInfo device configuration info\n# @retval None\ndef module_start(que_main, cls_soc, s_device_type, d_DevConfigInfo):\n\n s_host = d_DevConfigInfo[\"ExHost\"]\n s_model_name = d_DevConfigInfo[\"ModelName\"]\n ap_th = threading.Thread(target=ap_ctrl_thread,\n args=(que_main, cls_soc, s_device_type,\n s_host, s_model_name, ),\n name=\"AP_main\")\n ap_th.setDaemon(True)\n ap_th.start()\n","sub_path":"Pytest/DPFIM_5535_Pytest/Others/src/device/AP/AP_main.py","file_name":"AP_main.py","file_ext":"py","file_size_in_byte":6695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"577732171","text":"# First solution\ndivider_seven = [i for i in range(1, 100000) if i % 7 == 0]\ndivider_three = [i for i in range(1, 100000) if i % 3 == 0]\nanswer = [i for i in divider_seven if i in divider_three]\nprint(answer)\n# Second solution (As all of the elements of this list is unique by the task,\n# we can use Sets and built-in function of intersect)\n# this works much faster due to hashing nature of sets\n# although much less readable as sets are unordered\ndivider_seven_2 = {i for i in range(1, 100000) if i % 7 == 0}\ndivider_three_2 = {i for i in range(1, 100000) if i % 3 == 0}\nanswer_2 = divider_seven_2.intersection(divider_three_2)\nprint(answer_2)","sub_path":"Homework_Nine/Task_Two.py","file_name":"Task_Two.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"372125290","text":"import multiprocessing as mp\nimport time\n\n\ndef f(q):\n q.put([22, None, \"hello\"])\n time.sleep(3)\n\n\nif __name__ == \"__main__\":\n q = mp.Queue()\n p = mp.Process(target=f, args=(q,))\n p.start()\n print(q.get())\n p.join()\n print(\"p over\")\n","sub_path":"day10_161211/study_code/进程间通信.py","file_name":"进程间通信.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"260975592","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Interact with the Application Framework 
Directory-Sync Service API.\n\nTo obtain rich information about users and devices for the purposes of\nreporting and policy enforcement, cloud-based applications need access\nto directory information. However, most directories are located\non-premise so they cannot be accessed by cloud-based applications.\nThe Directory Sync Service allows cloud-based applications to access\ndirectory data by using an on-premise agent to collect it, and then\ntransferring the data to the cloud-based Directory-Sync Service.\n\nExamples:\n Refer to the examples provided with this library.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nimport logging\n\nfrom .httpclient import HTTPClient\n\n\nclass DirectorySyncService(object):\n \"\"\"An Application Framework Directory-Sync Service Instance.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n\n Parameters:\n session (HTTPClient): :class:`~pancloud.httpclient.HTTPClient` object. Defaults to ``None``.\n url (str): URL to send API requests to. Later combined with ``port`` and :meth:`~request` ``path`` parameter.\n\n Args:\n **kwargs: Supported :class:`~pancloud.httpclient.HTTPClient` parameters.\n\n \"\"\"\n self.kwargs = kwargs.copy() # used for __repr__\n self.session = kwargs.pop('session', None)\n self._httpclient = self.session or HTTPClient(**kwargs)\n self.url = self._httpclient.url\n self._debug = logging.getLogger(__name__).debug\n\n def __repr__(self):\n for k in self.kwargs.get('headers', {}):\n if k.lower() == 'authorization':\n x = dict(self.kwargs['headers'].items())\n x[k] = '*' * 6 # starrify token\n return '{}({}, {})'.format(\n self.__class__.__name__,\n ', '.join('%s=%r' % (x, _) for x, _ in\n self.kwargs.items() if x != 'headers'),\n 'headers=%r' % x\n )\n return '{}({})'.format(\n self.__class__.__name__,\n ', '.join(\n '%s=%r' % x for x in self.kwargs.items())\n )\n\n def attributes(self, **kwargs): # pragma: no cover\n \"\"\"Retrieve the attribute configuration object.\n\n Retrieves a mapping that identifies the custom directory\n attributes configured for the Directory SyncService instance,\n and the mapping of the custom attributes to standard directory\n attributes.\n\n Args:\n **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\n Returns:\n requests.Response: Requests Response() object.\n\n Examples:\n Refer to ``directory_attributes.py`` example.\n\n \"\"\"\n path = \"/directory-sync-service/v1/attributes\"\n r = self._httpclient.request(\n method=\"GET\",\n path=path,\n url=self.url,\n **kwargs\n )\n return r\n\n def count(self, object_class=None, params=None, **kwargs): # pragma: no cover\n \"\"\"Retrieve the attribute configuration object.\n\n Retrieve a count of all directory entries that belong to the\n identified objectClass. The count is limited to a single domain.\n\n Args:\n params (dict): Payload/request dictionary.\n object_class (str): Directory object class.\n **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\n Returns:\n requests.Response: Requests Response() object.\n\n Examples:\n Coming soon.\n\n \"\"\"\n path = \"/directory-sync-service/v1/{}/count\".format(\n object_class\n )\n r = self._httpclient.request(\n method=\"GET\",\n path=path,\n url=self.url,\n params=params,\n **kwargs\n )\n return r\n\n def domains(self, **kwargs): # pragma: no cover\n \"\"\"Retrieves a list of all domains available.\n\n Directory Sync Service can be configured to read directory\n entries from multiple domains. 
This API retrieves all the\n domains from which your Directory Sync Service instance is\n configured to read entries. Domains are identified in both DNS\n and distinguished name format.\n\n Args:\n **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\n Returns:\n requests.Response: Requests Response() object.\n\n Examples:\n Coming soon.\n\n \"\"\"\n path = \"/directory-sync-service/v1/domains\"\n r = self._httpclient.request(\n method=\"GET\",\n path=path,\n url=self.url,\n **kwargs\n )\n return r\n\n def query(self, object_class=None, json=None, **kwargs): # pragma: no cover\n \"\"\"Query data stored in directory.\n\n Retrieves directory data by querying a Directory Sync Service\n cloud-based instance. The directory data is stored with the\n Directory Sync Service instance using an agent that is installed\n in the customer's network.This agent retrieves directory data\n from the customer's Active Directory, and then sends it to the\n cloud-based Directory Sync Service instance.\n\n Args:\n object_class (str): Directory object class.\n json (dict): Payload/request body.\n **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\n Returns:\n requests.Response: Requests Response() object.\n\n Examples:\n Coming soon.\n\n \"\"\"\n path = \"/directory-sync-service/v1/{}\".format(object_class)\n r = self._httpclient.request(\n method=\"POST\",\n url=self.url,\n json=json,\n path=path,\n **kwargs\n )\n return r\n","sub_path":"pancloud/directorysync.py","file_name":"directorysync.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"620307474","text":"import pygame\r\nimport random\r\nimport math\r\n\r\n# py-game variables\r\nwindowWidth = 1000\r\nwindowHeight = 800\r\npygame.init()\r\npygame.display.set_caption(str(\"Quad-copter balance\"))\r\nclock = pygame.time.Clock()\r\nblack = (0, 0, 0)\r\nscreen = pygame.display.set_mode((windowWidth, windowHeight))\r\nscreen.fill((255, 255, 255))\r\nloopCount = 0\r\nrun = True\r\nuserangle = 0\r\n# Fonts and other graphics\r\nmyFont = pygame.font.SysFont(\"Times New Roman\", 18)\r\ntelemetries = (140, 170, 200, 230, 260, 290, 320, 350, 380, 410)\r\ntelem = 0\r\npidcorrect = 0\r\n\r\n\r\n# Pretty self explanatory PID class\r\nclass PID:\r\n # declare PID variables\r\n pGain = 0\r\n iGain = 0\r\n dGain = 0\r\n target = 0\r\n reading = 0\r\n error = 0\r\n pTerm = 0\r\n iTerm = 0\r\n iLimit = 0.06\r\n errorSum = 0\r\n dTerm = 0\r\n errorSlope = 0\r\n lastError = 0\r\n correction = 0\r\n\r\n # For graphing, these store previous values\r\n errorlist = [0] * 196\r\n plist = [0] * 196\r\n ilist = [0] * 196\r\n dlist = [0] * 196\r\n correctlist = [0] * 196\r\n\r\n def __init__(self, pgain, igain, dgain):\r\n self.pGain = pgain\r\n self.iGain = igain\r\n self.dGain = dgain\r\n\r\n def getcorrection(self, targetvalue, currentreading):\r\n global loopCount\r\n self.target = targetvalue\r\n self.reading = currentreading\r\n self.error = self.target - self.reading\r\n self.pTerm = self.error * self.pGain\r\n self.errorSum += self.error\r\n self.iTerm = self.errorSum * self.iGain\r\n \"\"\"\r\n if self.iTerm > self.iLimit:\r\n self.iTerm = self.iLimit\r\n elif self.iTerm < -self.iLimit:\r\n self.iTerm = -self.iLimit\r\n \"\"\"\r\n if loopCount % 100 == 0:\r\n self.errorSum = 0\r\n self.errorSlope = (self.error - self.lastError)/0.1\r\n self.dTerm = self.errorSlope * self.dGain\r\n self.lastError = self.error\r\n 
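        # The correction below is the plain sum of the three terms: P reacts to
        # the current error, I to the error accumulated since the last reset
        # (every 100 loops above), and D to the error slope over the 0.1 s step.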
self.correction = self.pTerm + self.iTerm + self.dTerm\r\n self.errorlist.append(self.error)\r\n del self.errorlist[0]\r\n self.plist.append(self.pTerm)\r\n del self.plist[0]\r\n self.ilist.append(self.iTerm)\r\n del self.ilist[0]\r\n self.dlist.append(self.dTerm)\r\n del self.dlist[0]\r\n self.correctlist.append(self.correction)\r\n del self.correctlist[0]\r\n return self.correction\r\n\r\n\r\n# Quadcopter class, allowing you to make multiple Quads controlled by different PIDs\r\nclass Quadcopter:\r\n angularPos = 0\r\n angularVel = 0\r\n length = 0\r\n width = 0\r\n pivotX = 500\r\n pivotY = 300\r\n radius = 200\r\n\r\n def __init__(self, angPos, angVel, Len, Width, X, Y):\r\n self.angularPos = angPos\r\n self.angularVel = angVel\r\n self.length = Len\r\n self.width = Width\r\n self.pivotX = X\r\n self.radius = int(Len/2)\r\n self.pivotY = Y\r\n\r\n def draw(self):\r\n # Draws the quad-copter as a line\r\n # convert to radians\r\n angle = self.angularPos * 3.1415926536 / 180\r\n x1 = self.pivotX - (math.cos(angle) * self.radius)\r\n x2 = self.pivotX + (math.cos(angle) * self.radius)\r\n y1 = self.pivotY - (math.sin(angle) * self.radius)\r\n y2 = self.pivotY + (math.sin(angle) * self.radius)\r\n pygame.draw.line(screen, black, (x1, y1), (x2, y2), self.width)\r\n\r\n def disturbquad(self, wind, duration, time):\r\n # sways the quad-copter to simulate wind or imbalance\r\n wind = wind + random.randint(-1, 1) / 6\r\n wind /= 1000\r\n time *= 60\r\n if not duration == 0:\r\n duration *= 60\r\n if time < loopCount < time + duration:\r\n self.angularVel += wind\r\n else:\r\n self.angularVel += wind\r\n\r\n\r\n# Creating Quadcopter and PID objects\r\nquad = Quadcopter(80, 0, 400, 17, 500, 300)\r\npid = PID(0.2, 0.0003, 0.15)\r\n\r\n\r\ndef addTelemetry(caption, data):\r\n # this function displays the value of a variable\r\n global telem\r\n Label = myFont.render(caption + \":\", 1, black)\r\n display = myFont.render(str(round(data, 4)), 1, black)\r\n screen.blit(Label, (10, telemetries[telem]))\r\n screen.blit(display, (len(caption)*8 + 20, telemetries[telem]))\r\n telem += 1\r\n\r\n\r\ndef handleloop():\r\n # this handles quitting and also updates the loop counters\r\n global telem, run, loopCount, mousePressed\r\n telem = 0\r\n loopCount += 1\r\n if loopCount == 100000:\r\n loopCount = 0\r\n screen.fill((255, 255, 255))\r\n for e in pygame.event.get():\r\n if e.type == pygame.QUIT:\r\n run = False\r\n\r\n\r\ndef mouseinput():\r\n # Gets the mouse input to allow the user to turn the quad\r\n global userangle\r\n mousepos = pygame.mouse.get_pos()\r\n opposite = mousepos[1] - quad.pivotY\r\n adjacent = mousepos[0] - quad.pivotX\r\n if not (mousepos[0] - quad.pivotX) == 0:\r\n userangle = math.atan(opposite / adjacent) * 180 / 3.1415926535\r\n if pygame.mouse.get_pressed()[0]:\r\n quad.angularPos = userangle\r\n quad.angularVel = 0\r\n\r\n\r\ndef graphpoints(inputlist, color, scale, ypos):\r\n # Takes in a list and displays it as a line graph\r\n actualx = 10\r\n for i in range(0, len(inputlist)-2):\r\n actualx += 5\r\n pygame.draw.line(screen, color, (actualx, int(scale * (inputlist[i]) + ypos)), (actualx + 5, int(scale * (inputlist[i+1]) + ypos)), 3)\r\n\r\n\r\nwhile run:\r\n handleloop()\r\n pidcorrect = pid.getcorrection(0, quad.angularPos)\r\n # reduce the rate at which the motors update for realism\r\n if loopCount % 3 == 0:\r\n quad.angularVel += pidcorrect * 0.08\r\n # take mouse input\r\n mouseinput()\r\n # Increase Angular Position by Angular Velocity\r\n quad.angularPos += 
quad.angularVel\r\n # Graph the PID terms\r\n graphpoints(pid.errorlist, (255, 0, 0), 1, 100)\r\n graphpoints(pid.plist, (0, 255, 0), 5, 500)\r\n graphpoints(pid.ilist, (0, 0, 255), 15, 600)\r\n graphpoints(pid.dlist, (200, 190, 190), 5, 700)\r\n # Draw the Quad-copter\r\n quad.draw()\r\n # Display variables\r\n addTelemetry(\"Angular Position\", quad.angularPos)\r\n addTelemetry(\"Angular Velocity\", quad.angularVel)\r\n addTelemetry(\"error list\", len(pid.errorlist))\r\n addTelemetry(\"PID correct\", pidcorrect)\r\n pygame.display.flip()\r\n","sub_path":"FinalPIDTest.py","file_name":"FinalPIDTest.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"601512049","text":"\nfrom flask import Flask\nfrom flask import request\nfrom config.config import DevelopmentConfig as config\nfrom elasticsearch_service.search import Search\nfrom flask import jsonify\n\napp = Flask(__name__)\napp.config['DEBUG'] = config.DEBUG\napp.config['FLASK_ENV'] = config.ENV\napp.config['FLASK_APP'] = config.APP\n\n\n@app.route('/')\ndef index():\n return 'home'\n\n@app.route('/search')\ndef search():\n user_search = request.args.get('q')\n if not user_search:\n raise AssertionError('query not defined')\n\n elasticsearch_search = Search(user_search)\n results = elasticsearch_search.get_result()\n return jsonify(results)\n\n@app.route('/search/all')\ndef search_all():\n page_index = request.args.get('page_index')\n page_size = request.args.get('page_size')\n\n elasticsearch_search = Search(None)\n results = elasticsearch_search.get_all_result()\n return jsonify(results)\n\n@app.route('/search/<id>')\ndef search_by_id(id):\n elasticsearch_search = Search(None)\n results = elasticsearch_search.get_by_id(id)\n return jsonify(results)\n ","sub_path":"search-service/product_api.py","file_name":"product_api.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"565174793","text":"from flask import Flask, render_template, request, redirect\r\nfrom forms.patient_form import PatientForm\r\nfrom forms.symptom_form import SymptomForm\r\nfrom forms.disease_form import DiseaseForm\r\nimport uuid\r\nimport json\r\nimport plotly\r\nfrom sqlalchemy.sql import func\r\nimport plotly.graph_objs as go\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'key'\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://postgres:fastdagger@localhost/milev'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\ndb = SQLAlchemy(app)\r\n\r\nclass PatientHasSymptom(db.Model):\r\n __tablename__ = 'patient_has_symptom'\r\n\r\n patient_id = db.Column(db.String(8), db.ForeignKey('orm_patient.patient_id'), primary_key=True)\r\n symptom_id = db.Column(db.String(8), db.ForeignKey('orm_symptom.symptom_id'), primary_key=True)\r\n\r\n patient = db.relationship(\"OrmPatient\", back_populates=\"symptoms\")\r\n symptom = db.relationship(\"OrmSymptom\", back_populates=\"patients\")\r\n\r\nclass OrmPatient(db.Model):\r\n __tablename__ = 'orm_patient'\r\n\r\n patient_id = db.Column(db.String(8), primary_key=True)\r\n patient_age = db.Column(db.Integer, nullable=False)\r\n patient_height = db.Column(db.Float, nullable=False)\r\n patient_weight = db.Column(db.Float, nullable=False)\r\n patient_temperature = db.Column(db.Float, nullable=False)\r\n\r\n symptoms = db.relationship('PatientHasSymptom', back_populates = 
'patient')\r\n\r\n\r\nclass OrmSymptom(db.Model):\r\n __tablename__ = 'orm_symptom'\r\n\r\n symptom_id = db.Column(db.String(8), primary_key=True)\r\n description = db.Column(db.String(50), nullable=False)\r\n\r\n patients = db.relationship('PatientHasSymptom', back_populates='symptom')\r\n\r\n diseases = db.relationship('SymptomHasDisease', back_populates='symptom')\r\n\r\n\r\nclass SymptomHasDisease(db.Model):\r\n __tablename__ = 'symptom_has_disease'\r\n\r\n disease_id = db.Column(db.String(8), db.ForeignKey('orm_disease.disease_id'), primary_key=True)\r\n symptom_id = db.Column(db.String(8), db.ForeignKey('orm_symptom.symptom_id'), primary_key=True)\r\n\r\n symptom = db.relationship(\"OrmSymptom\", back_populates=\"diseases\")\r\n disease = db.relationship(\"OrmDisease\", back_populates=\"symptoms\")\r\n\r\n\r\nclass OrmDisease(db.Model):\r\n __tablename__ = 'orm_disease'\r\n\r\n disease_id = db.Column(db.String(8), primary_key=True)\r\n disease_name = db.Column(db.String(20), nullable=False)\r\n severity = db.Column(db.Integer, nullable=False)\r\n\r\n symptoms = db.relationship('SymptomHasDisease', back_populates='disease')\r\n\r\n\r\ndb.create_all()\r\n\r\ndb.session.query(PatientHasSymptom).delete()\r\ndb.session.query(SymptomHasDisease).delete()\r\ndb.session.query(OrmPatient).delete()\r\ndb.session.query(OrmSymptom).delete()\r\ndb.session.query(OrmDisease).delete()\r\n\r\ndb.create_all()\r\n\r\nNastya = OrmPatient(\r\n patient_id='Nastya',\r\n patient_age=19,\r\n patient_height=190,\r\n patient_weight=77,\r\n patient_temperature=37.7\r\n)\r\n\r\nMax = OrmPatient(\r\n patient_id='Max',\r\n patient_age=20,\r\n patient_height=166,\r\n patient_weight=56,\r\n patient_temperature=3\r\n)\r\n\r\nSerg = OrmPatient(\r\n patient_id='Serg',\r\n patient_age=29,\r\n patient_height=196,\r\n patient_weight=49,\r\n patient_temperature=37.1\r\n)\r\n\r\nKate = OrmPatient(\r\n patient_id='Kate',\r\n patient_age=43,\r\n patient_height=163,\r\n patient_weight=77,\r\n patient_temperature=37.4\r\n)\r\n\r\n\r\nURTI = OrmDisease(\r\n disease_id='J00-06',\r\n disease_name='URTI',\r\n severity=3\r\n)\r\n\r\nFlu = OrmDisease(\r\n disease_id='J10',\r\n disease_name='Flu',\r\n severity=4\r\n)\r\n\r\nMigraine = OrmDisease(\r\n disease_id='G43.0',\r\n disease_name='Migraine',\r\n severity=5\r\n)\r\n\r\nCold = OrmDisease(\r\n disease_id='J00',\r\n disease_name='Cold',\r\n severity=3\r\n)\r\n\r\ncough = OrmSymptom(\r\n symptom_id = '21122253',\r\n description = 'cough'\r\n)\r\n\r\nthroat = OrmSymptom(\r\n symptom_id = '31122253',\r\n description = 'throat pain'\r\n)\r\n\r\nhead = OrmSymptom(\r\n symptom_id = '11122253',\r\n description = 'head pain'\r\n)\r\n\r\nrelation1 = SymptomHasDisease(\r\n symptom_id = '21122253',\r\n disease_id = 'J10'\r\n)\r\n\r\nrelation2 = SymptomHasDisease(\r\n symptom_id = '21122253',\r\n disease_id = 'J00-06'\r\n)\r\n\r\n\r\nrelation3 = SymptomHasDisease(\r\n symptom_id = '31122253',\r\n disease_id = 'J00-06'\r\n)\r\n\r\n\r\nrelation4 = SymptomHasDisease(\r\n symptom_id = '31122253',\r\n disease_id = 'J00'\r\n)\r\n\r\n\r\nrelation5 = SymptomHasDisease(\r\n symptom_id = '11122253',\r\n disease_id = 'G43.0'\r\n)\r\n\r\nrelation7 = PatientHasSymptom(\r\n patient_id = 'Nastya',\r\n symptom_id = '31122253'\r\n)\r\n\r\nrelation8 = PatientHasSymptom(\r\n patient_id = 'Nastya',\r\n symptom_id = '21122253'\r\n)\r\n\r\nrelation9 = PatientHasSymptom(\r\n patient_id = 'Max',\r\n symptom_id = '11122253'\r\n)\r\n\r\nrelation10 = PatientHasSymptom(\r\n patient_id = 'Serg',\r\n symptom_id = 
'21122253'\r\n)\r\n\r\nrelation11 = PatientHasSymptom(\r\n patient_id = 'Serg',\r\n symptom_id = '31122253'\r\n)\r\n\r\ndb.session.add_all([\r\n\r\n relation1,\r\n relation2,\r\n relation3,\r\n relation4,\r\n relation5,\r\n relation7,\r\n relation8,\r\n relation9,\r\n relation10,\r\n relation11,\r\n Nastya,\r\n Max,\r\n Serg,\r\n Kate,\r\n URTI,\r\n Flu,\r\n Migraine,\r\n Cold,\r\n cough,\r\n throat,\r\n head\r\n])\r\n\r\ndb.session.commit()\r\n\r\n\r\n # symptom_id_fk = db.relationship('OrmSymptom', secondary='patient_has_symptom')\r\n\r\n\r\n# class PatientHasSymptom(db.Model):\r\n# tablename = 'patient_has_symptom'\r\n# patient_id = db.Column(db.String(20), db.ForeignKey('orm_patient.patient_id'), primary_key=True)\r\n# symptom_id = db.Column(db.String(20), db.ForeignKey('orm_symptom.symptom_id'), primary_key=True)\r\n#\r\n#\r\n# class OrmSymptom(db.Model):\r\n# tablename = 'orm_symptom'\r\n#\r\n# symptom_id = db.Column(db.String(20), primary_key=True)\r\n# description = db.Column(db.String(50), nullable=False)\r\n#\r\n# patient_id_fk = db.relationship('OrmPatient', secondary='patient_has_symptom')\r\n# disease_id_fk = db.relationship('OrmDisease', secondary='symptom_has_disease')\r\n#\r\n# class SymptomHasDisease(db.Model):\r\n# tablename = 'symptom_has_disease'\r\n# disease_id = db.Column(db.String(20), db.ForeignKey('orm_disease.disease_id'), primary_key=True)\r\n# symptom_id = db.Column(db.String(20), db.ForeignKey('orm_symptom.symptom_id'), primary_key=True)\r\n#\r\n# class OrmDisease(db.Model):\r\n# tablename = 'orm_skill'\r\n#\r\n# disease_id = db.Column(db.String(20), primary_key=True)\r\n# disease_name = db.Column(db.String(20), nullable=False)\r\n# severity = db.Column(db.Integer, nullable=False)\r\n#\r\n# symptom_id_fk = db.relationship('OrmSymptom', secondary='symptom_has_disease')\r\n\r\n\r\n\r\n@app.route('/')\r\ndef root():\r\n return render_template('index.html')\r\n\r\n@app.route('/patients')\r\ndef patients():\r\n res = db.session.query(OrmPatient).all()\r\n\r\n return render_template('patients_table.html', patients=res)\r\n\r\n@app.route('/create_patient', methods=['POST', 'GET'])\r\ndef create_patient():\r\n form = PatientForm()\r\n\r\n if request.method == 'POST':\r\n new_patient = OrmPatient(\r\n patient_id=form.patient_id.data,\r\n patient_age=form.patient_age.data,\r\n patient_height=form.patient_height.data,\r\n patient_weight=form.patient_weight.data,\r\n patient_temperature=form.patient_temperature.data\r\n )\r\n db.session.add(new_patient)\r\n db.session.commit()\r\n return render_template('success.html')\r\n elif request.method == 'GET':\r\n return render_template('patient_form.html', form=form)\r\n\r\n\r\n@app.route('/patient_edit/<id>', methods=['GET', 'POST'])\r\ndef edit_patient(id):\r\n form = PatientForm()\r\n result = db.session.query(OrmPatient).filter(OrmPatient.patient_id == id).one()\r\n\r\n if request.method == 'GET':\r\n\r\n form.patient_id.data = result.patient_id\r\n form.patient_age.data = result.patient_age\r\n form.patient_height.data = result.patient_height\r\n form.patient_weight.data = result.patient_weight\r\n form.patient_temperature.data = result.patient_temperature\r\n\r\n return render_template('edit_patient.html', form=form, form_name='edit patient')\r\n elif request.method == 'POST':\r\n\r\n result.patient_age = form.patient_age.data\r\n result.patient_height = form.patient_height.data\r\n result.patient_weight = form.patient_weight.data\r\n result.patient_temperature = form.patient_temperature.data\r\n\r\n db.session.commit()\r\n 
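# patient_id is the primary key, so it is deliberately left unchanged here\r\n 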
return redirect('/patients')\r\n\r\n@app.route('/delete_patient/<id>', methods=['GET', 'POST'])\r\ndef delete_patient(id):\r\n result = db.session.query(OrmPatient).filter(OrmPatient.patient_id == id).one()\r\n\r\n db.session.delete(result)\r\n db.session.commit()\r\n\r\n return render_template('success.html')\r\n\r\n# SYMPTOM\r\n@app.route('/symptoms')\r\ndef symptoms():\r\n res = db.session.query(OrmSymptom).all()\r\n\r\n return render_template('symptoms_table.html', symptoms=res)\r\n\r\n@app.route('/new_symptom', methods=['GET', 'POST'])\r\ndef new_symptom():\r\n form = SymptomForm()\r\n\r\n if request.method == 'POST':\r\n new_symptom = OrmSymptom(\r\n symptom_id=form.symptom_id.data,\r\n description=form.description.data\r\n )\r\n db.session.add(new_symptom)\r\n db.session.commit()\r\n return render_template('success.html')\r\n elif request.method == 'GET':\r\n return render_template('symptom_form.html', form=form)\r\n\r\n@app.route('/edit_symptom/<id>', methods=['GET', 'POST'])\r\ndef edit_symptom(id):\r\n form = SymptomForm()\r\n result = db.session.query(OrmSymptom).filter(OrmSymptom.symptom_id == id).one()\r\n\r\n if request.method == 'GET':\r\n\r\n form.symptom_id.data = result.symptom_id\r\n form.description.data = result.description\r\n\r\n return render_template('edit_symptom.html', form=form, form_name='edit symptom')\r\n elif request.method == 'POST':\r\n\r\n result.description = form.description.data\r\n\r\n db.session.commit()\r\n return redirect('/symptoms')\r\n\r\n\r\n@app.route('/delete_symptom/<id>', methods=['GET', 'POST'])\r\ndef delete_symptom(id):\r\n result = db.session.query(OrmSymptom).filter(OrmSymptom.symptom_id == id).one()\r\n\r\n db.session.delete(result)\r\n db.session.commit()\r\n\r\n return render_template('success.html')\r\n\r\n\r\n# DISEASE\r\n@app.route('/diseases')\r\ndef diseases():\r\n res = db.session.query(OrmDisease).all()\r\n\r\n return render_template('diseases_table.html', diseases=res)\r\n\r\n\r\n@app.route('/new_disease', methods=['GET', 'POST'])\r\ndef new_disease():\r\n form = DiseaseForm()\r\n\r\n if request.method == 'POST':\r\n new_disease = OrmDisease(\r\n disease_id=form.disease_id.data,\r\n disease_name=form.disease_name.data,\r\n severity=form.severity.data\r\n\r\n )\r\n db.session.add(new_disease)\r\n db.session.commit()\r\n return render_template('success.html')\r\n elif request.method == 'GET':\r\n return render_template('disease_form.html', form=form)\r\n\r\n\r\n@app.route('/edit_disease/<id>', methods=['GET', 'POST'])\r\ndef edit_disease(id):\r\n form = DiseaseForm()\r\n result = db.session.query(OrmDisease).filter(OrmDisease.disease_id == id).one()\r\n\r\n if request.method == 'GET':\r\n\r\n form.disease_id.data = result.disease_id\r\n form.disease_name.data = result.disease_name\r\n form.severity.data = result.severity\r\n\r\n return render_template('edit_disease.html', form=form, form_name='edit disease')\r\n elif request.method == 'POST':\r\n\r\n result.disease_name = form.disease_name.data\r\n result.severity = form.severity.data\r\n\r\n db.session.commit()\r\n return redirect('/diseases')\r\n\r\n\r\n@app.route('/delete_disease/<id>', methods=['GET', 'POST'])\r\ndef delete_disease(id):\r\n result = db.session.query(OrmDisease).filter(OrmDisease.disease_id == id).one()\r\n\r\n db.session.delete(result)\r\n db.session.commit()\r\n\r\n return render_template('success.html')\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n return render_template('404.html'), 404\r\n\r\n\r\n@app.route('/dashboard', methods=['GET', 'POST'])\r\ndef 
dashboard():\r\n\r\n my_query = (\r\n db.session.query(\r\n OrmPatient.patient_id,\r\n func.count(OrmSymptom.symptom_id).label('symptom_count')\r\n ).join(PatientHasSymptom, PatientHasSymptom.patient_id == OrmPatient.patient_id).join(OrmSymptom, OrmSymptom.symptom_id == PatientHasSymptom.symptom_id).\r\n group_by(OrmPatient.patient_id)\r\n ).all()\r\n\r\n dy_query = (\r\n db.session.query(\r\n OrmSymptom.description,\r\n func.count(OrmDisease.disease_id).label('disease_count')\r\n ).join(SymptomHasDisease, SymptomHasDisease.symptom_id == OrmSymptom.symptom_id).join(OrmDisease, OrmDisease.disease_id == SymptomHasDisease.disease_id).\r\n group_by(OrmSymptom.symptom_id)\r\n ).all()\r\n\r\n\r\n patient_id, symptom_count = zip(*my_query)\r\n\r\n bar = go.Bar(\r\n x=patient_id,\r\n y=symptom_count\r\n )\r\n\r\n description, disease_count = zip(*dy_query)\r\n pie = go.Pie(\r\n labels=description,\r\n values=disease_count\r\n )\r\n\r\n data = {\r\n \"bar\": [bar],\r\n \"pie\": [pie]\r\n }\r\n graphs_json = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\r\n\r\n return render_template('dashboard.html', graphsJSON=graphs_json)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.debug = True\r\n app.run()\r\n\r\n\r\n\r\n\r\n","sub_path":"laboratory2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"12380039","text":"from lib.random_libs import libtcodpy as libtcod\n\n__author__ = 'cmotevasselani'\n\n\nclass SmarterMonster:\n # AI for basic monsters\n def take_turn(self, state):\n # a basic monster takes its turn. If you can see it, it can see you\n monster = self.owner\n if libtcod.map_is_in_fov(state.fov_map, monster.x, monster.y):\n # move towards player if far away\n if monster.fighter.hp < monster.fighter.base_max_hp * .25:\n monster.move_away_from_player(state)\n elif monster.distance_to(state.player) >= 2:\n monster.move_towards(state.objects, state.game_map, state.player.x, state.player.y)\n\n # attack if close enough\n elif state.player.fighter.hp > 0:\n monster.fighter.attack(state.player, state)\n\n","sub_path":"lib/ai/smarter_monster.py","file_name":"smarter_monster.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"350172283","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n Simple example showing evaluating embedding on similarity datasets\n\"\"\"\nimport logging\nfrom six import iteritems\nfrom web.datasets.similarity import fetch_MEN, fetch_WS353, fetch_SimLex999\nfrom web.embeddings import fetch_GloVe, load_embedding\nfrom web.evaluate import evaluate_similarity\nimport sys\n\n#first argument is count method (sys.argv[1])\n#second argument is weight method (sys.argv[2])\ndef call_module(g_filename):\n # Configure logging\n logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%I:%M:%S')\n\n # Fetch GloVe embedding (warning: it might take few minutes)\n #w_glove = fetch_GloVe(corpus=\"wiki-6B\", dim=300)\n kargs = {'vocab_size':200000, 'dim':400}\n fname=g_filename\n w_custom = load_embedding(fname, format=\"glove\", normalize=True,\n lower=True, clean_words=False, load_kwargs=kargs)\n # Define tasks\n tasks = {\n \"MEN\": fetch_MEN(),\n \"WS353\": fetch_WS353(),\n \"SIMLEX999\": fetch_SimLex999()\n }\n\n # Print sample data\n for name, data in iteritems(tasks):\n print(\"Sample data from {}: pair \\\"{}\\\" and \\\"{}\\\" 
is assigned score {}\".format(name, data.X[0][0], data.X[0][1], data.y[0]))\n\n # Calculate results using helper function\n for name, data in iteritems(tasks):\n print(\"Spearman correlation of scores on {} {}\".format(name, evaluate_similarity(w_custom, data.X, data.y)))\n","sub_path":"evaluate/examples/evaluate_similarity_module.py","file_name":"evaluate_similarity_module.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"332109943","text":"from .install import InstallCommand\nfrom .list import ListCommand\nfrom .search import SearchCommand\nfrom .show import ShowCommand\nfrom .uninstall import UninstallCommand\n\ncommands = {\n InstallCommand.command: InstallCommand,\n ListCommand.command: ListCommand,\n SearchCommand.command: SearchCommand,\n ShowCommand.command: ShowCommand,\n UninstallCommand.command: UninstallCommand,\n}\n\n__all__ = [\n 'InstallCommand',\n 'ListCommand',\n 'SearchCommand',\n 'ShowCommand',\n 'UninstallCommand',\n]\n","sub_path":"package/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"187030812","text":"s = input().split()\nnums = []\nfor i in s:\n nums.append(int(i))\nn = int(input())\n\nswt = False\nfor i in range(len(nums)):\n if n > nums[i]:\n print(i+1)\n swt = True\n break\nif swt == False:\n print(len(nums)+1)\n\n\n","sub_path":"for in list/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"199956532","text":"import cv2\r\nimport json\r\nimport numpy as np\r\nimport requests\r\nimport sys\r\nfrom zipfile import ZipFile\r\nimport io\r\nimport os\r\nimport re\r\nfrom dateparser import parse\r\n\r\ndef seg(product, class_number, used_dates, zf, sample_number):\r\n datatype = \"ref\"\r\n res = requests.get('http://localhost:5000/api/datalake')\r\n mean = None\r\n std = None\r\n sample_folder = None\r\n subfolder = None\r\n hashtable = {}\r\n\r\n if res.ok:\r\n print(\"requesting information\")\r\n hashtable = res.json()\r\n for key in hashtable:\r\n if re.search(product, key) and re.search(datatype, key):\r\n folder = key\r\n if re.search(product, key) and re.search(\"sample\", key):\r\n sample_folder = key\r\n most_recent = parse(\"0\")\r\n for key in hashtable[folder]:\r\n date = parse(key)\r\n if date > most_recent and date not in used_dates:\r\n most_recent = date\r\n subfolder = key\r\n date = most_recent\r\n for number in range(len(hashtable[folder][subfolder])):\r\n hashtable2 = {}\r\n hashtable2[\"frame_type\"] = datatype\r\n hashtable2[\"time_stamp\"] = subfolder\r\n hashtable2[\"number\"] = number\r\n data = json.dumps(hashtable2)\r\n res = requests.get('http://localhost:5000/api/datalake/%s'%product, json = data)\r\n print(\"requested\")\r\n\r\n if res.ok:\r\n print(\"calibrating\")\r\n hashtable2 = res.json()\r\n frame = hashtable2[\"frame\"]\r\n #frame = cv2.UMat(np.array(frame, dtype=np.uint8))\r\n #frame = cv2.resize(frame, None, fx=0.4, fy=0.4)\r\n #list_frame = frame.tolist()\r\n list_frame = frame\r\n data = json.dumps(list_frame)\r\n res = requests.post('http://localhost:5000/api/get_ref', json = data)\r\n\r\n if res.ok:\r\n #print(\"res ok\")\r\n res = res.json()\r\n mean = res[\"mean\"]\r\n std = res[\"std\"]\r\n print(\"mean: \")\r\n print(mean)\r\n print(\"std: \")\r\n print(std)\r\n\r\n datatype = \"sample\"\r\n folder = sample_folder\r\n res = requests.get('http://localhost:5000/api/datalake')\r\n if res.ok and mean and 
std:\n print(folder)\n print(subfolder)\n total = len(hashtable[folder][subfolder])\n for number in range(total):\n hashtable = {}\n hashtable[\"frame_type\"] = datatype\n hashtable[\"time_stamp\"] = subfolder\n hashtable[\"number\"] = number\n data = json.dumps(hashtable)\n res = requests.get('http://localhost:5000/api/datalake/%s'%product, json = data)\n if res.ok:\n hashtable = res.json()\n frame = hashtable[\"frame\"]\n #frame = cv2.UMat(np.array(frame, dtype=np.uint8))\n #frame = cv2.resize(frame, None, fx=0.4, fy=0.4)\n #img = frame.tolist()\n img = frame\n hashtable = {\"img\": img, \"mean\": mean, \"std\": std}\n data = json.dumps(hashtable)\n res = requests.post('http://localhost:5000/api/segmentation', json = data)\n if res.ok and res.status_code != 204:\n print(\"%d of %d\"%(number, total))\n res = res.json()\n frame = cv2.UMat(np.array(frame, dtype=np.uint8))\n cv2.imwrite(\"%s%d-%d.jpg\" %(path+class_name, sample_number, number), frame)\n box = (class_number, res['x'], res['y'], res['w'], res['h'])\n f = open(\"%s%d-%d.txt\" %(path+class_name, sample_number, number), 'w')\n f.write(\"%d %f %f %f %f\" %box)\n print(\"%d %f %f %f %f\" %box)\n f.close()\n zf.write(\"%s%d-%d.jpg\" %(path+class_name, sample_number, number))\n zf.write(\"%s%d-%d.txt\" %(path+class_name, sample_number, number))\n os.remove(\"%s%d-%d.jpg\" %(path+class_name, sample_number, number))\n os.remove(\"%s%d-%d.txt\" %(path+class_name, sample_number, number))\n return date\n\nclasses = \"coke\\nbread\\napple\\nbanana\\npepsi\\ncoxinha\\neclair\\ncheese_bread\\nchoux_cream\\nmate\"\nclass_name = \"choux_cream\"\nclass_number = 8\npath = \"../temp/\"\nused_dates = []\nn_folders = 2\nwith ZipFile('%s%s.zip'%(path, class_name), mode=\"w\") as zf:\n f = open(\"classes.txt\", 'w')\n f.write(classes)\n f.close()\n zf.write(\"classes.txt\")\n os.remove(\"classes.txt\")\n for i in range(n_folders):\n date = seg(class_name, class_number, used_dates, zf, i)\n used_dates.append(date)\n","sub_path":"tests/data_lake_sampler.py","file_name":"data_lake_sampler.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"389965911","text":"# ======================= Threads and multitasking ============================\n# ----------------------------- GUI and threads --------------------------------\n\n# ------------------- The main \"Finder\" application ----------------------------\n\n# The Qt library has a dedicated QThread class, a \"wrapper\" around\n# threads that are specific to the particular platform.\n\n# There are two ways to use QThread:\n# - subclass QObject with all the required functions, and then\n# call moveToThread() to move an instance of the class into a thread\n# (the preferred solution)\n# - subclass QThread and implement the run method (not a universal solution)\n\nimport sys\n\nfrom PyQt5 import QtGui, QtWidgets\nfrom PyQt5.QtCore import Qt, QObject, QThread, pyqtSignal, pyqtSlot\n\nfrom queue import Queue\n\nfrom finder import Finder\nfrom search_form import Ui_FinderForm\n\nclass FinderMonitor(QObject):\n ''' Monitor class that receives search results from the results queue.\n This class will be moved into a separate QThread\n '''\n gotData = pyqtSignal(tuple)\n finished = pyqtSignal(int)\n\n def __init__(self, parent, urls, text):\n super().__init__()\n self.parent = parent\n self.urls = urls\n self.text = text\n self.res_queue = Queue()\n self.finder 
= Finder(self.text, self.res_queue)\n\n def search_text(self):\n ''' Start the search.\n The search will run in a separate thread\n '''\n self.finder.search_in_urls(self.urls)\n # This function will:\n # - receive results from the queue;\n # - emit signals to interact with the GUI\n while True:\n data = self.res_queue.get()\n if data is None:\n break\n self.gotData.emit(data)\n self.res_queue.task_done()\n\n self.res_queue.task_done()\n self.finished.emit(0)\n\n def stop(self):\n self.finder.stop_search()\n\n\nclass ProgressDialog(QtWidgets.QDialog):\n ''' GUI form class of the \"Finder\"\n '''\n def __init__(self, parent=None):\n QtWidgets.QDialog.__init__(self, parent)\n self.ui = Ui_FinderForm()\n self.ui.setupUi(self)\n self.ui.pushButton.clicked.connect(self.start_search)\n self.ui.pushButton_2.clicked.connect(self.stop_search)\n self.monitor = None\n self.is_active = False\n self.progress = 0\n self.prog_val = 1\n\n @pyqtSlot(tuple)\n def update_results(self, data):\n ''' Display the search results\n '''\n self.ui.plainTextEdit.appendPlainText(\"++ {} ++\".format(data[0]))\n for text in data[1]:\n self.ui.plainTextEdit.appendPlainText(\" \" + text)\n self.ui.plainTextEdit.appendPlainText(\"\")\n\n @pyqtSlot()\n def update_progress(self):\n ''' Update the progress bar\n '''\n self.progress += self.prog_val\n self.ui.progressBar.setValue(self.progress)\n\n def stop_search(self):\n ''' Stop the search\n '''\n if self.monitor is not None:\n self.is_active = False\n self.monitor.stop()\n\n def finished(self):\n ''' Actions when the search finishes\n '''\n self.is_active = False\n self.ui.pushButton_2.setEnabled(False)\n self.ui.pushButton.setEnabled(True)\n \n def start_search(self):\n ''' Start the search\n '''\n if not self.is_active:\n self.ui.plainTextEdit.clear()\n self.is_active = True\n urls = self.ui.plainTextEdit_2.toPlainText().split('\\n')\n text = self.ui.lineEdit.text()\n # Reset the progress value and compute one unit of progress\n self.progress = 0\n self.prog_val = 100 / len(urls)\n\n self.monitor = FinderMonitor(self, urls, text)\n self.monitor.gotData.connect(self.update_results)\n self.monitor.gotData.connect(self.update_progress)\n\n # Create a thread and move the monitor object into it\n self.thread = QThread()\n self.monitor.moveToThread(self.thread)\n self.ui.pushButton_2.setEnabled(True)\n self.ui.pushButton.setEnabled(False)\n\n # ---------- The important part - connecting signals and slots ----------\n # When the thread starts, the search_text method is called\n self.thread.started.connect(self.monitor.search_text)\n\n # When the search finishes, quit the thread and update the GUI\n self.monitor.finished.connect(self.thread.quit)\n self.monitor.finished.connect(self.finished)\n\n # Stop the search process via the \"Stop\" button\n self.ui.pushButton_2.clicked.connect(self.monitor.stop)\n\n # Start the thread, which will run self.monitor.search_text\n self.thread.start()\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n progress = ProgressDialog()\n progress.show()\n sys.exit(app.exec_())","sub_path":"lesson_5/codes_5/finder_gui.py","file_name":"finder_gui.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"625187915","text":"from utils import *\nfrom DNAFountain import *\nimport zipfile, os\nimport bz2, lzma\nimport csv, shutil\nimport logging\nlogging.getLogger().setLevel(logging.DEBUG)\n\nclass DNA_transformer:\n\n def __init__(self, infile_folder, 
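# input folder, DNA FASTA output folder, CSV of (length, pad) rows, and decode target\n 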
dna_folder, message_file, out_file_folder):\n self.infile_folder = infile_folder\n self.dna_folder = dna_folder\n self.message_file = message_file\n self.out_file_folder = out_file_folder\n \n def Jpeg2DNA(self, input_file, dna_file):\n in_file = 'temp_in_file'\n temp_file = zipfile.ZipFile(in_file,mode='w', allowZip64=False, compression=zipfile.ZIP_LZMA)\n temp_file.write(input_file)\n temp_file.close()\n\n data, pad_num = preprocess(in_file, 17)\n\n f = DNAFountain(data, alpha=0.5, rs=4)\n f.encode()\n f.save(dna_file)\n os.remove(in_file)\n\n return len(data), pad_num\n\n def DNA2Jpeg(self, dna_file, data_length, pad_num, out_file_folder):\n out_file = 'temp_out_file'\n g = Glass(dna_file, data_length, rs=4, pad_num=pad_num)\n g.decode()\n g.save(out_file)\n\n out_zip_file = zipfile.ZipFile(out_file, 'r')\n out_zip_file.extractall(out_file_folder)\n os.remove(out_file)\n\n def all_encode(self):\n if os.path.exists(self.dna_folder):\n shutil.rmtree(self.dna_folder)\n os.mkdir(self.dna_folder)\n file_names = os.listdir(self.infile_folder)\n for i in range(len(file_names)):\n if (file_names[i].split('.'))[-1] != 'j4d':\n del(file_names[i])\n file_names.sort(key=lambda x:int(x.split('.')[0]))\n f = open(self.message_file, 'w')\n f_csv = csv.writer(f)\n for file_name in file_names:\n print('-'*20+'encode '+file_name+'-'*20)\n f_csv.writerow(self.Jpeg2DNA(input_file=os.path.join(self.infile_folder,file_name), dna_file=os.path.join(self.dna_folder, file_name+'.dna.Fasta')))\n\n\n\n def all_decode(self):\n file_names = os.listdir(self.dna_folder)\n file_names.sort(key=lambda x:int(x.split('.')[0]))\n param = []\n with open(self.message_file,'r') as f:\n f_csv = csv.reader(f)\n for line in f_csv:\n param.append([int(line[0]), int(line[1])])\n for i in range(len(file_names)):\n print('-'*20+'decode '+file_names[i]+'-'*20)\n self.DNA2Jpeg(dna_file=os.path.join(self.dna_folder, file_names[i]), data_length=param[i][0], pad_num=param[i][1], out_file_folder=self.out_file_folder)","sub_path":"DNA_transformer.py","file_name":"DNA_transformer.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"408749546","text":"#!/usr/bin/env python\n\nimport json\nimport sys\n\nimport boto3\n\nfrom botocore.exceptions import ClientError\n\ndynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n\ntable = dynamodb.Table('Members')\n\ntry:\n response = table.get_item(\n Key={\n 'MemberID': int(sys.argv[1])\n }\n )\nexcept ClientError as e:\n print(e.response)\n\nprint(response)\n\n","sub_path":"dynamodb/read-item.py","file_name":"read-item.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"226042689","text":"import urllib.request \nimport urllib.parse \nimport json \n#from tkinter import * \n \n'''''root =Tk() \nroot.wm_attributes('-topmost',1) \nroot.geometry('+1000+40') \nroot.minsize(300,200)''' \n \nfanyi = input('输入你想要翻译的:') \n \nurl = \"http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&sessionFrom=\" \n \n \nhead = {} \nhead['User-Agent']='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36]' \n \ndata = {} \ndata['i']=fanyi \ndata['from']='AUTO'\ndata['to']='AUTO' \ndata['smartresult']='dict' \ndata['client']='fanyideskweb' \ndata['salt']='1507343425426' \ndata['sign']='0df813336ad806fbaa2e0120dddb1d02' \ndata['doctype']='json' 
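# the remaining form fields are fixed values captured from the Youdao web client and may go stale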
\ndata['version']='2.1' \ndata['keyfrom']='fanyi.web' \ndata['action']='FY_BY_CLICKBUTTION' \ndata['typoResult']='true' \n \ndata = urllib.parse.urlencode(data).encode('utf-8') \nresponse = urllib.request.urlopen(url,data) \nhtml = response.read().decode('utf-8') \nprint (html) \ntarget = json.loads(html) \nresult = target['translateResult'][0][0]['tgt'] \nprint (result) \n","sub_path":"Web crawlers1.py","file_name":"Web crawlers1.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"634473276","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAccount: SpecSavers SW\n\nExtract all contact lenses.\n\"\"\"\n\nimport json\nimport re\n\nfrom scrapy.spider import BaseSpider\nfrom scrapy.http import Request\n\nfrom product_spiders.items import Product, ProductLoaderWithoutSpaces as ProductLoader\n\n\nclass SpecSavers(BaseSpider):\n name = 'specsavers_new_sw-specsavers.se'\n allowed_domains = ['specsavers.se']\n\n start_urls = ('https://www.specsavers.se/kontaktlinser',)\n \n def parse(self, response):\n for url in response.xpath('//a[contains(@class,\"product-tile\")]/@href').extract():\n yield Request(response.urljoin(url), callback=self.parse_product)\n\n def parse_product(self, response):\n product = re.findall('\"products\":(.*)}}}', response.body)\n if product:\n product = json.loads(product[0])[0]\n\n loader = ProductLoader(item=Product(), response=response)\n name = response.xpath('//div[contains(@class,\"field-name-title\")]/h1/text()').extract()\n name += response.xpath('//div[contains(@class,\"field-name-field-cl-lens-type\")]/div/span/text()').extract()\n name += response.xpath('//div[contains(@class,\"form-item-cl-supply\")]/text()').extract()\n loader.add_value('name', u' '.join([x.strip() for x in name]))\n loader.add_value('identifier', response.url.split('/')[-1])\n loader.add_value('url', response.url)\n loader.add_value('brand', product['brand'])\n loader.add_value('category', product['category'])\n image_url = response.xpath('//img[contains(@class, \"img-responsive\")]/@src').extract()\n if image_url:\n loader.add_value('image_url', image_url)\n loader.add_value('price', product['price'])\n\n yield loader.load_item()\n","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/specsavers_sw_new/specsavers.py","file_name":"specsavers.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"413316481","text":"\"\"\"\nArticle word segmentation.\nTwo approaches are provided:\n\"\"\"\n\nimport jieba\nimport os\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.datasets.base import Bunch\nfrom sklearn.datasets import base\nfrom concurrent import futures\nimport sys\nimport pickle\npathDir = os.path.dirname(__file__)\ncurPath = os.path.abspath(pathDir)\nrootPath = os.path.split(curPath)[0]\nprint(rootPath)\n# os.path.split(rootPath)[0]\n#\nuserdict = rootPath + '/data_models/userdict.txt'\n# userdict = 'E:/Document/project/data_/data_split/data_k/userdict.txt'\njieba.load_userdict(userdict)\n\n\n# E:\\\Document\\\project\\\cloudbrain-assistant1-2\\\voice_assistant-1_2_2\\\voiceAssistant\\\Text_classification\\\data_models\\\userdict.txt\ndef tokenizer():\n return jieba\n\n\ndef readfile(filepath, encoding='utf-8'):\n # read a text file\n with open(filepath, \"rt\", encoding=encoding) as fp:\n content = fp.read()\n return content\n\n\ndef savefile(savepath, content):\n # save text to a file\n with open(savepath, \"wt\") as fp:\n 
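# note: unlike readfile, no explicit encoding is passed, so the platform default applies\n 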
fp.write(content)\n\n\ndef writeobj(path, obj):\n # persist a python object\n with open(path, \"wb\") as file_obj:\n pickle.dump(obj, file_obj)\n\n\ndef readobj(path):\n # load a python object\n with open(path, \"rb\") as file_obj:\n obj = pickle.load(file_obj)\n return obj\n\n\ndef check_dir_exist(dir):\n # check whether the directory exists; create it if not\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n\ndef folder_handler(args):\n \"\"\"Walk all text files under one folder\"\"\"\n folder, encoding, seg = args\n print('walking:', folder)\n try:\n assert os.path.isdir(folder)\n except AssertionError:\n return None\n files = os.listdir(folder)\n content = []\n filenames = []\n for name in files:\n if name.startswith('.DS'):\n continue\n filepath = os.path.join(folder, name)\n text = readfile(filepath, encoding)\n # tokenise directly here if requested\n if seg:\n text = ' '.join(jieba.cut(text, cut_all=True))\n content.append(text)\n filenames.append(filepath)\n return (filenames, content)\n\n\ndef corpus_bunch(data_dir, encoding='utf-8', seg=True, tier=2) -> Bunch:\n \"\"\"\n Build the corpus and return it as a Bunch object\n :param data_dir: corpus directory, organised per category as data_dir/category/1.txt\n :param encoding: corpus encoding\n :param seg: whether tokenisation is needed\n :param tier: depth under data_dir; 2: data_dir/category/1.txt, 1: data_dir/1.txt\n :return:\n \"\"\"\n try:\n assert os.path.isdir(data_dir)\n except AssertionError:\n print('{} is not a folder!'.format(data_dir))\n sys.exit(0)\n try:\n assert tier in [1, 2]\n except AssertionError:\n print('The directory tier can only be 1 or 2!')\n sys.exit(0)\n corpus = Bunch(filenames=[], label=[], contents=[])\n if tier == 2:\n folders = [os.path.join(data_dir, d) for d in os.listdir(data_dir) if not d.startswith('.DS')]\n else:\n folders = [data_dir]\n # create a thread pool to walk the second-level directories\n with futures.ThreadPoolExecutor(max_workers=len(folders)) as executor:\n folders_executor = {executor.submit(folder_handler, (folder, encoding, seg)): folder for folder in folders}\n for fol_exe in futures.as_completed(folders_executor):\n folder = folders_executor[fol_exe]\n filenames, content = fol_exe.result()\n if content:\n cat_name = folder.split('/')[-1]\n content_num = len(content)\n print(cat_name, content_num, sep=': ')\n label = [cat_name] * content_num\n corpus.filenames.extend(filenames)\n corpus.label.extend(label)\n corpus.contents.extend(content)\n return corpus\n\n\ndef vector_space(corpus_dir, stop_words=None, vocabulary=None, encoding='utf-8', seg=True, tier=2):\n \"\"\"Vectorise a corpus\"\"\"\n vectorizer = TfidfVectorizer(stop_words=stop_words, vocabulary=vocabulary)\n corpus = corpus_bunch(corpus_dir, encoding=encoding, seg=seg, tier=tier)\n tfidf_bunch = Bunch(filenames=corpus.filenames, label=corpus.label, tdm=[], vocabulary={})\n tfidf_bunch.tdm = vectorizer.fit_transform(corpus.contents)\n tfidf_bunch.vocabulary = vectorizer.vocabulary_\n return tfidf_bunch\n\n\ndef tfidf_space(data_dir, save_path, stopword_path=None, encoding='utf-8', seg=True):\n stpwd = None\n if stopword_path:\n stpwd = [wd.strip() for wd in readfile(stopword_path).splitlines()]\n check_dir_exist(save_path)\n train = data_dir + 'train'\n train_tfidf = vector_space(train, stop_words=stpwd, encoding=encoding, seg=seg)\n test = data_dir + 'test'\n test_tfidf = vector_space(test, stop_words=stpwd, vocabulary=train_tfidf.vocabulary, encoding=encoding, seg=seg)\n writeobj(os.path.join(save_path, 'train_tfidf.data'), train_tfidf)\n writeobj(os.path.join(save_path, 'test_tfidf.data'), test_tfidf)\n writeobj(os.path.join(save_path, 'vocabulary.data'), train_tfidf.vocabulary)\n\n\ndef main_tfidf(file_path, k):\n for i in range(1):\n data_dir = os.path.join(file_path, 'data_%s\\\\' % 6)\n tfidf_space(data_dir, 
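# the second positional argument is the save folder for the tfidf feature spaces\n 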
data_dir + 'feature_space',\n stopword_path=file_path + 'stop_words.txt', seg=True)\n\n\nif __name__ == '__main__':\n data_dir = 'E:\\\\Document\\\\project\\\\data_\\\\data_set\\\\'\n\n # build the bag-of-words feature spaces\n for i in range(5):\n data_dir = 'E:\\\\Document\\\\project\\\\data_\\\\data_set_goods\\\\data_%s\\\\' % str(i+1)\n tfidf_space(data_dir, data_dir + 'feature_space', stopword_path=data_dir + 'stop_words.txt', seg=True)\n","sub_path":"voiceAssistant/Text_classification/txt_classification/tfidf_feature.py","file_name":"tfidf_feature.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"341847060","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom flask import Flask, request, abort, render_template\r\nfrom flask_socketio import SocketIO, emit\r\n\r\nfrom linebot import (\r\n LineBotApi, WebhookHandler\r\n)\r\nfrom linebot.exceptions import (\r\n InvalidSignatureError\r\n)\r\nfrom linebot.models import (\r\n MessageEvent, TextMessage, TextSendMessage,PostbackEvent,TemplateSendMessage,ConfirmTemplate,PostbackTemplateAction\r\n)\r\nimport json\r\nimport os\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'secret!'\r\nsocketio = SocketIO(app)\r\n\r\nline_bot_api = LineBotApi('m8THMajfLUG1GZ8b1H32KS23AMFU22h51PEmA4iYfu8BOudlDG1jcuIHecueHvuRG6NljUxP8dx75xRoH2rJdwvDkNN29vxoDNCD0GV2qGxg5XrDRPluoBKueb44xUzetRsp93utLkBBnTyw/A/n6QdB04t89/1O/w1cDnyilFU=')\r\nhandler = WebhookHandler('4631be1f161338a9701166797ac05603')\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/callback\", methods=['POST'])\r\ndef callback():\r\n # get X-Line-Signature header value\r\n signature = request.headers['X-Line-Signature']\r\n\r\n # get request body as text\r\n body = request.get_data(as_text=True)\r\n app.logger.info(\"Request body: \" + body)\r\n\r\n # handle webhook body\r\n try:\r\n handler.handle(body, signature)\r\n except InvalidSignatureError:\r\n abort(400)\r\n\r\n return 'OK'\r\n\r\n@handler.add(MessageEvent, message=TextMessage)\r\ndef handle_message(event):\r\n\r\n profile = line_bot_api.get_profile(event.source.user_id)\r\n data = {\r\n \"name\":profile.display_name,\r\n \"pic\":profile.picture_url,\r\n #\"sign\":profile.status_message,\r\n \"msg\":event.message.text\r\n }\r\n socketio.emit(\"msg\",json.dumps(data))\r\n line_bot_api.reply_message(\r\n event.reply_token,\r\n TextSendMessage(text=event.message.text))\r\n \r\nif __name__ == \"__main__\":\r\n #app.run(host=\"0.0.0.0\", port=os.environ[\"PORT\"])\r\n socketio.run(app, host=\"0.0.0.0\", port=os.environ[\"PORT\"])","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"70847551","text":"import logging\n\nimport stripe\nfrom fastapi import FastAPI, Response, status\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nstripe.api_key = 'sk_test_4eC39HqLyjWDarjtT1zdp7dc'\n\n\nclass Item(BaseModel):\n cost: int\n success_url: str\n cancel_url: str\n\n\n@app.get(\"/stripe/hello/\")\nasync def hello():\n return {\"msg\": \"hello world\"}\n\n\n@app.post(\"/stripe/create-checkout-session/\")\nasync def create_checkout_session(item: Item, response: Response):\n items = [{\n \"price_data\": {\n \"currency\": \"rub\",\n \"unit_amount\": item.cost,\n \"product_data\": {\n \"name\": \"Something here\",\n }\n },\n \"quantity\": 1\n }]\n try:\n checkout_session = stripe.checkout.Session.create(payment_method_types=['card'], line_items=items,\n mode='payment',\n success_url=item.success_url,\n 
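# both redirect URLs come from the request body rather than server config\n 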
cancel_url=item.cancel_url, )\n except Exception as e:\n logging.error(\"create_checkout_session error: {}\".format(e))\n response.status_code = status.HTTP_400_BAD_REQUEST\n return {\"err\": e}\n return {'id': checkout_session.id}\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"357980240","text":"from plugins import PluginResponse, Plugin\nimport sys\n\nfrom datetime import datetime\n\n\nclass date(Plugin): \n myhelp = \"Converts today's date to Discordian form. Does not take arguments!\"\n\n seasons = (\n \"Chaos\",\n \"Discord\",\n \"Confusion\",\n \"Bureaucracy\",\n \"The Aftermath\",\n )\n\n days = (\n \"Sweetmorn\",\n \"Boomtime\",\n \"Pungenday\",\n \"Prickle-Prickle\",\n \"Setting Orange\"\n )\n\n\n\n def __init__(self, dbconn):\n self.keyword = (\"date\",)\n\n def command(self, args):\n response = PluginResponse()\n\n day_of_year = datetime.now().timetuple().tm_yday\n\n seaday = int(day_of_year % 73)\n season = self.seasons[int(day_of_year / 73)]\n day = self.days[int((day_of_year -1) % 5)]\n response.setText(\"Today is {}, day {} of {} . Hail Eris, all Hail Discordia\".format(day,seaday, season))\n \n\n return response\n\n\n","sub_path":"plugins/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"248507285","text":"#!/usr/bin/env python3\n\ndef reverse_dictionary(d):\n\n d2 = {}\n\n for key, value in d.items():\n for i in range(len(value)):\n if value[i] not in d2:\n d2[value[i]] = list({key})\n else:\n L = d2[value[i]]\n L.append(key)\n d2[value[i]] = L\n\n return d2\n\ndef main():\n d = {'move': ['liikuttaa'], 'hide': ['piilottaa', 'salata'], 'six': ['kuusi'], 'fir': ['kuusi']}\n print(reverse_dictionary(d))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"part01-kaikki/reverse_dictionary.py","file_name":"reverse_dictionary.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"251491301","text":"import sqlite3\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image,ImageTk\n# from tkinter import ttk\nroot=Tk()\nroot.geometry(\"1366x768+60+10\")\nroot.title(\"Login\")\nroot.resizable(0, 0)\nmyimage=ImageTk.PhotoImage(Image.open('./images/update.png'))\nLabel(image=myimage).pack()\nfullname_lbl=Label(root,text=\"Full 
Name\",font=('Consolas',15),bg=\"white\")\nfullname_lbl.place(x=180,y=200)\ndepartment_lbl=Label(root,text=\"Department\",font=('Consolas',15),bg=\"white\")\ndepartment_lbl.place(x=720,y=200)\nage_lbl=Label(root,text=\"Age\",font=('Consolas',15),bg=\"white\")\nage_lbl.place(x=180,y=290)\ngender_lbl=Label(root,text=\"Gender\",font=('Consolas',15),bg=\"white\")\ngender_lbl.place(x=720,y=290)\ncontact_lbl=Label(root,text=\"Contact\",font=('Consolas',15),bg=\"white\")\ncontact_lbl.place(x=180,y=380)\naddress_lbl=Label(root,text=\"Address\",font=('Consolas',15),bg=\"white\")\naddress_lbl.place(x=720,y=380)\n\nfullname_entry=Entry(root,width=25,border=0,font=('Consolas',15))\nfullname_entry.place(x=180,y=230)\ndepartment_entry=Entry(root,width=25,border=0,font=('Consolas',15))\ndepartment_entry.place(x=720,y=230)\nage_entry=Entry(root,width=25,border=0,font=('Consolas',15))\nage_entry.place(x=180,y=320)\ngender_entry=Entry(root,width=25,border=0,font=('Consolas',15))\ngender_entry.place(x=720,y=320)\ncontact_entry=Entry(root,width=25,border=0,font=('Consolas',15))\ncontact_entry.place(x=180,y=410)\nemail_entry=Entry(root,width=25,border=0,font=('Consolas',15))\nemail_entry.place(x=720,y=410)\nadd_btn=Button(root,text=\"ADD\",font=('Consolas',15),cursor='hand2',\n bg=\"#00bff3\",border=0,activebackground=\"#00bff3\",padx=25,pady=10)\nadd_btn.place(x=560,y=630)\nclear_btn=Button(root,text=\"CLEAR\",font=('Consolas',15),cursor='hand2',\n bg=\"#00bff3\",border=0,activebackground=\"#00bff3\",padx=25,pady=10)\nclear_btn.place(x=715,y=630)\nroot.mainloop()","sub_path":"updateemployee.py","file_name":"updateemployee.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"103937044","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\nclass product_product(osv.osv):\n _name = \"product.product\"\n _inherit = \"product.product\"\n\n def product_info(self, cr, uid, ids, context=None):\n context = context or {}\n if not isinstance(ids,list):\n ids = [ids]\n domain_products = [('product_id', 'in', ids)]\n domain_quant = self._get_domain_locations(cr, uid, ids, context=context)[0]\n res = {}\n quants_obj = self.pool.get('stock.quant')\n quants_ids = quants_obj.search(cr, uid, domain_quant+domain_products+[('location_id.usage','=','internal')], context=context)\n if quants_ids:\n for row in quants_obj.browse(cr, uid, quants_ids, context=context):\n key = row.location_id.id\n if res.get(key, False):\n res[key]['product_qty'] += row.qty\n else:\n res[key] = {'location_name':row.location_id.complete_name, 'product_qty':row.qty}\n result = []\n prod_qty = 0\n for key, value in res.items():\n prod_qty += value['product_qty'];\n result.append({'location_name': value['location_name'], 'product_qty': value['product_qty']});\n return result, prod_qty\n","sub_path":"point_of_sale_ext/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"650649362","text":"# Author: Justin Trugman\r\n# EE 605\r\n# Homework 1, Problem 1\r\n\r\n\r\n# Problem\r\n# How many people do we need to have in a room to make it that the probability\r\n# of two people in the room will have the same birthday is greater than ½? (Note: Here\r\n# we consider just the day and month, not the year.)\r\n\r\n# Answer = 23 people\r\n\r\nif __name__ == \"__main__\":\r\n prob_no_match = 1\r\n days_in_year = 365\r\n counter = 0\r\n\r\n while prob_no_match > 0.5:\r\n prob_no_match = prob_no_match * (days_in_year/365)\r\n days_in_year = days_in_year - 1\r\n counter = counter + 1\r\n \r\n print(\"Probability of No Match = \" + str(prob_no_match) + '\\n')\r\n print(\"People for greater than 50 percent chance of same birthday = \" + str(counter))\r\n\r\n\r\n","sub_path":"Homework1/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"588554392","text":"# encoding: utf-8\r\n# Author: Li Qiliang(l00423096).\r\n\r\nimport os\r\nimport sys\r\nimport time\r\nimport glob\r\nimport copy\r\nimport ctypes\r\nimport base64\r\nimport logging\r\nimport argparse\r\nimport multiprocessing\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nfrom urllib import parse,request\r\nimport random\r\nimport json\r\nimport codecs\r\n\r\nos.environ.pop('http_proxy', None)\r\nos.environ.pop('https_proxy', None)\r\nos.environ.pop('HTTP_PROXY', None)\r\nos.environ.pop('HTTPS_PROXY', None)\r\n\r\nfrom boto3.session import Session\r\n\r\nlogging.basicConfig(level=logging.ERROR)\r\n\r\nparser = argparse.ArgumentParser(description='Parameters of CSB-OBS',\r\n formatter_class=\r\n argparse.ArgumentDefaultsHelpFormatter)\r\n\r\nparser.add_argument('--local_folder_absolute_path', type=str, default=None, help='Local folder to upload')\r\nparser.add_argument('--vendor', type=str, default=\"HEC\",\r\n help='vendor of bucket')\r\nparser.add_argument('--region', type=str, default=\"cn-north-1\",\r\n help='region of bucket')\r\nparser.add_argument('--app_token', type=str, default=None,\r\n help='appToken of CSB')\r\nparser.add_argument('--bucket_name', type=str, default=None,\r\n 
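# effectively required: bucket_auth raises when this is left as None\r\n 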
help='Please input your bucket_name')\r\nparser.add_argument('--bucket_path', type=str, default=\"\",\r\n help='Please input your bucket_path')\r\nparser.add_argument('--failed_list_file_name', type=str, default=None,\r\n help='Output list of files which are failed to upload')\r\nparser.add_argument('--failed_list_absobute_path', type=str, default=None,\r\n help='Reupload failed files')\r\nparser.add_argument('--small_file_thread', type=int, default=100,\r\n help='The default value of small_file_thread is 100')\r\nparser.add_argument('--large_file_thread', type=int, default=10,\r\n help='The default value of large_file_thread is 10')\r\nparser.add_argument('--part_file_thread', type=int, default=5,\r\n help='The default value of part_file_thread is 5')\r\nparser.add_argument('--samll_file_size', type=int, default=100 * 1024 * 1024,\r\n help='The default value of samll_file_size is 100MB.')\r\nparser.add_argument('--part_size', type=int, default=200 * 1024 * 1024,\r\n help='The default value of part_size is 200MB.')\r\n\r\nresult, _ = parser.parse_known_args()\r\nargs = copy.deepcopy(result)\r\n\r\nfile_server_url = 'http://10.155.173.11:8080/csb-file-server'\r\n#file_server_url = 'http://127.0.0.1:8080/csb-file-server'\r\n\r\ndef bucket_auth(vendor, region, bucket_name, app_token):\r\n try:\r\n if bucket_name is None:\r\n raise Exception(\"The --bucket_name can not be null.\")\r\n\r\n if app_token is None:\r\n raise Exception(\"The --app_token can not be null.\")\r\n\r\n bucket_auth_endpoint = file_server_url + '/rest/boto3/s3/bucket-auth?vendor=' + vendor + '&region=' + region + '&bucketid=' + bucket_name + '&apptoken=' + app_token\r\n req = request.Request(url=bucket_auth_endpoint)\r\n res = request.urlopen(req)\r\n result = res.read().decode(encoding='utf-8')\r\n result_dict = json.loads(result)\r\n return result_dict['success'], result_dict['msg']\r\n except Exception as e:\r\n sys.stdout.write(str(e) + '\\n')\r\n\r\ndef get_s3_client_list():\r\n s3_client_list = []\r\n try:\r\n query_urls_endpoint = file_server_url + '/rest/boto3/s3/query/csb-file-server/all/ip-and-port?vendor=' + args.vendor + \"&region=\" + args.region\r\n req = request.Request(url=query_urls_endpoint)\r\n res = request.urlopen(req)\r\n result = res.read().decode(encoding='utf-8')\r\n result_dict = json.loads(result)\r\n if result_dict['fileServerUrlList'] is not None:\r\n for csb_file_server_url in result_dict['fileServerUrlList']:\r\n csb_obs_service_endpoint = csb_file_server_url + \"/rest/boto3/s3/\" + args.vendor + \"/\" + args.region + \"/\" + args.app_token\r\n session = Session('Hello', 'CSB-OBS')\r\n s3_client = session.client('s3', endpoint_url=csb_obs_service_endpoint)\r\n s3_client_list.append(s3_client)\r\n return s3_client_list\r\n except Exception as e:\r\n sys.stdout.write(str(e) + '\\n')\r\n\r\ndef print_progress_bar(total_num, finished_num, cur_count, print_lock):\r\n with print_lock:\r\n finished_num.value += cur_count\r\n finished_percent = finished_num.value / total_num.value\r\n sys.stdout.write('|' + ('-' * int(50 * finished_percent)) +\r\n (' ' * int(50 * (1 - finished_percent))) +\r\n '| %.2f%%\\n' % (finished_percent * 100))\r\n sys.stdout.flush()\r\n\r\ndef alter_global_small_file_thread_count(global_small_file_thread_count, global_small_file_thread_count_lock, step):\r\n with global_small_file_thread_count_lock:\r\n global_small_file_thread_count.value += step\r\n\r\ndef alter_global_large_file_thread_count(global_large_file_thread_count, global_large_file_thread_count_lock, step):\r\n with 
global_large_file_thread_count_lock:\r\n global_large_file_thread_count.value += step\r\n\r\ndef alter_global_part_thread_count(global_part_thread_count, global_part_thread_count_lock, step):\r\n with global_part_thread_count_lock:\r\n global_part_thread_count.value += step\r\n\r\n\r\ndef record_failed_file(failed_list_file_path, local_folder_absolute_path, file_name, print_lock):\r\n with print_lock:\r\n with open(failed_list_file_path, 'a') as f:\r\n failed_file_name = os.path.join(local_folder_absolute_path, file_name)\r\n failed_file_name = failed_file_name.replace('\\\\', '/')\r\n f.write(failed_file_name + '\\n')\r\n\r\ndef upload_small_file(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, bucket_path, file_name, total_num, finished_num, cur_file_size, global_small_file_thread_count, print_lock, global_small_file_thread_count_lock, failed_count=1, selected_index=0):\r\n try:\r\n wait_select_index_list = [index for index in range(len(s3_client_list))]\r\n if failed_count != 1 and len(s3_client_list) > 1:\r\n wait_select_index_list.remove(selected_index)\r\n\r\n random.shuffle(wait_select_index_list)\r\n selected_index = wait_select_index_list[0]\r\n s3_client = s3_client_list[selected_index]\r\n\r\n key = bucket_path + file_name\r\n key = base64.urlsafe_b64encode(key.encode(encoding=\"utf-8\"))\r\n key = str(key, encoding=\"utf-8\")\r\n\r\n with open(os.path.join(local_folder_absolute_path, file_name), 'rb') as file:\r\n resp = s3_client.put_object(Bucket=bucket_name, Key=key, Body=file.read())\r\n\r\n #sys.stdout.write(str(s3_client) + ' success single' + '\\n')\r\n \r\n alter_global_small_file_thread_count(global_small_file_thread_count, global_small_file_thread_count_lock, -1)\r\n #global_small_file_thread_count.value -= 1\r\n\r\n #Accumulate the amount of uploaded data\r\n print_progress_bar(total_num, finished_num, cur_file_size, print_lock)\r\n\r\n except Exception as e:\r\n if failed_count >= 3:\r\n sys.stdout.write(str(e) + '\\n')\r\n alter_global_small_file_thread_count(global_small_file_thread_count, global_small_file_thread_count_lock, -1)\r\n #global_small_file_thread_count.value -= 1\r\n record_failed_file(failed_list_file_path, local_folder_absolute_path, file_name, print_lock)\r\n else:\r\n #sys.stdout.write(str(s3_client) + ' failed single' + '\\n')\r\n failed_count += 1\r\n upload_small_file(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, bucket_path, file_name, total_num, finished_num, cur_file_size, global_small_file_thread_count, print_lock, global_small_file_thread_count_lock, failed_count=failed_count, selected_index=selected_index)\r\n\r\ndef upload_large_file(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, bucket_path, file_name, total_num, finished_num, global_large_file_thread_count, global_part_thread_count, print_lock, global_large_file_thread_count_lock, global_part_thread_count_lock, failed_count=1, selected_index=0):\r\n try:\r\n wait_select_index_list = [index for index in range(len(s3_client_list))]\r\n if failed_count != 1 and len(s3_client_list) > 1:\r\n wait_select_index_list.remove(selected_index)\r\n\r\n random.shuffle(wait_select_index_list)\r\n selected_index = wait_select_index_list[0]\r\n s3_client = s3_client_list[selected_index]\r\n\r\n key = bucket_path + file_name\r\n key = base64.urlsafe_b64encode(key.encode(encoding=\"utf-8\"))\r\n key = str(key, encoding=\"utf-8\")\r\n mpu = s3_client.create_multipart_upload(Bucket=bucket_name, 
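# the response carries the UploadId that tags every part uploaded below\r\n 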
Key=key)\r\n\r\n #sys.stdout.write(str(s3_client) + ' success init' + '\\n')\r\n\r\n part_dict = multiprocessing.Manager().dict() #The main process shares the dict with sub threads.\r\n \r\n threadPoolExecutor = ThreadPoolExecutor(args.part_file_thread)\r\n with open(os.path.join(local_folder_absolute_path, file_name), 'rb') as file:\r\n i = 1\r\n while 1:\r\n if global_part_thread_count.value >= args.part_file_thread * 2:\r\n seconds = 0.5 + round(random.uniform(0, 1), 2) # wait: 0.5-1.5 seconds\r\n time.sleep(seconds)\r\n continue\r\n\r\n #sys.stdout.write(str(i) + '\\n')\r\n data = file.read(args.part_size)\r\n if data == b'':\r\n break\r\n alter_global_part_thread_count(global_part_thread_count, global_part_thread_count_lock, 1)\r\n #global_part_thread_count.value += 1\r\n threadPoolExecutor.submit(upload_part, *(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name,\r\n key, total_num, finished_num, mpu[\"UploadId\"], i, data, part_dict, global_part_thread_count, print_lock, global_part_thread_count_lock))\r\n i += 1\r\n\r\n threadPoolExecutor.shutdown(wait=True)\r\n\r\n part_info = {'Parts': []}\r\n for PartNumber, ETag in part_dict.items():\r\n part_info['Parts'].append({'PartNumber': PartNumber, \"ETag\": ETag})\r\n\r\n #sys.stdout.write(str(part_info) + '\\n')\r\n \r\n complete_multipart_upload(s3_client_list, local_folder_absolute_path, failed_list_file_path, file_name, bucket_name, key, mpu[\"UploadId\"], part_info, global_large_file_thread_count, print_lock, global_large_file_thread_count_lock)\r\n\r\n except Exception as e:\r\n if failed_count >= 3:\r\n sys.stdout.write(str(e) + '\\n')\r\n alter_global_large_file_thread_count(global_large_file_thread_count, global_large_file_thread_count_lock, -1)\r\n #global_large_file_thread_count.value -= 1\r\n record_failed_file(failed_list_file_path, local_folder_absolute_path, file_name, print_lock)\r\n else:\r\n #sys.stdout.write(str(s3_client) + ' failded init' + '\\n')\r\n failed_count += 1\r\n upload_large_file(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, bucket_path, file_name, total_num, finished_num, global_large_file_thread_count, global_part_thread_count, print_lock, global_large_file_thread_count_lock, global_part_thread_count_lock, failed_count=failed_count, selected_index=selected_index)\r\n\r\ndef upload_part(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, key, total_num, finished_num, upload_id, i, data, part_dict, global_part_thread_count, print_lock, global_part_thread_count_lock, failed_count=1, selected_index=0):\r\n try:\r\n wait_select_index_list = [index for index in range(len(s3_client_list))]\r\n if failed_count != 1 and len(s3_client_list) > 1:\r\n wait_select_index_list.remove(selected_index)\r\n\r\n random.shuffle(wait_select_index_list)\r\n selected_index = wait_select_index_list[0]\r\n s3_client = s3_client_list[selected_index]\r\n\r\n response = s3_client.upload_part(Bucket=bucket_name, Key=key, PartNumber=i,\r\n UploadId=upload_id, Body=data)\r\n \r\n part_dict[i] = response['ResponseMetadata']['HTTPHeaders']['etag']\r\n\r\n alter_global_part_thread_count(global_part_thread_count, global_part_thread_count_lock, -1)\r\n #global_part_thread_count.value -= 1\r\n\r\n #Accumulate the amount of uploaded data\r\n print_progress_bar(total_num, finished_num, len(data), print_lock)\r\n\r\n #sys.stdout.write(str(s3_client) + ' success part' + '\\n')\r\n\r\n if response['ResponseMetadata']['HTTPStatusCode'] >= 300:\r\n raise 
IOError('Failed to upload.')\r\n\r\n except Exception as e:\r\n if failed_count >= 3:\r\n sys.stdout.write(str(e) + '\\n')\r\n alter_global_part_thread_count(global_part_thread_count, global_part_thread_count_lock, -1)\r\n #global_part_thread_count.value -= 1\r\n #upload_part receives no file_name parameter, so recover a printable name from the base64-encoded object key\r\n record_failed_file(failed_list_file_path, local_folder_absolute_path, str(base64.urlsafe_b64decode(key), encoding=\"utf-8\"), print_lock)\r\n else:\r\n #sys.stdout.write(str(s3_client) + ' failed part' + '\\n')\r\n failed_count += 1\r\n upload_part(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, key, total_num, finished_num, upload_id, i, data, part_dict, global_part_thread_count, print_lock, global_part_thread_count_lock, failed_count=failed_count, selected_index=selected_index)\r\n\r\ndef complete_multipart_upload(s3_client_list, local_folder_absolute_path, failed_list_file_path, file_name, bucket_name, key, upload_id, part_info, global_large_file_thread_count, print_lock, global_large_file_thread_count_lock, failed_count=1, selected_index=0):\r\n try:\r\n wait_select_index_list = [index for index in range(len(s3_client_list))]\r\n if failed_count != 1 and len(s3_client_list) > 1:\r\n wait_select_index_list.remove(selected_index)\r\n\r\n random.shuffle(wait_select_index_list)\r\n selected_index = wait_select_index_list[0]\r\n s3_client = s3_client_list[selected_index]\r\n\r\n s3_client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload=part_info)\r\n\r\n alter_global_large_file_thread_count(global_large_file_thread_count, global_large_file_thread_count_lock, -1)\r\n #global_large_file_thread_count.value -= 1\r\n\r\n #sys.stdout.write(str(s3_client) + ' success complete_multipart' + '\\n')\r\n\r\n except Exception as e:\r\n if failed_count >= 3:\r\n sys.stdout.write(str(e) + '\\n')\r\n alter_global_large_file_thread_count(global_large_file_thread_count, global_large_file_thread_count_lock, -1)\r\n #global_large_file_thread_count.value -= 1\r\n record_failed_file(failed_list_file_path, local_folder_absolute_path, file_name, print_lock)\r\n else:\r\n #sys.stdout.write(str(s3_client) + ' failed complete_multipart' + '\\n')\r\n failed_count += 1\r\n complete_multipart_upload(s3_client_list, local_folder_absolute_path, failed_list_file_path, file_name, bucket_name, key, upload_id, part_info, global_large_file_thread_count, print_lock, global_large_file_thread_count_lock, failed_count=failed_count, selected_index=selected_index)\r\n\r\ndef main():\r\n try:\r\n start_time = time.time()\r\n\r\n if args.local_folder_absolute_path is None or len(args.local_folder_absolute_path)==0:\r\n raise Exception(\"The --local_folder_absolute_path can not be null.\")\r\n\r\n local_folder_absolute_path = args.local_folder_absolute_path.replace('\\\\', '/')\r\n is_folder = False\r\n if os.path.isdir(local_folder_absolute_path):\r\n is_folder = True\r\n if not local_folder_absolute_path.endswith('/'):\r\n local_folder_absolute_path = local_folder_absolute_path + '/'\r\n\r\n bucket_name = args.bucket_name\r\n bucket_path = args.bucket_path.replace('\\\\', '/')\r\n if not args.bucket_path.endswith('/'):\r\n bucket_path = args.bucket_path + '/'\r\n\r\n single_file_path = None\r\n single_file_name = None\r\n if is_folder and args.failed_list_absobute_path is None:\r\n temp = local_folder_absolute_path[:-1]\r\n pos = temp.rfind(\"/\")\r\n if pos != -1:\r\n bucket_path = bucket_path + temp[pos+1 : ] + '/'\r\n else:\r\n pos = local_folder_absolute_path.rfind(\"/\")\r\n single_file_path = local_folder_absolute_path\r\n single_file_name 
= local_folder_absolute_path[pos+1 : ]\r\n local_folder_absolute_path = local_folder_absolute_path[ : pos+1]\r\n\r\n #First, bucket auth\r\n is_success, msg = bucket_auth(args.vendor, args.region, bucket_name, args.app_token)\r\n if not is_success:\r\n raise Exception(msg)\r\n \r\n failed_list_file_path = ''\r\n if args.failed_list_file_name is None:\r\n failed_list_file_path = os.path.join(local_folder_absolute_path, 'failed_list.log')\r\n else:\r\n failed_list_file_path = os.path.join(local_folder_absolute_path, args.failed_list_file_name)\r\n failed_list_file_path = failed_list_file_path.replace('\\\\', '/')\r\n\r\n\r\n file_list = []\r\n if args.failed_list_absobute_path is not None:\r\n with codecs.open(args.failed_list_absobute_path, 'r') as f:\r\n file_list = [s.strip() for s in f.readlines()]\r\n \r\n if os.path.exists(failed_list_file_path):\r\n os.remove(failed_list_file_path)\r\n elif is_folder:\r\n if os.path.exists(failed_list_file_path):\r\n os.remove(failed_list_file_path)\r\n \r\n sys.stdout.write('Please wait, recursively traversing all files under your folder...' + '\\n')\r\n file_list = glob.glob(os.path.join(local_folder_absolute_path, '**', '*'), recursive=True)\r\n elif not is_folder:\r\n file_list.append(single_file_path)\r\n \r\n if os.path.exists(failed_list_file_path):\r\n os.remove(failed_list_file_path)\r\n\r\n\r\n if args.failed_list_absobute_path is not None:\r\n temp = local_folder_absolute_path[:-1]\r\n pos = temp.rfind('/')\r\n if pos != -1:\r\n local_folder_absolute_path = temp[: pos+1]\r\n\r\n print_lock = multiprocessing.Lock()\r\n threadPoolExecutor = ThreadPoolExecutor(args.small_file_thread)\r\n\r\n manager = multiprocessing.Manager()\r\n finished_num = manager.Value(ctypes.c_longdouble, 0, lock=True)\r\n total_num = manager.Value(ctypes.c_longdouble, 0, lock=False)\r\n \r\n global_small_file_thread_count_lock = multiprocessing.Lock()\r\n global_small_file_thread_count = manager.Value(ctypes.c_int, 0, lock=True)\r\n\r\n global_large_file_thread_count_lock = multiprocessing.Lock()\r\n global_large_file_thread_count = manager.Value(ctypes.c_int, 0, lock=True)\r\n global_part_thread_count_lock = multiprocessing.Lock()\r\n global_part_thread_count = manager.Value(ctypes.c_int, 0, lock=True)\r\n\r\n s3_client_list = get_s3_client_list()\r\n\r\n #Compute the total size of all files\r\n sys.stdout.write('Please wait, calculating the total file size you need to upload...' + '\\n')\r\n for file_path in file_list:\r\n if os.path.isdir(file_path):\r\n continue\r\n total_num.value += os.path.getsize(file_path)\r\n\r\n sys.stdout.write('There are ' + str(total_num.value/1024) + ' KB of data to upload.' + '\\n')\r\n sys.stdout.write('Please wait, uploading...' 
+ '\\n')\r\n sys.stdout.flush()\r\n\r\n file_list_len = len(file_list)\r\n i = 0\r\n while i < file_list_len:\r\n file_path = file_list[i]\r\n \r\n i += 1\r\n\r\n if os.path.isdir(file_path):\r\n continue\r\n\r\n file_name = os.path.relpath(file_path, local_folder_absolute_path)\r\n file_name = file_name.replace('\\\\', '/')\r\n #sys.stdout.write(file_name + '\\n')\r\n cur_file_size = os.path.getsize(os.path.join(local_folder_absolute_path, file_name))\r\n #upload small file\r\n if(cur_file_size <= args.samll_file_size):\r\n if global_small_file_thread_count.value >= args.small_file_thread:\r\n seconds = round(random.uniform(0, 1), 2) # wait: 0-1 seconds\r\n time.sleep(seconds)\r\n i -= 1\r\n continue\r\n alter_global_small_file_thread_count(global_small_file_thread_count, global_small_file_thread_count_lock, 1)\r\n #global_small_file_thread_count.value += 1\r\n threadPoolExecutor.submit(upload_small_file, *(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, bucket_path, file_name, total_num, finished_num, cur_file_size, global_small_file_thread_count, print_lock, global_small_file_thread_count_lock))\r\n #upload large file\r\n else:\r\n if global_large_file_thread_count.value >= args.large_file_thread:\r\n seconds = 1 + round(random.uniform(0, 1), 2) # wait: 1-2 seconds\r\n time.sleep(seconds)\r\n i -= 1\r\n continue\r\n alter_global_large_file_thread_count(global_large_file_thread_count, global_large_file_thread_count_lock, 1)\r\n #global_large_file_thread_count.value += 1\r\n threadPoolExecutor.submit(upload_large_file, *(s3_client_list, local_folder_absolute_path, failed_list_file_path, bucket_name, bucket_path, file_name, total_num, finished_num, global_large_file_thread_count, global_part_thread_count, print_lock, global_large_file_thread_count_lock, global_part_thread_count_lock))\r\n\r\n threadPoolExecutor.shutdown(wait=True)\r\n\r\n\r\n sys.stdout.write('Total time used: %.2f seconds.\\n' % (time.time() - start_time))\r\n\r\n if os.path.exists(failed_list_file_path):\r\n logging.error('Some files failed to upload, see %s\\n' % failed_list_file_path)\r\n\r\n except Exception as e:\r\n sys.stdout.write(str(e) + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"yellow_zone_uploader.py","file_name":"yellow_zone_uploader.py","file_ext":"py","file_size_in_byte":21038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
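Stripped of the retry and thread-count bookkeeping, the multipart flow the uploader above implements reduces to the sketch below. This is a minimal illustration, not the script's exact behaviour: `client` is assumed to be a boto3-style S3 client, the service's base64 key encoding is omitted, and real S3 requires every part except the last to be at least 5 MB.

PART_SIZE = 200 * 1024 * 1024  # mirrors the script's --part_size default

def multipart_upload(client, bucket, key, path):
    # 1) open the upload and remember its UploadId
    mpu = client.create_multipart_upload(Bucket=bucket, Key=key)
    parts = []
    with open(path, 'rb') as f:
        part_number = 1
        while True:
            data = f.read(PART_SIZE)
            if not data:
                break
            # 2) upload each chunk under a 1-based part number
            resp = client.upload_part(Bucket=bucket, Key=key, PartNumber=part_number,
                                      UploadId=mpu['UploadId'], Body=data)
            # 3) S3 wants every part's ETag echoed back on completion
            parts.append({'PartNumber': part_number, 'ETag': resp['ETag']})
            part_number += 1
    client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=mpu['UploadId'],
                                     MultipartUpload={'Parts': parts})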
+{"seq_id":"306550236","text":"import csv\nimport json\nimport math\nimport os\nimport requests\nimport sys\n\nNREL_API_KEY = os.getenv('NREL_API_KEY')\nNREL_BASE_URL = 'https://developer.nrel.gov/'\n\nSTATES = [\n \"AK\",\"AL\",\"AR\",\"AZ\",\"CA\",\"CO\",\"CT\",\"DC\",\"DE\",\"FL\",\n \"GA\",\"HI\",\"IA\",\"ID\",\"IL\",\"IN\",\"KS\",\"KY\",\"LA\",\"MA\",\n \"MD\",\"ME\",\"MI\",\"MN\",\"MO\",\"MS\",\"MT\",\"NC\",\"ND\",\"NE\",\n \"NH\",\"NJ\",\"NM\",\"NV\",\"NY\",\"OH\",\"OK\",\"OR\",\"PA\",\"RI\",\n \"SC\",\"SD\",\"TN\",\"TX\",\"UT\",\"VA\",\"VT\",\"WA\",\"WI\",\"WV\",\n \"WY\",\n]\n\nDATA_DIR = 'data'\n# all states, data for all years, files\nALL_STATES_FILE = '%s/all_states_emissions' % DATA_DIR\nALL_STATES_FILE_JSON = '%s.json' % ALL_STATES_FILE\n\n# all states, data for most recent year, files\nALL_STATES_RECENT_FILE = '%s/all_states_emissions_recent' % DATA_DIR\nALL_STATES_RECENT_CSV = '%s.csv' % ALL_STATES_RECENT_FILE\nALL_STATES_RECENT_HTML = '%s.html' % ALL_STATES_RECENT_FILE\n\n\nclass NRELClient(object):\n \"\"\"\n API Client to access NREL API\n \"\"\"\n def __init__(self):\n self.key = NREL_API_KEY\n self.base_url = NREL_BASE_URL\n self.emissions_url = '%sapi/cleap/v1/state_co2_emissions' % self.base_url\n self.base_params = {'api_key': self.key}\n\n # data\n self.all_states_emissions = self.get_all_states_emissions()\n\n\n def get_state_emissions(self, state, emissions_type='total'):\n \"\"\"\n Return state emissions for the given state.\n\n :param state: two letter abbreviation\n :param emissions_type: Choices are 'commercial', 'electric',\n 'residential', 'industrial', 'transportation', 'total'\n \"\"\"\n params = {'state_abbr': state, 'type': emissions_type}\n params.update(**self.base_params)\n r = requests.get(self.emissions_url, params=params)\n emissions_data = r.json()\n if r.status_code >= 400:\n print(emissions_data)\n r.raise_for_status()\n return emissions_data\n\n def get_all_states_emissions(self, emissions_type='total'):\n emissions_data = {}\n for state in STATES:\n try:\n state_data = self.get_state_emissions(\n state,\n emissions_type\n )\n emissions_data[state] = state_data['result'][0]['data']\n except:\n e = sys.exc_info()[0]\n print(e)\n return emissions_data\n\n\n def write_all_states_emissions_recent_sorted(self):\n recent_year = '2014'\n recent_sorted = []\n for state,emissions in self.all_states_emissions.items():\n inserted = False\n for item in enumerate(recent_sorted):\n if emissions[recent_year] < item[1][2]:\n recent_sorted[item[0]:0] = [\n [state, recent_year,emissions[recent_year]]\n ]\n inserted = True\n break\n if not inserted:\n recent_sorted.append(\n [state, recent_year,emissions[recent_year]]\n )\n # get the max emissions from the last item of recent_sorted\n max_emissions = recent_sorted[-1][2]\n max_graph_scale = int(math.ceil(max_emissions/100))*100\n print('max_graph_scale: %s' % max_graph_scale)\n with open(ALL_STATES_RECENT_CSV, 'w') as outfile:\n writer = csv.writer(outfile)\n for state in recent_sorted:\n # percentage of max used for graph height\n height_percentage = int(round((state[2] / max_graph_scale)*100))\n state.append(height_percentage)\n writer.writerow(state)\n\n def write_all_states_emissions_html(self):\n if not os.path.isfile(ALL_STATES_RECENT_CSV):\n self.write_all_states_emissions_recent_sorted()\n\n with open(ALL_STATES_RECENT_CSV, 'r') as infile:\n reader = csv.reader(infile)\n with open('vert_%s' % ALL_STATES_RECENT_HTML, 'w') as vert_file:\n with open('horiz_%s' % ALL_STATES_RECENT_HTML, 'w') as horiz_file:\n for state in reader:\n # the original tag markup was garbled in this copy of the source; below is a plausible\n # div-based reconstruction that keeps the (year, value, height%, state) argument order\n vertical_bar_html = '''\n<div class=\"vert-bar\" data-year=\"%s\" title=\"%s\" style=\"height: %s%%\">&nbsp;&bull; %s</div>\n ''' % (state[1], state[2], state[3], state[0])\n horizontal_bar_html = '''\n<div class=\"horiz-bar\" data-year=\"%s\" title=\"%s\" style=\"width: %s%%\">&nbsp;&bull; %s</div>\n ''' % (state[1], state[2], state[3], state[0])\n vert_file.write(vertical_bar_html)\n horiz_file.write(horizontal_bar_html)\n\n def write_all_states_json(self):\n with open(ALL_STATES_FILE_JSON, 'w') as json_file:\n json.dump(self.all_states_emissions, json_file)\n\n\nif __name__=='__main__':\n client = NRELClient()\n client.write_all_states_json()\n","sub_path":"nrel.py","file_name":"nrel.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"551938468","text":"import os\nimport copy\n\nimport torch\n\nimport numpy as np\n\nfrom .get_functions import get_save_path\n\ndef load_model(save_path, data_type, image_size,\n batch_size, model_name, lr, epochs, c_fold=-1, **kwargs) :\n kwargs['save_path'] = save_path\n kwargs['data_type'] = data_type\n kwargs['image_size'] = image_size\n kwargs['batch_size'] = batch_size\n kwargs['model_name'] = model_name\n kwargs['lr'] = lr\n kwargs['epochs'] = epochs\n kwargs['fold'] = c_fold\n\n model_dirs, load_model_path = get_save_path(**kwargs)\n\n load_path = os.path.join(model_dirs.format(c_fold), '{}.pth'.format(load_model_path))\n\n print(\"Your model is loaded from {}.\".format(load_path))\n checkpoint = torch.load(load_path)\n print(\".pth keys() = {}.\".format(checkpoint.keys()))\n\n model = checkpoint['model']\n model.load_state_dict(copy.deepcopy(checkpoint['model_state_dict']))\n\n return model\n\ndef load_total_metric_results(**kwargs) :\n model_dirs, save_model_path = get_save_path(**kwargs)\n save_path = os.path.join(model_dirs, 'Total results {}.txt'.format(save_model_path))\n loss, acc, pre, rec, f1, iou = [], [], [], [], [], []\n\n for fold in range(1, kwargs['k_fold'] + 1):\n load_path = os.path.join(model_dirs, 'fold {}'.format(fold),\n 'test report {}.txt'.format(save_model_path))\n f = open(load_path, 'r')\n while True:\n line = f.readline()\n if not line: break\n line_split = line.split()\n if 'loss' in line_split : loss.append(float(line_split[-1]))\n if 'accuracy' in line_split : acc.append(float(line_split[-1]))\n if 'precision' in line_split : pre.append(float(line_split[-1]))\n if 'recall' in line_split : rec.append(float(line_split[-1]))\n if 'f1_score' in line_split : f1.append(float(line_split[-1]))\n if 'iou' in line_split : iou.append(float(line_split[-1]))\n f.close()\n\n loss = np.array(loss)\n acc = np.array(acc)\n pre = np.array(pre)\n rec = np.array(rec)\n f1 = np.array(f1)\n iou = np.array(iou)\n\n return loss, acc, pre, rec, f1, iou, save_path","sub_path":"utils/load_functions.py","file_name":"load_functions.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"632980059","text":"from pyramid.config import Configurator\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\nfrom pyramid.events import NewRequest\nfrom sqlalchemy import engine_from_config\nfrom pyramid.mako_templating import renderer_factory as mako_factory\n\nfrom .models import DBSession\nfrom pyramid_beaker import session_factory_from_settings\n\nauthetntication_policy = AuthTktAuthenticationPolicy('MyTopSecret')\nauthorization_policy = ACLAuthorizationPolicy()\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n session_config = 
session_factory_from_settings(settings)\n config = Configurator(settings=settings, session_factory=session_config, root_factory='.models.RootFactory')\n config.set_authentication_policy(authetntication_policy)\n config.set_authorization_policy(authorization_policy)\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_renderer('.html', mako_factory)\n config.add_route('home', '/')\n config.add_route('login', '/login')\n config.add_route('logout', '/logout')\n config.add_route('about', '/about')\n config.add_route('contact', '/contact')\n config.add_route('income', '/income')\n config.add_route('income_category', '/income-category')\n config.add_route('expense', '/expense')\n config.add_route('expense_category', '/expense-category')\n config.add_route('balance', '/balance')\n \n config.add_subscriber('.subscribers.csrf_validation', NewRequest)\n \n config.scan()\n return config.make_wsgi_app()\n\n","sub_path":"personalfinancemanager/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"84577638","text":"\n\ndef main_func():\n from src.tools import addition, multip,create_dict\n params = create_dict()\n print(params)\n number_1 = params['Parameter1']\n number_2 = params['Parameter2']\n print(number_1)\n print(number_2)\n dummy_addition = addition(number_1,number_2)\n dummy_multip = multip(number_1, number_2)\n text_a = 'Addition: {t1} + {t2} = {t3}'.format(t1=number_1,t2=number_2, t3=dummy_addition)\n text_m = 'Multiplication: {t1} * {t2} = {t3}'.format(t1=number_1,t2=number_2, t3=dummy_multip)\n print(text_a)\n print(text_m)\n\n\n\n\nif __name__ == '__main__':\n main_func()\n","sub_path":"main_robert.py","file_name":"main_robert.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"248977389","text":"import matplotlib.pyplot as plt\nfrom configuration import configuration\nfrom frechet import normTrajectory, norm\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.model_selection import train_test_split\nfrom analyzer import visualizePerturbations\nfrom sklearn.metrics import mean_squared_error\n\n\nclass NNConfiguration(configuration):\n\n def __init__(self, timeStep=0.01, steps=100, samples=10, dynamics='None', dimensions=2, lowerBound=[], upperBound=[]):\n\n configuration.__init__(self, timeStep, steps, samples, dynamics, dimensions, lowerBound, upperBound)\n self.relativeError = []\n self.sortedError = []\n self.input = None\n self.output = None\n self.no_of_layers = 2\n self.neurons = [30, 30]\n self.test_size = 0.33\n\n def generateTrajectories(self):\n self.storeTrajectories()\n\n def setLayers(self, number_of_layers):\n self.no_of_layers = number_of_layers\n\n def setNeurons(self, neurons):\n self.neurons = neurons\n\n def trainTestNN(self, act_fn='relu', solver_fn='adam'):\n\n X_train, X_test, Y_train, Y_test = train_test_split(self.input, self.output, test_size=self.test_size, random_state=1)\n clf = None\n if self.no_of_layers == 2:\n clf = MLPRegressor(activation = act_fn, solver= solver_fn, alpha=1e-5, hidden_layer_sizes=(self.neurons[0], self.neurons[1]), random_state=1)\n elif self.no_of_layers == 3:\n clf = MLPRegressor(activation = act_fn, solver= solver_fn, alpha=1e-5, hidden_layer_sizes=(self.neurons[0], self.neurons[1], self.neurons[2]),\n random_state=1)\n\n clf.fit(X_train, Y_train)\n # train_predictions = 
clf.predict(X_train)\n test_predictions = clf.predict(X_test)\n print(\"Y_test length {} test_predictions length {}\".format(len(Y_test), len(test_predictions)))\n\n mse = mean_squared_error(Y_test, test_predictions)\n print(\"Mean Squared error {}\".format(mse))\n # mseError = 0.0\n # for idx in range(0, len(test_predictions)):\n # mseError += mean_squared_error(test_predictions[idx], Y_test[idx])\n # mseError = mseError/len(test_predictions)\n # print(\"Manual MSE {}\".format(mseError))\n relativeError = []\n for idx in range(0, len(test_predictions)):\n distVal = norm(test_predictions[idx] - Y_test[idx], -1)\n relativeError += [distVal/norm(Y_test[idx], -1)]\n plt.plot(relativeError)\n print(\"Mean relative error {}\".format(sum(relativeError)/len(test_predictions)))\n self.relativeError = [relativeError]\n sortedError = []\n for idx in range(0, len(test_predictions)):\n sortedError += [norm(test_predictions[idx], -1)/norm(Y_test[idx], -1)]\n plt.plot(sortedError)\n self.sortedError = [sortedError]\n print(\"Mean sorted error {}\".format(sum(sortedError) / len(test_predictions)))\n plt.show()\n\n def showRelativePerturbation(self):\n\n visualizePerturbations(self.relativeError)\n\n def showApproxPerturbation(self):\n\n visualizePerturbations(self.sortedError)\n","sub_path":"pyExploreNN/NNConfiguration_setup/NNConfiguration.py","file_name":"NNConfiguration.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"592816149","text":"import dash\nimport pandas as pd\nimport pathlib\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__)\n\n# get relative data folder\nPATH = pathlib.Path(__file__).parent\nprint(PATH)\nDATA_PATH = PATH.joinpath(\"data\").resolve()\n\napp = dash.Dash(\n __name__, meta_tags=[\n {\"name\": \"viewport\", \"content\": \"width=device-width\"}],\n external_stylesheets=external_stylesheets\n)\nserver = app.server\n\n\napp.layout = html.Div([html.H3('Prueba'),\n html.H1('Hello Dash'),\n html.Div([html.P('Dash converts Python classes into HTML'),\n html.P(\n \"This conversion happens behind the scenes by Dash's JavaScript front-end\")\n ]),\n dcc.Markdown('''\n #### Dash and Markdown\n\n Dash supports [Markdown](http://commonmark.org/help).\n\n Markdown is a simple way to write and format text.\n It includes a syntax for things like **bold text** and *italics*,\n [links](http://commonmark.org/help), inline `code` snippets, lists,\n quotes, and more.\n ''')\n ])\n\n\n# Main\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","sub_path":"apps/dash-oil-and-gas/exploraciones-inegi/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"240059850","text":"# Find the list of people who are not coming to the party\n\n\n# Step1. Tried DFS - this seems hard to solve with DFS...\n\n# Step2. Tried a greedy algorithm - got wrong answers\n\n# Step3. Exhaustive search..?\n# There are only 5 people anyway... might as well just build every subset..
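# Illustration of the encoding (not part of the original solution): each integer i in
# range(1 << N) is one subset mask; bit j of i set means people[j] is included,
# e.g. i == 0b01011 selects ['A', 'B', 'D'].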
\n\"\"\"\n# Building every subset with bitwise operators!\n\npeople = ['A', 'B', 'C', 'D', 'E']\nN = 5\nresult_list = []\n\nfor i in range(1< 0:\n data = self.dict_external_sources[source].pop(0)\n else:\n data = self._genSilence(505)\n b += source.getVolume() * np.frombuffer(data, dtype=np.int16)\n b = b.clip(-32767.0, 32767.0)\n for extra_file in self.list_extra_files_data:\n if extra_file[\"loop\"]:\n data = extra_file[\"data\"][extra_file[\"index\"]]\n extra_file[\"index\"] += 1\n if extra_file[\"index\"] >= len(extra_file[\"data\"]):\n extra_file[\"index\"] = 0\n else:\n if len(extra_file[\"data\"]) > 0:\n data = extra_file[\"data\"].pop(0)\n if len(data) < 1010:\n data += self._genSilence((1010-len(data))//2)\n else:\n self.list_extra_files_data.pop(self.list_extra_files_data.index(extra_file))\n data = self._genSilence(505)\n b += extra_file[\"volume\"]* np.frombuffer(data, dtype=np.int16)\n if len(self.list_morze) > 0:\n b += self.morze_volume * np.frombuffer(self.list_morze.pop(0), dtype=np.int16)\n dat += self.codec_encoder.encode((b.astype(np.int16)).tobytes())\n\n if dat != '':\n if self.canSend:\n self.sock.writeDatagram(dat, QHostAddress(self.ipAddr), 1112)\n\n\nclass tRadioObmen(QtCore.QObject):\n def __init__(self, ):\n QtCore.QObject.__init__(self)\n\n self.kis_r168 = r168_NetSender(self, \"30.0.98.72\")\n\n self.kis_tlf = r168_NetSender(self, \"30.0.98.61\")\n\n self.__volume = 0.25\n\n def setVolume(self, volume):\n\n self.__volume = volume\n\n def getVolume(self):\n\n return self.__volume\n\n def call(self,val):\n self.kis_r168.playWavFile(\"beep-02.wav\", False, self.__volume)\n self.kis_tlf.playWavFile(\"beep-02.wav\", False, self.__volume)\n\n def beep(self, val):\n if val == 1:\n\n self.kis_tlf.canSend = True\n self.kis_r168.canSend = True\n self.kis_tlf.addRcvr(self.kis_r168)\n self.kis_r168.playWavFile(\"beep-02.wav\", False, self.__volume)\n self.kis_tlf.playWavFile(\"beep-02.wav\", False, self.__volume)\n else:\n self.kis_tlf.remRcvr(self.kis_r168)\n self.kis_tlf.canSend = False\n self.kis_r168.canSend = False\n\n def transmit(self, val):\n if val == 1:\n self.kis_tlf.canSend = True\n self.kis_r168.canSend = True\n self.kis_r168.addRcvr(self.kis_tlf)\n else:\n self.kis_r168.remRcvr(self.kis_tlf)\n #self.kis_tlf.canSend = False\n #self.kis_r168.canSend = False\n\n def stop(self):\n self.kis_r168.remRcvr(self.kis_tlf)\n self.kis_tlf.remRcvr(self.kis_r168)\n self.kis_tlf.canSend = False\n self.kis_r168.canSend = False\n\nradioObmen=tRadioObmen()\n##################################################################################\n","sub_path":"common_blocks/PRZ/imitators/SUPPORT/r168__/radioObmen.py","file_name":"radioObmen.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"602280099","text":"import asyncio\nimport functools\nfrom typing import Callable,Any\nimport time\nasync def delay(seconds:int)->int:\n print(f\"sleeping for {seconds}\")\n await asyncio.sleep(seconds)\n print(f\"finished sleeping for {seconds}\")\n return seconds\n\n\ndef async_timed():\n def wrapper(func:Callable)->Callable:\n async def wrapped(*args,**kwargs)->Any:\n print(f\"starting {func} with args {args} {kwargs}\")\n start=time.time()\n try:\n return await func(*args,**kwargs)\n finally:\n end=time.time()\n total=end-start\n print(f\"finished {func} in {total:.4f} seconds\")\n return wrapped\n return 
wrapper","sub_path":"concurrency_with_asyncio/ch2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"220248613","text":"import requests\nimport json\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import *\nimport datetime\nimport pytz\n\n\n# http://docs.sqlalchemy.org/en/latest/orm/tutorial.html\nBase = declarative_base()\nSession = sessionmaker()\nmetadata = MetaData()\n\ndef call_api(url):\n \"\"\"Calls API and returns info from that\"\"\"\n\n req = requests.get(url)\n return req\n\ndef write_file (data):\n \"\"\"Retrieves information from Dublin Bes API and stores as JSON\"\"\"\n\n req_text= data.text\n json_parsed=json.loads(req_text)\n return json_parsed\n\nclass weather:\n def current_weather(self):\n\n# list1=json_parsed['list']\n# first = list1[0]\n main=json_parsed2['main']\n temp = main['temp']\n temp_min = main['temp_min']\n temp_max = main['temp_max']\n humidity = main['humidity']\n pressure = main['pressure']\n weather = json_parsed2['weather']\n weather_desc = weather[0]\n description = weather_desc['description']\n mainDescription = weather_desc['main']\n wind = json_parsed2['wind']\n speed = wind['speed']\n deg = wind['deg']\n cloud=json_parsed2['clouds']\n cloudiness=cloud['all']\n dt = json_parsed2['dt']\n timestamp=datetime.datetime.fromtimestamp(dt, pytz.timezone('Europe/Dublin'))\n\n delete_current();\n insert_current(temp, temp_min, temp_max, description, mainDescription, speed, deg, dt, timestamp, humidity, pressure, cloudiness)\n \n #http://pythonda.com/collecting-storing-tweets-python-mysql\n\ndef connect():\n \"\"\"Function to connect to database on Amazon Web Services\"\"\"\n try:\n engine = create_engine(\n 'mysql+mysqlconnector://root:sbsw@127.0.0.1:1024/sbsw')\n #port = 3306\n connection = engine.connect()\n Session.configure(bind=engine)\n return engine\n # https://campus.datacamp.com/courses/introduction-to-relational-databases-in-python/advanced-sqlalchemy-queries?ex=2#skiponboarding\n\n except Exception as e:\n print(\"An error occurred when connecting to the database: \", e)\n # https://dev.mysql.com/doc/connector-python/en/connector-python-api-errors-error.html\n # https://campus.datacamp.com/courses/introduction-to-relational-databases-in-python/advanced-sqlalchemy-queries?ex=2#skiponboarding\n\ndef delete_current():\n try:\n connection = engine.connect()\n connection.execute(\"TRUNCATE TABLE dbus_current_weather;\")\n return\n\n except Exception as e:\n print(\"An error occurred when deleting current rows: \", e)\n\ndef delete_forecast():\n try:\n connection = engine.connect()\n connection.execute(\"TRUNCATE TABLE dbus_forecast;\")\n return\n\n except Exception as e:\n print(\"An error occurred when deleting forecast rows: \", e)\n \n \ndef insert_current(temp, temp_min, temp_max, description, mainDescription, speed, deg, dt, timestamp, humidity, pressure, cloudiness):\n try:\n connection = engine.connect()\n connection.execute(\n \"INSERT INTO dbus_current_weather (temp, min_temp, max_temp, description, mainDescription, wind_speed, wind_direction, dt, datetime, humidity, pressure, cloudiness) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\",\n (temp, temp_min, temp_max, description, 
mainDescription, speed, deg, dt, timestamp, humidity, pressure, cloudiness))\n except Exception as e:\n print(\"An error occurred inserting data into current_weather table: \", e)\n return\n\ndef insert_forecast(temp, temp_min, temp_max, description, mainDescription, speed, deg, dt_txt, humidity):\n try:\n connection = engine.connect()\n connection.execute(\n \"INSERT INTO dbus_forecast (temp, min_temp, max_temp, description, mainDescription, wind_speed, wind_direction, datetime, humidity) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);\",\n (temp, temp_min, temp_max, description, mainDescription, speed, deg, dt_txt, humidity))\n except Exception as e:\n print(\"An error occurred inserting data into forecast_weather table: \", e)\n return\n\nurl2=\"http://api.openweathermap.org/data/2.5/weather?id=2964574&units=metric&APPID=bb260f441e7da59a28734895b6574b4d\"\n\nengine = connect()\ndata2 = call_api(url2)\njson_parsed2=write_file(data2)\n\nrun = weather()\nrun.current_weather()\n\n\n","sub_path":"dublin_bus/current_weather_scraper.py","file_name":"current_weather_scraper.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"568383222","text":"#!/usr/bin/env python3\n\n#stdin supplies the input file to be split and simplified - this can be given by < or through piping in\n#in executing this file < .txt needs to be included as the script does not write to a file directly\nimport sys\n\nfor line in sys.stdin:\n if \"DROME\" in line:\n fields = line.rstrip(\"\\r\\n\").split()\n if fields[-1].startswith(\"FBgn\"):\n print(fields[len(fields)-1], \"\\t\", fields[len(fields)-2])\n \n\n ","sub_path":"day2-homework/make_simplified_mapping_file.py","file_name":"make_simplified_mapping_file.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"41852585","text":"from django.shortcuts import render,redirect\nfrom .forms import *\nfrom django.http import HttpResponse\nimport xlrd\nfrom .models import *\nfrom rest_framework import viewsets\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom staff.serializers import *\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import mixins\n# from rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import *\nfrom rest_framework import generics,reverse\nfrom .permissions import *\nfrom django.core.mail import send_mail\nfrom django.contrib.auth import logout,authenticate,login\nfrom .models import *\nfrom .extras import *\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n\n\n\n\n\n\n\ndef upload_excel(request):\n if request.user.is_staff==True:\n if request.method == 'POST':\n s_city = request.user.staff.city.name\n userform = user_form(request.POST)\n excelform = excel_form(request.POST,request.FILES)\n if(userform.is_valid()):\n g_city = request.POST['city']\n if s_city == g_city:\n userform.save()\n\n return redirect('home-page')\n elif(excelform.is_valid()):\n excelform.instance.uploaded_by = request.user\n x = excelform.save()\n path = str(excelform.instance.sheet.path)\n user_profile_from_excel(path,s_city)\n return redirect('home-page')\n\n excelform = excel_form()\n 
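# unbound forms for the initial GET render of the page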
userform = user_form()\n return render(request,'excel.html',{'form':excelform,'user_form':userform})\n return render(request,'error.html')\n\n\n\ndef get_model(name,model):\n obj = model.objects.get(name = name)\n return obj\n\n\n\ndef user_profile_from_excel(path,s_city):\n book = xlrd.open_workbook(path)\n sheet = book.sheet_by_index(0)\n row = sheet.nrows\n cols = sheet.ncols\n\n for i in range(1,row):\n\n adhaar_no= int(sheet.cell_value(i,1))\n name = sheet.cell_value(i,2)\n DOB = sheet.cell_value(i,3)\n gender = sheet.cell_value(i,4)\n city = sheet.cell_value(i,5)\n state = sheet.cell_value(i,6)\n\n city_model = get_model(city,City)\n state_model = get_model(state,State)\n print(DOB)\n new = str((xlrd.xldate_as_datetime(DOB,book.datemode)))[0:10]\n birth = str(DOB)\n if s_city == city:\n new = user_profile.objects.create(adhaar_no=adhaar_no,name=name,DOB=new,gender= gender,city=city_model, state=state_model)\n new.save()\n\n\n\n\ndef load_cities(request):\n State_id = request.GET.get('state')\n # st_obj = State.objects.get(id=State_id)\n cities = City.objects.filter(state_id=State_id).order_by('name')\n # cities = City.objects.filter(state=st_obj).order_by('name')\n\n for city in cities:\n print(city)\n\n return render(request, 'city_dropdown_list_options.html', {'cities': cities})\n\n\n\n\nclass Dber_list(generics.ListCreateAPIView):\n queryset = user_profile.objects.all()\n serializer_class = user_profile_serializer\n\n\nclass Dber_detail(generics.RetrieveUpdateDestroyAPIView):\n queryset = user_profile.objects.all()\n serializer_class = user_profile_serializer\n # permission_classes = ['IsStaffOrAdmin']\n\n\n\n\n@login_required(login_url='dber-logout')\ndef Dber_mail(request):\n if request.method == 'POST':\n form = email_form(request.POST)\n subject = request.POST.get('subject')\n content = request.POST.get('content')\n send_to = request.POST.get('send_to')\n sent_by = request.user.user_profile.email\n # sent_by = 'sunilkumar.sobha@gmail.com'\n send_mail(subject,content,sent_by,[send_to],fail_silently=False,)\n return redirect('home-page')\n form = email_form()\n return render(request,'dber_mail.html',{'form':form})\n\n\n\n\n@login_required(login_url='dber-logout')\ndef staff_mail(request):\n if request.user.is_staff==True:\n if request.method == 'POST':\n form = staff_email_form(request.POST)\n subject = request.POST.get('subject')\n content = request.POST.get('content')\n sent_by = request.user.staff.email\n city = request.user.staff.city\n objs = user_profile.objects.filter(city = city,adhaar_linked=True)\n send_to =[]\n for x in objs:\n send_to.append(x.email)\n send_mail(subject,content,sent_by,send_to,fail_silently=False,)\n return redirect('home-page')\n form = staff_email_form()\n return render(request,'staff_mail.html',{'form':form})\n\n\n\n\ndef register(request):\n if request.method == 'POST':\n reg_form = register_form(request.POST)\n if reg_form.is_valid:\n adh = request.POST['Adhaar']\n ad_list =[]\n for user in user_profile.objects.all():\n if int(user.adhaar_no) == int(adh):\n u_name = request.POST['username']\n passw = request.POST['password']\n email = request.POST['email']\n user.adhaar_linked = True\n user.username = u_name\n user.Password = passw\n user.email = email\n\n u_name = request.POST['username']\n passw = request.POST['password']\n try:\n new = User.objects.create(username =u_name,password=passw)\n new.save()\n new.set_password(passw)\n user.user = new\n new.save()\n user.save()\n return redirect('dber-logout')\n except:\n pass\n # 
print(HttpResponse(\"adhhar number not matched , please contact administrator\"))\n # return HttpResponse(\"

    User Not Found

    \")\n return render(request,'error.html')\n\n reg_form = register_form()\n return render(request,'register.html',{'r_form':reg_form})\n\n\n\n\n\ndef login_view(request):\n if request.method == 'POST':\n l_form = login_form(request.POST)\n if l_form.is_valid():\n u_name = request.POST['username']\n passw = request.POST['password']\n user = authenticate(username=u_name,password=passw)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('home-page')\n return redirect('dber-login')\n l_form = login_form()\n return render(request,'login.html',{'l_form':l_form})\n\n\n\n\ndef custom_filter(name1,state1,city1,list):\n list1 = getquery_name('name',name1,list)\n list2 = getquery_state('state',state1,list1)\n list3 = getquery_city('city',city1,list2)\n return list3\n\n\n\n@login_required(login_url='dber-logout')\ndef HomePage(request):\n if request.user.is_staff != True:\n klist = User.objects.all()\n list =[]\n for x in klist:\n if x.is_staff != True:\n if x.is_superuser != True:\n list.append(x)\n\n if request.method == 'POST':\n # adhaar = request.POST.get('adhaar')\n name1 = request.POST.get('name')\n state1 = request.POST.get('state')\n city1 = request.POST.get('city')\n # query = request.POST.get('query')\n list4 = custom_filter(name1,state1,city1,list)\n return render(request,'home.html',{'list':list4})\n\n return render(request,'home.html',{'list':list})\n return redirect('upload_excel')\n\n\n\n\n\ndef logout_view(request):\n logout(request)\n return redirect('dber-login')\n\n\n\n@login_required(login_url='dber-logout')\ndef profile_update(request):\n if request.method == 'POST':\n\n if request.user.is_staff==True:\n l_form = staffprofileForm(data=request.POST,instance=request.user.staff)\n else:\n l_form = profileForm(data=request.POST, instance=request.user.user_profile)\n if l_form.is_valid():\n update = l_form.save(commit=False)\n l_form.user = request.user\n l_form.save()\n user = request.user\n if user.is_active:\n user.set_password(l_form.instance.Password)\n user.save()\n return redirect('dber-logout')\n return render(request,'error.html')\n print(request.user.staff.city)\n print(\"\")\n if request.user.is_staff==True:\n print(request.user.staff.city)\n print(\"\")\n l_form = staffprofileForm(instance=request.user.staff)\n else:\n l_form = profileForm(instance=request.user.user_profile)\n return render(request,'new_password.html',{'l_form':l_form})\n","sub_path":"staff/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"554760835","text":"# coding=utf-8\n\n## 思路: 给node增加连个属性: deep, pos\n## deep表示节点深度\n## pos表示正常完全二叉树的节点值,满足\n## - node.right.pos = node.pos * 2 + 1\n## - node.left.pos = node.pos * 2\n## 因此只需要遍历一次二叉树,然后找出每个深度中,pos值最大的节点即可\n\nfrom collections import deque\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n maxDeep = 100\n # @param {TreeNode} root\n # @return {integer[]}\n def rightSideView(self, root):\n if root == None:\n return []\n\n self.maxPos = [-1] * Solution.maxDeep\n self.maxPosNode = [None] * Solution.maxDeep\n\n self.stack = deque()\n\n root.pos = 0\n root.deep = 1\n self.stack.append(root)\n\n while len(self.stack) > 0:\n node = self.stack.pop()\n\n # 子节点入栈\n if node.left != None:\n left = node.left\n left.pos = node.pos * 2\n left.deep = node.deep + 1\n self.stack.append(left)\n if node.right != None:\n right = node.right\n 
right = node.right\n right.pos = node.pos * 2 + 1\n right.deep = node.deep + 1\n self.stack.append(right)\n\n # process the popped node\n if node.pos > self.maxPos[node.deep]:\n self.maxPos[node.deep] = node.pos\n self.maxPosNode[node.deep] = node\n\n\n nodes = filter(lambda x:x != None, self.maxPosNode)\n values = list(map(lambda x:x.val, nodes)) # list() so the result is concrete under Python 3\n return values\n\n\nfrom unittest import TestCase\n\nclass SolutionTest(TestCase):\n def testView(self):\n\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.right.right = TreeNode(4)\n root.left.right = TreeNode(5)\n\n s = Solution()\n print(s.rightSideView(root))\n","sub_path":"binary-tree-right-side-view.py","file_name":"binary-tree-right-side-view.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"403207536","text":"import cv2\nimport numpy as np\n\nimport ip_draw as draw\nimport ip_detection_utils as util\nimport ocr_classify_text as ocr\nfrom CONFIG import Config\n\nC = Config()\n\n\ndef get_corner(boundaries):\n \"\"\"\n Get the top left and bottom right points of boundary\n :param boundaries: boundary: [top, bottom, left, right]\n -> up, bottom: (column_index, min/max row border)\n -> left, right: (row_index, min/max column border) detect range of each row\n :return: corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n \"\"\"\n corners = []\n for boundary in boundaries:\n top_left = (min(boundary[0][0][0], boundary[1][-1][0]), min(boundary[2][0][0], boundary[3][-1][0]))\n bottom_right = (max(boundary[0][0][0], boundary[1][-1][0]), max(boundary[2][0][0], boundary[3][-1][0]))\n corner = (top_left, bottom_right)\n corners.append(corner)\n return corners\n\n\ndef merge_corners(corners):\n \"\"\"\n i. merge overlapped corners\n ii. 
remove nested corners\n :param corners: corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :return: new corners\n \"\"\"\n def merge_overlapped(corner_a, corner_b):\n (top_left_a, bottom_right_a) = corner_a\n (col_min_a, row_min_a) = top_left_a\n (col_max_a, row_max_a) = bottom_right_a\n (top_left_b, bottom_right_b) = corner_b\n (col_min_b, row_min_b) = top_left_b\n (col_max_b, row_max_b) = bottom_right_b\n\n col_min = min(col_min_a, col_min_b)\n col_max = max(col_max_a, col_max_b)\n row_min = min(row_min_a, row_min_b)\n row_max = max(row_max_a, row_max_b)\n return (col_min, row_min), (col_max, row_max)\n\n new_corners = []\n for corner in corners:\n is_intersected = False\n for i in range(len(new_corners)):\n r = util.corner_relation(corner, new_corners[i])\n # if corner is in new_corners[i], ignore corner\n if r == -1:\n is_intersected = True\n break\n # if new_corners[i] is in corner, replace corners[i] with corner\n elif r == 1:\n is_intersected = True\n new_corners[i] = corner\n # if [i] and [j] are overlapped\n if r == 2:\n is_intersected = True\n new_corners[i] = merge_overlapped(corner, new_corners[i])\n\n if not is_intersected:\n new_corners.append(corner)\n return new_corners\n\n\ndef uicomponent_or_block(org, corners,\n compo_max_height=C.THRESHOLD_UICOMPO_MAX_HEIGHT,\n compo_min_edge_ratio=C.THRESHOLD_UICOMPO_MIN_EDGE_RATION,\n min_block_edge_length=C.THRESHOLD_BLOCK_MIN_EDGE_LENGTH):\n \"\"\"\n Select the potential ui components (button, input) from block objects\n :param org: Original image\n :param corners: corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param compo_max_height: Over the threshold won't be counted\n :param compo_min_edge_ratio: Over the threshold won't be counted\n :param min_block_edge_length: Main length for being a block\n :return: corners of compos and blocks\n \"\"\"\n compos = []\n blocks = []\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n height = row_max - row_min\n width = col_max - col_min\n\n if height <= compo_max_height and width/height >= compo_min_edge_ratio:\n compos.append(corner)\n else:\n if width > min_block_edge_length and height > min_block_edge_length:\n blocks.append(corner)\n return blocks, compos\n\n\ndef uicomponent_in_img(org, bin, corners,\n compo_max_height=C.THRESHOLD_UICOMPO_MAX_HEIGHT,\n compo_min_edge_ratio=C.THRESHOLD_UICOMPO_MIN_EDGE_RATION):\n \"\"\"\n Detect potential UI components inner img\n \"\"\"\n def reverse(img):\n \"\"\"\n Reverse the input binary image\n \"\"\"\n r, b = cv2.threshold(img, 1, 255, cv2.THRESH_BINARY_INV)\n return b\n\n corners_compo = []\n pad = 2\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n col_min = max(col_min - pad, 0)\n col_max = min(col_max + pad, org.shape[1])\n row_min = max(row_min - pad, 0)\n row_max = min(row_max + pad, org.shape[0])\n height_img = row_max - row_min\n width_img = col_max - col_min\n\n # ignore small img\n if height_img <= compo_max_height or width_img <= compo_max_height:\n continue\n\n clip_bin = bin[row_min:row_max, col_min:col_max]\n clip_bin = reverse(clip_bin)\n boundary_all, boundary_rec, boundary_nonrec = boundary_detection(clip_bin, min_rec_evenness=C.THRESHOLD_REC_MIN_EVENNESS_STRONG) # rectangle check\n corners_rec = get_corner(boundary_rec)\n corners_rec = 
util.corner_cvt_relative_position(corners_rec, col_min, row_min)\n\n # check the size of rectangle\n for rec in corners_rec:\n (col_min_rec, row_min_rec), (col_max_rec, row_max_rec) = rec\n height_rec = row_max_rec - row_min_rec\n width_rec = col_max_rec - col_min_rec\n if height_rec / height_img < 0.9 and width_rec / width_img < 0.9 and\\\n height_rec <= compo_max_height and width_rec / height_rec >= compo_min_edge_ratio:\n corners_compo.append(rec)\n\n return corners_compo\n\n\ndef img_or_block(org, binary, corners,\n max_thickness=C.THRESHOLD_BLOCK_MAX_BORDER_THICKNESS,\n max_block_cross_points=C.THRESHOLD_BLOCK_MAX_CROSS_POINT):\n \"\"\"\n Check if the objects are img components or just block\n :param org: Original image\n :param binary: Binary image from pre-processing\n :param corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param max_thickness: The max thickness of border of blocks\n :param max_block_cross_points: Ratio of point of interaction\n :return: corners of blocks and imgs\n \"\"\"\n blocks = []\n imgs = []\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n\n is_block = False\n vacancy = [0, 0, 0, 0]\n for i in range(1, max_thickness):\n try:\n # up down\n if vacancy[0] == 0 and (col_max - col_min - 2 * i) is not 0 and (\n np.sum(binary[row_min + i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points:\n vacancy[0] = 1\n # bottom-up\n if vacancy[1] == 0 and (col_max - col_min - 2 * i) is not 0 and (\n np.sum(binary[row_max - i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points:\n vacancy[1] = 1\n # left to right\n if vacancy[2] == 0 and (row_max - row_min - 2 * i) is not 0 and (\n np.sum(binary[row_min + i: row_max - i, col_min + i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points:\n vacancy[2] = 1\n # right to left\n if vacancy[3] == 0 and (row_max - row_min - 2 * i) is not 0 and (\n np.sum(binary[row_min + i: row_max - i, col_max - i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points:\n vacancy[3] = 1\n if np.sum(vacancy) == 4:\n is_block = True\n except:\n pass\n if is_block:\n blocks.append(corner)\n else:\n imgs.append(corner)\n\n return blocks, imgs\n\n\ndef img_irregular(org, corners,\n must_img_height=C.THRESHOLD_IMG_MUST_HEIGHT, must_img_width=C.THRESHOLD_IMG_MUST_WIDTH):\n \"\"\"\n Select potential irregular shaped img elements by checking the height and width\n Check the edge ratio for img components to avoid text misrecognition\n :param org: Original image\n :param corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param must_img_height: Larger is likely to be img\n :param must_img_width: Larger is likely to be img\n :return: corners of img\n \"\"\"\n imgs = []\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n height = row_max - row_min\n width = col_max - col_min\n # assumption: large one must be img component no matter its edge ratio\n if height > must_img_height:\n imgs.append(corner)\n return imgs\n\n\ndef img_refine(org, corners,\n max_img_height_ratio=C.THRESHOLD_IMG_MAX_HEIGHT_RATIO,\n text_edge_ratio=C.THRESHOLD_TEXT_EDGE_RATIO, text_height=C.THRESHOLD_TEXT_HEIGHT):\n \"\"\"\n Remove too large imgs and likely text\n :param org: Original image\n :param corners: 
[(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param max_img_height_ratio: height of img / total height of original image\n :param text_edge_ratio: width / height, if too large, then likely to be text\n :param text_height: common max height of text\n :return: corners of refined img\n \"\"\"\n img_height, img_width = org.shape[:2]\n\n refined_imgs = []\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n height = row_max - row_min\n width = col_max - col_min\n\n # ignore too large ones\n if org.shape[0] > 1000 and height / img_height > max_img_height_ratio:\n continue\n # likely to be text, ignore\n elif 0 < height <= text_height and width / height > text_edge_ratio:\n continue\n refined_imgs.append(corner)\n\n return refined_imgs\n\n\ndef img_shrink(org, binary, corners,\n min_line_length_h=C.THRESHOLD_LINE_MIN_LENGTH_H, min_line_length_v=C.THRESHOLD_LINE_MIN_LENGTH_V,\n max_thickness=C.THRESHOLD_LINE_THICKNESS):\n \"\"\"\n For imgs that are part of a block, strip the img\n \"\"\"\n\n corners_shrunken = []\n pad = 2\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n\n col_min = max(col_min - pad, 0)\n col_max = min(col_max + pad, org.shape[1])\n row_min = max(row_min - pad, 0)\n row_max = min(row_max + pad, org.shape[0])\n\n clip_bin = binary[row_min:row_max, col_min:col_max]\n clip_org = org[row_min:row_max, col_min:col_max]\n\n # detect lines in the image\n lines_h, lines_v = line_detection(clip_bin, min_line_length_h, min_line_length_v, max_thickness)\n # select those perpendicularly intersect with others at endpoints\n lines_h, lines_v = util.line_check_perpendicular(lines_h, lines_v, max_thickness)\n # convert the position of lines into relative position in the entire image\n lines_h, lines_v = util.line_cvt_relative_position(col_min, row_min, lines_h, lines_v)\n\n # shrink corner according to the lines\n corner_shrunken = util.line_shrink_corners(corner, lines_h, lines_v)\n corners_shrunken.append(corner_shrunken)\n\n return corners_shrunken\n\n\n# remove imgs that contain text\ndef rm_text(org, corners,\n must_img_height=C.THRESHOLD_IMG_MUST_HEIGHT, must_img_width=C.THRESHOLD_IMG_MUST_WIDTH,\n ocr_padding=C.OCR_PADDING, ocr_min_word_area=C.OCR_MIN_WORD_AREA, show=False):\n \"\"\"\n Remove area that full of text\n :param org: original image\n :param corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param must_img_height: Too large should be img\n :param must_img_width: Too large should be img\n :param ocr_padding: Padding for clipping\n :param ocr_min_word_area: If too text area ratio is too large\n :param show: Show or not\n :return: corners without text objects\n \"\"\"\n new_corners = []\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n height = row_max - row_min\n width = col_max - col_min\n # highly likely to be block or img if too large\n if height > must_img_height and width > must_img_width:\n new_corners.append(corner)\n else:\n row_min = row_min - ocr_padding if row_min - ocr_padding >= 0 else 0\n row_max = row_max + ocr_padding if row_max + ocr_padding < org.shape[0] else org.shape[0]\n col_min = col_min - ocr_padding if col_min - ocr_padding >= 0 else 0\n col_max = col_max + ocr_padding if col_max + ocr_padding 
< org.shape[1] else org.shape[1]\n # check if this area is text\n clip = org[row_min: row_max, col_min: col_max]\n if not ocr.is_text(clip, ocr_min_word_area, show=show):\n new_corners.append(corner)\n return new_corners\n\n\ndef rm_line(binary, lines):\n \"\"\"\n Remove lines from binary map\n :param binary: Binary image\n :param lines: [line_h, line_v]\n -> line_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int)\n -> line_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int}\n :return: New binary map with out lines\n \"\"\"\n new_binary = binary.copy()\n line_h, line_v = lines\n for line in line_h:\n row = line['head'][1]\n new_binary[row: row + line['thickness'], line['head'][0]:line['end'][0] + 1] = 0\n for line in line_v:\n column = line['head'][0]\n new_binary[line['head'][1]:line['end'][1] + 1, column: column + line['thickness']] = 0\n\n return new_binary\n\n\ndef line_detection(binary,\n min_line_length_h=C.THRESHOLD_LINE_MIN_LENGTH_H, min_line_length_v=C.THRESHOLD_LINE_MIN_LENGTH_V,\n max_thickness=C.THRESHOLD_LINE_THICKNESS):\n \"\"\"\n Detect lines\n :param binary: Binary image from pre-processing\n :param min_line_length_h: Min length for horizontal lines\n :param min_line_length_v: Min length for vertical lines\n :param max_thickness\n :return: lines: [line_h, line_v]\n -> line_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int)\n -> line_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int}\n \"\"\"\n def no_neighbor(start_row, start_col, mode, line=None):\n \"\"\"\n check this point has adjacent points in orthogonal direction\n \"\"\"\n if mode == 'h':\n for t in range(max_thickness + 1):\n if start_row + t >= binary.shape[0] or binary[start_row + t, start_col] == 0:\n # if not start point, update the thickness of this line\n if line is not None:\n line['thickness'] = max(line['thickness'], t)\n return True\n mark_h[start_row + t, start_col] = 255\n return False\n elif mode == 'v':\n for t in range(max_thickness + 1):\n if start_col + t >= binary.shape[1] or binary[start_row, start_col + t] == 0:\n # if not start point, update the thickness of this line\n if line is not None:\n line['thickness'] = max(line['thickness'], t)\n return True\n mark_v[start_row, start_col + t] = 255\n return False\n\n row, column = binary.shape[0], binary.shape[1]\n mark_h = np.zeros(binary.shape, dtype=np.uint8)\n mark_v = np.zeros(binary.shape, dtype=np.uint8)\n lines_h = []\n lines_v = []\n x, y = 0, 0\n while x < row - 1 or y < column - 1:\n # horizontal\n new_line = False\n head, end = None, None\n line = {}\n for j in range(column):\n # line start\n if not new_line and mark_h[x][j] == 0 and binary[x][j] > 0 and no_neighbor(x, j, 'h'):\n head = j\n new_line = True\n line['head'] = [head, x]\n line['thickness'] = -1\n # line end\n elif new_line and (j == column - 1 or mark_h[x][j] > 0 or binary[x][j] == 0 or not no_neighbor(x, j, 'h', line)):\n end = j\n new_line = False\n if end - head > min_line_length_h:\n line['end'] = [end, x]\n lines_h.append(line)\n line = {}\n\n # vertical\n new_line = False\n head, end = None, None\n line = {}\n for i in range(row):\n # line start\n if not new_line and mark_v[i][y] == 0 and binary[i][y] > 0 and no_neighbor(i, y, 'v'):\n head = i\n new_line = True\n line['head'] = [y, head]\n line['thickness'] = 0\n # line end\n elif new_line and (i == row - 1 or mark_v[i][y] > 0 or binary[i][y] == 0 or not no_neighbor(i, y, 'v', line)):\n end = i\n 
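# the horizontal run ended just before column j; the code below records it only if it is long enough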
new_line = False\n if end - head > min_line_length_v:\n line['end'] = [y, end]\n lines_v.append(line)\n line = {}\n\n if x < row - 1:\n x += 1\n if y < column - 1:\n y += 1\n\n return lines_h, lines_v\n\n\n# take the binary image as input\n# calculate the connected regions -> get the bounding boundaries of them -> check if those regions are rectangles\n# return all boundaries and boundaries of rectangles\ndef boundary_detection(binary,\n min_obj_area=C.THRESHOLD_OBJ_MIN_AREA, min_obj_perimeter=C.THRESHOLD_OBJ_MIN_PERIMETER,\n line_thickness=C.THRESHOLD_LINE_THICKNESS, min_rec_evenness=C.THRESHOLD_REC_MIN_EVENNESS,\n max_dent_ratio=C.THRESHOLD_IMG_MAX_DENT_RATIO):\n \"\"\"\n :param binary: Binary image from pre-processing\n :param min_obj_area: objects with a smaller area are ignored\n :param min_obj_perimeter: objects with a smaller perimeter are ignored\n :param line_thickness: objects slimmer than this are ignored\n :param min_rec_evenness: objects below this evenness cannot be rectangular\n :param max_dent_ratio: objects above this dent ratio cannot be rectangular\n :return: boundary: [top, bottom, left, right]\n -> top, bottom: (column_index, min/max row border)\n -> left, right: (row_index, min/max column border) detect range of each row\n \"\"\"\n mark = np.full(binary.shape, 0, dtype=np.uint8)\n boundary_all = []\n boundary_rec = []\n boundary_nonrec = []\n row, column = binary.shape[0], binary.shape[1]\n\n for i in range(row):\n for j in range(column):\n if binary[i, j] == 255 and mark[i, j] == 0:\n # get connected area\n area = util.boundary_bfs_connected_area(binary, i, j, mark)\n # ignore small areas\n if len(area) < min_obj_area:\n continue\n\n # calculate the boundary of the connected area\n boundary = util.boundary_get_boundary(area)\n # ignore areas with a small perimeter\n perimeter = np.sum([len(b) for b in boundary])\n if perimeter < min_obj_perimeter:\n continue\n\n boundary_all.append(boundary)\n # check if it is a line by checking the length of edges\n if util.boundary_is_line(boundary, line_thickness):\n continue\n\n # rectangle check\n if util.boundary_is_rectangle(boundary, min_rec_evenness, max_dent_ratio):\n boundary_rec.append(boundary)\n else:\n boundary_nonrec.append(boundary)\n\n return boundary_all, boundary_rec, boundary_nonrec\n","sub_path":"code/PROJECT/img_processing/lib/ip_detection.py","file_name":"ip_detection.py","file_ext":"py","file_size_in_byte":20790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"563164935","text":"import networkx as nx \r\nfrom community import community_louvain\r\nfrom networkx.algorithms.community.centrality import girvan_newman\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom networkx.algorithms.components import connected_components, number_connected_components\r\nimport itertools\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport datetime\r\n\r\ndef count_inter_edges(graph, cluster_1, cluster_2):\r\n cluster_1_nodes = list(cluster_1)\r\n cluster_2_nodes = list(cluster_2)\r\n temp = []\r\n\r\n for node in cluster_1_nodes:\r\n for n in graph.neighbors(node):\r\n if n in cluster_2_nodes:\r\n temp.append(n)\r\n\r\n return len(temp)\r\n\r\ndef count_intra_edges(graph, cluster):\r\n cluster_nodes = list(cluster)\r\n count = 0\r\n \r\n for node in cluster_nodes:\r\n # keep only the neighbors inside the cluster; the original called temp.remove(i)\r\n # while iterating over temp, which skips elements and over-counts edges\r\n temp = [n for n in graph.neighbors(node) if n in cluster_nodes]\r\n count += len(temp)\r\n \r\n return int(count / 2)\r\n\r\ndef intra_connection_density(graph, cluster):\r\n n 
= len(cluster)\r\n if n > 1:\r\n return (2 * count_intra_edges(graph, cluster)) / (n * (n - 1))\r\n elif n == 1 or n == 0:\r\n return 0\r\n\r\ndef inter_connection_density(graph, cluster_1, cluster_2):\r\n n_1 = len(cluster_1)\r\n n_2 = len(cluster_2)\r\n \r\n return count_inter_edges(graph, cluster_1, cluster_2) / (n_1 * n_2)\r\n\r\ndef coupling_degree(graph, cluster_1, cluster_2):\r\n return inter_connection_density(graph, cluster_1, cluster_2) / (intra_connection_density(graph, cluster_1) + intra_connection_density(graph, cluster_2) + 1)\r\n\r\ndef mc_modularity(graph, clusters):\r\n sum = 0\r\n i = 0\r\n n_clusters = len(clusters)\r\n indexes = itertools.combinations([n for n in range(n_clusters)], 2)\r\n \r\n if n_clusters > 1:\r\n for i in indexes:\r\n sum += coupling_degree(graph, clusters[i[0]], clusters[i[1]])\r\n return 1 - ((2 / (n_clusters * (n_clusters - 1))) * sum)\r\n elif n_clusters == 1:\r\n return 0\r\n\r\ndef find_community_louvain(graph):\r\n start = datetime.datetime.now()\r\n\r\n parts = community_louvain.best_partition(graph)\r\n values = [parts.get(node) for node in sorted(graph.nodes())]\r\n clusters = values\r\n n_clusters = len(np.unique(values))\r\n\r\n duration = datetime.datetime.now() - start\r\n\r\n print('Modularity Optimization Duration ------------------ ', duration)\r\n\r\n return clusters, n_clusters, parts\r\n\r\ndef find_community_gnmc(graph, k = None):\r\n comp = girvan_newman(graph)\r\n max_modularity = 0.0\r\n max_community = None\r\n modularity = 0.0\r\n \r\n if k != None:\r\n communities = []\r\n for community in itertools.islice(comp, k):\r\n communities.append(tuple(sorted(c) for c in community))\r\n else:\r\n communities = comp\r\n \r\n start = datetime.datetime.now()\r\n\r\n for community in communities:\r\n new = mc_modularity(graph, community)\r\n if abs(new - modularity) < sys.float_info.epsilon:\r\n break\r\n else:\r\n modularity = new\r\n if modularity >= max_modularity:\r\n max_modularity = modularity\r\n max_community = community\r\n \r\n duration = datetime.datetime.now() - start\r\n\r\n print('MC Modularity Duration ------------------ ', duration)\r\n \r\n parts = {}\r\n if max_community != None:\r\n n_clusters = len(max_community)\r\n clusters = []\r\n \r\n if n_clusters != 1:\r\n idx = 0\r\n if n_clusters > 1:\r\n for i in max_community:\r\n for j in i:\r\n parts[j] = idx\r\n idx += 1\r\n elif n_clusters == 1:\r\n clusters = [0 for i in range(len(max_community[0]))]\r\n\r\n clusters = [parts.get(node) for node in sorted(graph.nodes())]\r\n\r\n else:\r\n clusters = [0]\r\n n_clusters = 1\r\n parts = {0: 0}\r\n\r\n else:\r\n n_clusters = len(graph.nodes())\r\n clusters = [0 for n in range(n_clusters)]\r\n for i in graph.nodes():\r\n parts[i] = i\r\n\r\n return clusters, n_clusters, parts\r\n\r\ndef plot_graph_community(graph, clusters, style = 'spring'):\r\n if style == 'spring':\r\n nx.draw_spring(graph, cmap = plt.get_cmap('jet'), node_color = clusters, node_size = 35, with_labels = False)\r\n elif style == 'spectral':\r\n nx.draw_spectral(graph, cmap = plt.get_cmap('jet'), node_color = clusters, node_size = 35, with_labels = False)\r\n elif style == 'random':\r\n nx.draw_random(graph, cmap = plt.get_cmap('jet'), node_color = clusters, node_size = 35, with_labels = False)\r\n elif style == 'circular':\r\n nx.draw_circular(graph, cmap = plt.get_cmap('jet'), node_color = clusters, node_size = 35, with_labels = False)\r\n elif style == 'shell':\r\n nx.draw_shell(graph, cmap = plt.get_cmap('jet'), node_color = clusters, node_size = 35, 
with_labels = False)\r\n\r\ndef badly_connected(graph, node_cluster):\r\n # node_cluster = list of node in one cluster\r\n sub = graph.subgraph(node_cluster)\r\n return number_connected_components(sub) > 1\r\n\r\ndef count_badly_connected(graph, parts):\r\n # parts = {: }\r\n temp = pd.DataFrame.from_dict(parts, orient='index').reset_index().sort_values(by='index').reset_index(drop=True)\r\n temp.columns = ['node', 'cluster']\r\n cluster = list(temp['cluster'].unique())\r\n cnt = 0\r\n bad_cluster = []\r\n for i in cluster:\r\n if badly_connected(graph, list(temp[temp['cluster'] == i]['node'])):\r\n bad_cluster.append(i)\r\n cnt += 1\r\n return cnt","sub_path":"community_detection.py","file_name":"community_detection.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"132062159","text":"import time\n\nclass ratRace:\n '''\n Rat race 1.1.2\n This game is a simulation of player's monthly income and expence chart. Player has to provide his salary, monthly expenses & liabilities (if any) along with its tenure during registration.\n Player will have option to spend the remaining balance of creating assets and/or other stuff\n Target of the player is to \n '''\n user=''\n salary=0\n balance=0\n month=1\n\n expenses=[['Education Loan',10000,60],\n ['Monthly Expence',5000,600],\n ['Room rent',5600,240]\n ]\n\n spendingChoices=[['Fixed Deposit - Monthly payout interest 0.5%'],\n ['Recurring Deposit - cumulative monthly interest of 0.3%'],\n ['Liquor - standard rate 700 Rs per bottle'],\n ['Purchase a 2 wheeler vehicle - 5k down payment and 3000 EMI for next 36 months'],\n ['Don\\'t want to spend yet']\n ]\n\n Assets=[]\n Collections=[]\n \n def __init__(self,u,s):\n self.user=u\n self.salary=s\n self.Collections.append(['Salary', s])\n print('Welcome to rate race, '+str(self.user))\n\n def passMonth(self):\n print('-----------------------\\n'+str(self.month)+'th Month\\n-----------------------')\n self.showCollections()\n #print('You salary has been creadited rs '+ str(self.salary)+' INR')\n #self.balance=self.balance+self.salary\n print('Updated balance:' + str(self.balance))\n paymentStatus=0\n\n print('Your expences are:')\n for idx, exp in enumerate(self.expenses):\n print(str(idx+1) + ' ' + str(exp[0])+ ' = Rs ' + str(exp[1]) + ' INR')\n\n print('Enter expence number you want to pay - ')\n \n dummyExpenses = self.expenses\n expStatus=['Unpaid' for i in range(len(dummyExpenses))]\n while(paymentStatus < len(dummyExpenses)+1):\n i=int(input())\n if i==1 and expStatus[0] == 'Unpaid':\n self.balance=self.balance-dummyExpenses[0][1]\n print('Paid Rs '+str(dummyExpenses[0][1])+'/- for '+str(dummyExpenses[0][0])+'. Updated Balance = '+str(self.balance))\n paymentStatus=paymentStatus+1\n expStatus[0]='Paid'\n elif i==2 and expStatus[1] == 'Unpaid':\n self.balance=self.balance-dummyExpenses[1][1]\n print('Paid Rs '+str(dummyExpenses[1][1])+'/- for '+str(dummyExpenses[1][0])+'. Updated Balance = '+str(self.balance))\n paymentStatus=paymentStatus+1\n expStatus[1]='Paid'\n elif i==3 and expStatus[2] == 'Unpaid':\n self.balance=self.balance-dummyExpenses[2][1]\n print('Paid Rs '+str(dummyExpenses[2][1])+'/- for '+str(dummyExpenses[2][0])+'. 
Updated Balance = '+str(self.balance))\n paymentStatus=paymentStatus+1\n expStatus[2]='Paid'\n elif i==4:\n if paymentStatus==len(dummyExpenses):\n print('Done all the payments for '+str(self.month)+'th Month.\\nRemaining balance = '+str(self.balance)+'\\n\\n')\n self.month=self.month+1\n break\n else:\n print('You have not paid all the bills for this month. Try again...')\n else:\n print('Either you provided Invalid input, or you selected an expense which was already paid. Please try again')\n \n def spendMoney(self):\n while(True):\n print('You can choose to spend your remaining balance (Rs '+str(self.balance)+' INR) on below items:\\n')\n for idx, spd in enumerate(self.spendingChoices):\n print( str(idx+1) + ' ' + str(spd[0]))\n print('\\nEnter option# you want to spend on - ')\n i=int(input())\n if i==1:\n print('Enter the amount you want to put in Fixed deposite:')\n amt=int(input())\n if amt0:\n self.spendMoney()\n else:\n print('Sorry, you ran out of balance!')\n break\n print()\n self.showAssets()\n time.sleep(3)\n print('You are leaving with total balance: '+str(self.balance))\n\n def showAssets(self):\n print('Hello '+str(self.user)+' you own below Assets:')\n for asset in self.Assets:\n print(str(asset[0]) + ' ' + str(asset[1]))\n\n def showCollections(self):\n print('Hello '+str(self.user)+' your onthly collections are:')\n for coll in self.Collections:\n if 'Fixed' in coll[0]:\n print('FD interest: ' + str((coll[1]*0.5)/100) + ' INR')\n self.balance=self.balance + float(((coll[1]*0.5)/100))\n if 'Salary' in coll[0]:\n print('Salary: '+ str(self.salary))\n self.balance=self.balance + self.salary\n\n#----------------------\nob=ratRace('Mr A',21410.5)\nob.showAssets()\nob.startRace(2)\n","sub_path":"ratRace.py","file_name":"ratRace.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"101817013","text":"# Copyright (c) 2016-2022 Association of Universities for Research in Astronomy, Inc. (AURA)\n# For license information see LICENSE or https://opensource.org/licenses/BSD-3-Clause\n\nfrom pathlib import Path\n\nfrom definitions import ROOT_DIR\n\n# Checks the headers of all .py files to make sure that they contain the above Copyright message.\n\nHEADER = ('# Copyright (c) 2016-2022 Association of Universities for Research in Astronomy, Inc. 
(AURA)\\n'\n '# For license information see LICENSE or https://opensource.org/licenses/BSD-3-Clause')\n\n\ndef pre_adder(file):\n with open(file, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(HEADER + '\\n' + content)\n\n\nif __name__ == '__main__':\n COPYRIGHT = '# Copyright'\n\n bad_files = []\n for path in Path(ROOT_DIR).rglob('*.py'):\n with open(path, 'r') as f:\n if not f.readline().startswith(COPYRIGHT):\n bad_files.append(path)\n\n if bad_files:\n print('*** FILES MISSING COPYRIGHT ***')\n for bad_file in bad_files:\n print(bad_file)\n pre_adder(bad_file)\n exit(1)\n else:\n print('All files copyrighted.')\n exit(0)\n","sub_path":"scheduler/scripts/check_headers.py","file_name":"check_headers.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"501898031","text":"# coding:utf-8\n'''\nCreated on 2017/12/1.\n\n@author: chk01\n'''\nfrom class_four.week_four.NST.nst_utils import *\nimport tensorflow as tf\nimport scipy\n\nSTYLE_LAYERS = [\n ('conv1_1', 0.0),\n ('conv2_1', 0.1),\n ('conv3_1', 0.1),\n ('conv4_1', 0.3),\n ('conv5_1', 0.5)]\n\n\ndef gram_matrix(A):\n return tf.matmul(A, tf.transpose(A))\n\n\ndef compute_layer_style_cost(a_S, a_G):\n m, n_H, n_W, n_C = a_G.get_shape().as_list()\n\n a_S = tf.reshape(tf.transpose(a_S), shape=[n_C, -1])\n a_G = tf.reshape(tf.transpose(a_G), shape=[n_C, -1])\n GS = gram_matrix(a_S)\n GG = gram_matrix(a_G)\n\n J_style_layer = 1 / (4 * n_H * n_H * n_W * n_W * n_C * n_C) * tf.reduce_sum(tf.square(tf.subtract(GS, GG)))\n\n return J_style_layer\n\n\ndef compute_content_cost(a_C, a_G):\n m, n_H, n_W, n_C = a_G.get_shape().as_list()\n a_C_unrolled = tf.reshape(a_C, shape=[n_C, -1])\n a_G_unrolled = tf.reshape(a_G, shape=[n_C, -1])\n J_content = 1 / (4 * n_C * n_H * n_W) * tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled)))\n\n return J_content\n\n\ndef compute_style_cost(model, STYLE_LAYERS):\n J_style = 0\n for layer_name, coeff in STYLE_LAYERS:\n out = model[layer_name]\n a_S = sess.run(out)\n a_G = out\n J_style_layer = compute_layer_style_cost(a_S, a_G)\n J_style += coeff * J_style_layer\n\n return J_style\n\n\ndef total_cost(J_content, J_style, alpha=10, beta=40):\n return alpha * J_content + beta * J_style\n\n\ntf.reset_default_graph()\n\n# Start interactive session\nsess = tf.InteractiveSession()\ncontent_image = scipy.misc.imread(\"images/1.jpg\")\ncontent_image = reshape_and_normalize_image(content_image)\nstyle_image = scipy.misc.imread(\"images/starry_night.jpg\")\nstyle_image = reshape_and_normalize_image(style_image)\ngenerated_image = generate_noise_image(content_image)\n\nmodel = load_vgg_model(\"pretrained-model/imagenet-vgg-verydeep-19.mat\")\nsess.run(model['input'].assign(content_image))\n\nout = model['conv4_2']\na_C = sess.run(out)\na_G = out\nJ_content = compute_content_cost(a_C, a_G)\n\nprint(model['input'])\nsess.run(model['input'].assign(style_image))\n\n# Compute the style cost\nJ_style = compute_style_cost(model, STYLE_LAYERS)\nJ = total_cost(J_content, J_style)\n# define optimizer (1 line)\noptimizer = tf.train.AdamOptimizer(0.5)\n\n# define train_step (1 line)\ntrain_step = optimizer.minimize(J)\n\n\ndef model_nn(sess, input_image, num_iterations=200):\n tf.global_variables_initializer().run()\n sess.run(model['input'].assign(input_image))\n\n for i in range(num_iterations):\n\n sess.run(train_step)\n\n generated_image = sess.run(model['input'])\n\n if i % 20 == 0:\n Jt, Jc, Js = sess.run([J, 
J_content, J_style])\n print(\"Iteration \" + str(i) + \" :\")\n print(\"total cost = \" + str(Jt))\n print(\"content cost = \" + str(Jc))\n print(\"style cost = \" + str(Js))\n\n save_image(\"output/\" + str(i) + \".png\", generated_image)\n\n save_image('output/generated_image.jpg', generated_image)\n\n return generated_image\n\n\nmodel_nn(sess, generated_image, num_iterations=3000)\n","sub_path":"Andrew_NG_learning/class_four/week_four/NST/Dxq_2.py","file_name":"Dxq_2.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"473480700","text":"import pytest\nfrom selenium import webdriver\n\ndef pytest_addoption(parser):\n parser.addoption(\"--driverName\", action=\"store\", default=\"Chrome\")\n\n@pytest.fixture(scope=\"class\")\ndef setup(request):\n driverName = request.config.getoption(\"--driverName\")\n if driverName == \"Chrome\":\n driver = webdriver.Chrome(executable_path=\"C:\\\\chromedriver.exe\")\n elif driverName == \"Firefox\":\n driver = webdriver.Firefox(executable_path=\"C:\\\\geckodriver.exe\")\n elif driverName == \"IE\":\n driver = webdriver.Ie(executable_path=\"C:\\\\IEDriverServer.exe\")\n request.cls.driver = driver\n yield\n driver.close()","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"384971270","text":"import urllib.request, urllib.parse, urllib.error\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = input(\"Enter Url: \")\r\ncount = 7\r\nposition = 18\r\nfor i in range(count):\r\n html = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(html)\r\n\r\n tags = soup(\"a\")\r\n s = list() \r\n t = list()\r\n for tag in tags:\r\n x = tag.get(\"href\", None)\r\n s.append(x)\r\n y = tag.text\r\n t.append(y)\r\n\r\n print(s[position - 1])\r\n print(t[position - 1])\r\n url = s[position - 1]\r\n","sub_path":"beautifulsoup.py","file_name":"beautifulsoup.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"56878045","text":"from __future__ import print_function\nimport sys\nfrom os import path, makedirs\n\nsys.path.append(\".\")\nsys.path.append(\"..\")\n\nimport argparse\nfrom copy import deepcopy\nimport json\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import namedtuple\nfrom utils.io_ import seeds, Writer, get_logger, prepare_data, rearrange_splits\nfrom utils.models.parsing_gating import BiAffine_Parser_Gated\nfrom utils import load_word_embeddings\nfrom utils.tasks import parse\nimport time\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim import Adam, SGD\nimport uuid\n\nuid = uuid.uuid4().hex[:6]\n\nlogger = get_logger('GraphParser')\n\ndef read_arguments():\n args_ = argparse.ArgumentParser(description='Sovling GraphParser')\n args_.add_argument('--dataset', choices=['ontonotes', 'ud'], help='Dataset', required=True)\n args_.add_argument('--domain', help='domain/language', required=True)\n args_.add_argument('--rnn_mode', choices=['RNN', 'LSTM', 'GRU'], help='architecture of rnn',\n required=True)\n args_.add_argument('--gating',action='store_true', help='use gated mechanism')\n args_.add_argument('--num_gates', type=int, default=0, help='number of gates for gating mechanism')\n args_.add_argument('--num_epochs', type=int, default=200, help='Number of training epochs')\n 
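# model-capacity and optimization hyper-parameters\n 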
args_.add_argument('--batch_size', type=int, default=64, help='Number of sentences in each batch')\n args_.add_argument('--hidden_size', type=int, default=256, help='Number of hidden units in RNN')\n args_.add_argument('--arc_space', type=int, default=128, help='Dimension of tag space')\n args_.add_argument('--arc_tag_space', type=int, default=128, help='Dimension of tag space')\n args_.add_argument('--num_layers', type=int, default=1, help='Number of layers of RNN')\n args_.add_argument('--num_filters', type=int, default=50, help='Number of filters in CNN')\n args_.add_argument('--kernel_size', type=int, default=3, help='Size of Kernel for CNN')\n args_.add_argument('--use_pos', action='store_true', help='use part-of-speech embedding.')\n args_.add_argument('--use_char', action='store_true', help='use character embedding and CNN.')\n args_.add_argument('--word_dim', type=int, default=300, help='Dimension of word embeddings')\n args_.add_argument('--pos_dim', type=int, default=50, help='Dimension of POS embeddings')\n args_.add_argument('--char_dim', type=int, default=50, help='Dimension of Character embeddings')\n args_.add_argument('--initializer', choices=['xavier'], help='initialize model parameters')\n args_.add_argument('--opt', choices=['adam', 'sgd'], help='optimization algorithm')\n args_.add_argument('--momentum', type=float, default=0.9, help='momentum of optimizer')\n args_.add_argument('--betas', nargs=2, type=float, default=[0.9, 0.9], help='betas of optimizer')\n args_.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate')\n args_.add_argument('--decay_rate', type=float, default=0.05, help='Decay rate of learning rate')\n args_.add_argument('--schedule', type=int, help='schedule for learning rate decay')\n args_.add_argument('--clip', type=float, default=5.0, help='gradient clipping')\n args_.add_argument('--gamma', type=float, default=0.0, help='weight for regularization')\n args_.add_argument('--epsilon', type=float, default=1e-8, help='epsilon for adam')\n args_.add_argument('--p_rnn', nargs=2, type=float, required=True, help='dropout rate for RNN')\n args_.add_argument('--p_in', type=float, default=0.33, help='dropout rate for input embeddings')\n args_.add_argument('--p_out', type=float, default=0.33, help='dropout rate for output layer')\n args_.add_argument('--arc_decode', choices=['mst', 'greedy'], help='arc decoding algorithm', required=True)\n args_.add_argument('--unk_replace', type=float, default=0.,\n help='The rate to replace a singleton word with UNK')\n args_.add_argument('--punct_set', nargs='+', type=str, help='List of punctuations')\n args_.add_argument('--word_embedding', choices=['random', 'glove', 'fasttext', 'word2vec'],\n help='Embedding for words')\n args_.add_argument('--word_path', help='path for word embedding dict - in case word_embedding is not random')\n args_.add_argument('--freeze_word_embeddings', action='store_true', help='frozen the word embedding (disable fine-tuning).')\n args_.add_argument('--freeze_sequence_taggers', action='store_true', help='frozen the BiLSTMs of the pre-trained taggers.')\n args_.add_argument('--char_embedding', choices=['random','hellwig'], help='Embedding for characters',\n required=True)\n args_.add_argument('--pos_embedding', choices=['random','one_hot'], help='Embedding for pos',\n required=True)\n args_.add_argument('--char_path', help='path for character embedding dict')\n args_.add_argument('--pos_path', help='path for pos embedding dict')\n 
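# training-set downsampling, checkpoint paths and evaluation-mode options\n 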
args_.add_argument('--set_num_training_samples', type=int, help='downsampling training set to a fixed number of samples')\n args_.add_argument('--model_path', help='path for saving model file.', required=True)\n args_.add_argument('--load_path', help='path for loading saved source model file.', default=None)\n args_.add_argument('--load_sequence_taggers_paths', nargs='+', help='path for loading saved sequence_tagger saved_models files.', default=None)\n args_.add_argument('--strict',action='store_true', help='if True loaded model state should contin '\n 'exactly the same keys as current model')\n args_.add_argument('--eval_mode', action='store_true', help='evaluating model without training it')\n args = args_.parse_args()\n args_dict = {}\n args_dict['dataset'] = args.dataset\n args_dict['domain'] = args.domain\n args_dict['rnn_mode'] = args.rnn_mode\n args_dict['gating'] = args.gating\n args_dict['num_gates'] = args.num_gates\n args_dict['arc_decode'] = args.arc_decode\n # args_dict['splits'] = ['train', 'dev', 'test']\n args_dict['splits'] = ['train', 'dev', 'test','poetry','prose']\n args_dict['model_path'] = args.model_path\n if not path.exists(args_dict['model_path']):\n makedirs(args_dict['model_path'])\n args_dict['data_paths'] = {}\n if args_dict['dataset'] == 'ontonotes':\n data_path = 'data/onto_pos_ner_dp'\n else:\n data_path = 'data/ud_pos_ner_dp'\n for split in args_dict['splits']:\n args_dict['data_paths'][split] = data_path + '_' + split + '_' + args_dict['domain']\n ################################### \n args_dict['data_paths']['poetry'] = 'data/Shishu_300' + '_' + 'poetry' + '_' + args_dict['domain']\n args_dict['data_paths']['prose'] = 'data/Shishu_300' + '_' + 'prose' + '_' + args_dict['domain']\n ###################################\n args_dict['alphabet_data_paths'] = {}\n for split in args_dict['splits']:\n if args_dict['dataset'] == 'ontonotes':\n args_dict['alphabet_data_paths'][split] = data_path + '_' + split + '_' + 'all'\n else:\n if '_' in args_dict['domain']:\n args_dict['alphabet_data_paths'][split] = data_path + '_' + split + '_' + args_dict['domain'].split('_')[0]\n else:\n args_dict['alphabet_data_paths'][split] = args_dict['data_paths'][split]\n args_dict['model_name'] = 'domain_' + args_dict['domain']\n args_dict['full_model_name'] = path.join(args_dict['model_path'],args_dict['model_name'])\n args_dict['load_path'] = args.load_path\n args_dict['load_sequence_taggers_paths'] = args.load_sequence_taggers_paths\n if args_dict['load_sequence_taggers_paths'] is not None:\n args_dict['gating'] = True\n args_dict['num_gates'] = len(args_dict['load_sequence_taggers_paths']) + 1\n else:\n if not args_dict['gating']:\n args_dict['num_gates'] = 0\n args_dict['strict'] = args.strict\n args_dict['num_epochs'] = args.num_epochs\n args_dict['batch_size'] = args.batch_size\n args_dict['hidden_size'] = args.hidden_size\n args_dict['arc_space'] = args.arc_space\n args_dict['arc_tag_space'] = args.arc_tag_space\n args_dict['num_layers'] = args.num_layers\n args_dict['num_filters'] = args.num_filters\n args_dict['kernel_size'] = args.kernel_size\n args_dict['learning_rate'] = args.learning_rate\n args_dict['initializer'] = nn.init.xavier_uniform_ if args.initializer == 'xavier' else None\n args_dict['opt'] = args.opt\n args_dict['momentum'] = args.momentum\n args_dict['betas'] = tuple(args.betas)\n args_dict['epsilon'] = args.epsilon\n args_dict['decay_rate'] = args.decay_rate\n args_dict['clip'] = args.clip\n args_dict['gamma'] = args.gamma\n args_dict['schedule'] = 
args.schedule\n args_dict['p_rnn'] = tuple(args.p_rnn)\n args_dict['p_in'] = args.p_in\n args_dict['p_out'] = args.p_out\n args_dict['unk_replace'] = args.unk_replace\n args_dict['set_num_training_samples'] = args.set_num_training_samples\n args_dict['punct_set'] = None\n if args.punct_set is not None:\n args_dict['punct_set'] = set(args.punct_set)\n logger.info(\"punctuations(%d): %s\" % (len(args_dict['punct_set']), ' '.join(args_dict['punct_set'])))\n args_dict['freeze_word_embeddings'] = args.freeze_word_embeddings\n args_dict['freeze_sequence_taggers'] = args.freeze_sequence_taggers\n args_dict['word_embedding'] = args.word_embedding\n args_dict['word_path'] = args.word_path\n args_dict['use_char'] = args.use_char\n args_dict['char_embedding'] = args.char_embedding\n args_dict['char_path'] = args.char_path\n args_dict['pos_embedding'] = args.pos_embedding\n args_dict['pos_path'] = args.pos_path\n args_dict['use_pos'] = args.use_pos\n args_dict['pos_dim'] = args.pos_dim\n args_dict['word_dict'] = None\n args_dict['word_dim'] = args.word_dim\n if args_dict['word_embedding'] != 'random' and args_dict['word_path']:\n args_dict['word_dict'], args_dict['word_dim'] = load_word_embeddings.load_embedding_dict(args_dict['word_embedding'],\n args_dict['word_path'])\n args_dict['char_dict'] = None\n args_dict['char_dim'] = args.char_dim\n if args_dict['char_embedding'] != 'random':\n args_dict['char_dict'], args_dict['char_dim'] = load_word_embeddings.load_embedding_dict(args_dict['char_embedding'],\n args_dict['char_path'])\n args_dict['pos_dict'] = None\n if args_dict['pos_embedding'] != 'random':\n args_dict['pos_dict'], args_dict['pos_dim'] = load_word_embeddings.load_embedding_dict(args_dict['pos_embedding'],\n args_dict['pos_path'])\n args_dict['alphabet_path'] = path.join(args_dict['model_path'], 'alphabets' + '_src_domain_' + args_dict['domain'] + '/')\n args_dict['model_name'] = path.join(args_dict['model_path'], args_dict['model_name'])\n args_dict['eval_mode'] = args.eval_mode\n args_dict['device'] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n args_dict['word_status'] = 'frozen' if args.freeze_word_embeddings else 'fine tune'\n args_dict['char_status'] = 'enabled' if args.use_char else 'disabled'\n args_dict['pos_status'] = 'enabled' if args.use_pos else 'disabled'\n logger.info(\"Saving arguments to file\")\n save_args(args, args_dict['full_model_name'])\n logger.info(\"Creating Alphabets\")\n alphabet_dict = creating_alphabets(args_dict['alphabet_path'], args_dict['alphabet_data_paths'], args_dict['word_dict'])\n args_dict = {**args_dict, **alphabet_dict}\n ARGS = namedtuple('ARGS', args_dict.keys())\n my_args = ARGS(**args_dict)\n return my_args\n\n\ndef creating_alphabets(alphabet_path, alphabet_data_paths, word_dict):\n train_paths = alphabet_data_paths['train']\n extra_paths = [v for k,v in alphabet_data_paths.items() if k != 'train']\n alphabet_dict = {}\n alphabet_dict['alphabets'] = prepare_data.create_alphabets(alphabet_path,\n train_paths,\n extra_paths=extra_paths,\n max_vocabulary_size=100000,\n embedd_dict=word_dict)\n for k, v in alphabet_dict['alphabets'].items():\n num_key = 'num_' + k.split('_')[0]\n alphabet_dict[num_key] = v.size()\n logger.info(\"%s : %d\" % (num_key, alphabet_dict[num_key]))\n return alphabet_dict\n\ndef construct_embedding_table(alphabet, tokens_dict, dim, token_type='word'):\n if tokens_dict is None:\n return None\n scale = np.sqrt(3.0 / dim)\n table = np.empty([alphabet.size(), dim], dtype=np.float32)\n 
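# NOTE: scale = sqrt(3/dim) makes each component of U(-scale, scale) have variance 1/dim\n # (Var of U(-a, a) is a^2/3); e.g. dim = 300 gives scale = 0.1. OOV tokens get such random vectors.\n 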
table[prepare_data.UNK_ID, :] = np.random.uniform(-scale, scale, [1, dim]).astype(np.float32)\n oov_tokens = 0\n for token, index in alphabet.items():\n if token in tokens_dict:\n embedding = tokens_dict[token]\n elif token.lower() in tokens_dict:\n embedding = tokens_dict[token.lower()]\n else:\n embedding = np.random.uniform(-scale, scale, [1, dim]).astype(np.float32)\n oov_tokens += 1\n table[index, :] = embedding\n print('token type : %s, number of oov: %d' % (token_type, oov_tokens))\n table = torch.from_numpy(table)\n return table\n\ndef save_args(args, full_model_name):\n arg_path = full_model_name + '.arg.json'\n argparse_dict = vars(args)\n with open(arg_path, 'w') as f:\n json.dump(argparse_dict, f)\n\ndef generate_optimizer(args, lr, params):\n params = filter(lambda param: param.requires_grad, params)\n if args.opt == 'adam':\n return Adam(params, lr=lr, betas=args.betas, weight_decay=args.gamma, eps=args.epsilon)\n elif args.opt == 'sgd':\n return SGD(params, lr=lr, momentum=args.momentum, weight_decay=args.gamma, nesterov=True)\n else:\n raise ValueError('Unknown optimization algorithm: %s' % args.opt)\n\n\ndef save_checkpoint(model, optimizer, opt, dev_eval_dict, test_eval_dict, full_model_name):\n path_name = full_model_name + '.pt'\n print('Saving model to: %s' % path_name)\n state = {'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'opt': opt,\n 'dev_eval_dict': dev_eval_dict,\n 'test_eval_dict': test_eval_dict}\n torch.save(state, path_name)\n\n\ndef load_checkpoint(args, model, optimizer, dev_eval_dict, test_eval_dict, start_epoch, load_path, strict=True):\n print('Loading saved model from: %s' % load_path)\n checkpoint = torch.load(load_path, map_location=args.device)\n if checkpoint['opt'] != args.opt:\n raise ValueError('loaded optimizer type is: %s instead of: %s' % (checkpoint['opt'], args.opt))\n model.load_state_dict(checkpoint['model_state_dict'], strict=strict)\n\n if strict:\n generate_optimizer(args, args.learning_rate, model.parameters())\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(args.device)\n dev_eval_dict = checkpoint['dev_eval_dict']\n test_eval_dict = checkpoint['test_eval_dict']\n start_epoch = dev_eval_dict['in_domain']['epoch']\n return model, optimizer, dev_eval_dict, test_eval_dict, start_epoch\n\n\ndef build_model_and_optimizer(args):\n word_table = construct_embedding_table(args.alphabets['word_alphabet'], args.word_dict, args.word_dim, token_type='word')\n char_table = construct_embedding_table(args.alphabets['char_alphabet'], args.char_dict, args.char_dim, token_type='char')\n pos_table = construct_embedding_table(args.alphabets['pos_alphabet'], args.pos_dict, args.pos_dim, token_type='pos')\n model = BiAffine_Parser_Gated(args.word_dim, args.num_word, args.char_dim, args.num_char,\n args.use_pos, args.use_char, args.pos_dim, args.num_pos,\n args.num_filters, args.kernel_size, args.rnn_mode,\n args.hidden_size, args.num_layers, args.num_arc,\n args.arc_space, args.arc_tag_space, args.num_gates,\n embedd_word=word_table, embedd_char=char_table, embedd_pos=pos_table,\n p_in=args.p_in, p_out=args.p_out, p_rnn=args.p_rnn,\n biaffine=True, arc_decode=args.arc_decode, initializer=args.initializer)\n print(model)\n optimizer = generate_optimizer(args, args.learning_rate, model.parameters())\n start_epoch = 0\n dev_eval_dict = {'in_domain': initialize_eval_dict()}\n 
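# the dev/test eval dicts cache the best in-domain scores seen so far (updated in in_domain_evaluation)\n 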
test_eval_dict = {'in_domain': initialize_eval_dict()}\n if args.load_path:\n model, optimizer, dev_eval_dict, test_eval_dict, start_epoch = \\\n load_checkpoint(args, model, optimizer,\n dev_eval_dict, test_eval_dict,\n start_epoch, args.load_path, strict=args.strict)\n if args.load_sequence_taggers_paths:\n pretrained_dict = {}\n model_dict = model.state_dict()\n for idx, path in enumerate(args.load_sequence_taggers_paths):\n print('Loading saved sequence_tagger from: %s' % path)\n checkpoint = torch.load(path, map_location=args.device)\n for k, v in checkpoint['model_state_dict'].items():\n if 'rnn_encoder.' in k:\n pretrained_dict['extra_rnn_encoders.' + str(idx) + '.' + k.replace('rnn_encoder.', '')] = v\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n if args.freeze_sequence_taggers:\n print('Freezing Classifiers')\n for name, parameter in model.named_parameters():\n if 'extra_rnn_encoders' in name:\n parameter.requires_grad = False\n if args.freeze_word_embeddings:\n model.rnn_encoder.word_embedd.weight.requires_grad = False\n # model.rnn_encoder.char_embedd.weight.requires_grad = False\n # model.rnn_encoder.pos_embedd.weight.requires_grad = False\n device = args.device\n model.to(device)\n return model, optimizer, dev_eval_dict, test_eval_dict, start_epoch\n\n\ndef initialize_eval_dict():\n eval_dict = {}\n eval_dict['dp_uas'] = 0.0\n eval_dict['dp_las'] = 0.0\n eval_dict['epoch'] = 0\n eval_dict['dp_ucorrect'] = 0.0\n eval_dict['dp_lcorrect'] = 0.0\n eval_dict['dp_total'] = 0.0\n eval_dict['dp_ucomplete_match'] = 0.0\n eval_dict['dp_lcomplete_match'] = 0.0\n eval_dict['dp_ucorrect_nopunc'] = 0.0\n eval_dict['dp_lcorrect_nopunc'] = 0.0\n eval_dict['dp_total_nopunc'] = 0.0\n eval_dict['dp_ucomplete_match_nopunc'] = 0.0\n eval_dict['dp_lcomplete_match_nopunc'] = 0.0\n eval_dict['dp_root_correct'] = 0.0\n eval_dict['dp_total_root'] = 0.0\n eval_dict['dp_total_inst'] = 0.0\n eval_dict['dp_total'] = 0.0\n eval_dict['dp_total_inst'] = 0.0\n eval_dict['dp_total_nopunc'] = 0.0\n eval_dict['dp_total_root'] = 0.0\n return eval_dict\n\ndef in_domain_evaluation(args, datasets, model, optimizer, dev_eval_dict, test_eval_dict, epoch,\n best_model, best_optimizer, patient):\n # In-domain evaluation\n curr_dev_eval_dict = evaluation(args, datasets['dev'], 'dev', model, args.domain, epoch, 'current_results')\n is_best_in_domain = dev_eval_dict['in_domain']['dp_lcorrect_nopunc'] <= curr_dev_eval_dict['dp_lcorrect_nopunc'] or \\\n (dev_eval_dict['in_domain']['dp_lcorrect_nopunc'] == curr_dev_eval_dict['dp_lcorrect_nopunc'] and\n dev_eval_dict['in_domain']['dp_ucorrect_nopunc'] <= curr_dev_eval_dict['dp_ucorrect_nopunc'])\n\n if is_best_in_domain:\n for key, value in curr_dev_eval_dict.items():\n dev_eval_dict['in_domain'][key] = value\n curr_test_eval_dict = evaluation(args, datasets['test'], 'test', model, args.domain, epoch, 'current_results')\n for key, value in curr_test_eval_dict.items():\n test_eval_dict['in_domain'][key] = value\n best_model = deepcopy(model)\n best_optimizer = deepcopy(optimizer)\n patient = 0\n else:\n patient += 1\n if epoch == args.num_epochs:\n # save in-domain checkpoint\n if args.set_num_training_samples is not None:\n splits_to_write = datasets.keys()\n else:\n splits_to_write = ['dev', 'test']\n for split in splits_to_write:\n if split == 'dev':\n eval_dict = dev_eval_dict['in_domain']\n elif split == 'test':\n eval_dict = test_eval_dict['in_domain']\n else:\n eval_dict = None\n write_results(args, datasets[split], args.domain, split, 
best_model, args.domain, eval_dict)\n print(\"Saving best model\")\n save_checkpoint(best_model, best_optimizer, args.opt, dev_eval_dict, test_eval_dict, args.full_model_name)\n\n print('\\n')\n return dev_eval_dict, test_eval_dict, best_model, best_optimizer, patient\n\n\ndef evaluation(args, data, split, model, domain, epoch, str_res='results'):\n # evaluate performance on data\n model.eval()\n\n eval_dict = initialize_eval_dict()\n eval_dict['epoch'] = epoch\n for batch in prepare_data.iterate_batch(data, args.batch_size, args.device):\n word, char, pos, ner, heads, arc_tags, auto_label, masks, lengths = batch\n out_arc, out_arc_tag, masks, lengths = model.forward(word, char, pos, mask=masks, length=lengths)\n heads_pred, arc_tags_pred, _ = model.decode(out_arc, out_arc_tag, mask=masks, length=lengths,\n leading_symbolic=prepare_data.NUM_SYMBOLIC_TAGS)\n lengths = lengths.cpu().numpy()\n word = word.data.cpu().numpy()\n pos = pos.data.cpu().numpy()\n ner = ner.data.cpu().numpy()\n heads = heads.data.cpu().numpy()\n arc_tags = arc_tags.data.cpu().numpy()\n heads_pred = heads_pred.data.cpu().numpy()\n arc_tags_pred = arc_tags_pred.data.cpu().numpy()\n stats, stats_nopunc, stats_root, num_inst = parse.eval_(word, pos, heads_pred, arc_tags_pred, heads,\n arc_tags, args.alphabets['word_alphabet'], args.alphabets['pos_alphabet'],\n lengths, punct_set=args.punct_set, symbolic_root=True)\n ucorr, lcorr, total, ucm, lcm = stats\n ucorr_nopunc, lcorr_nopunc, total_nopunc, ucm_nopunc, lcm_nopunc = stats_nopunc\n corr_root, total_root = stats_root\n eval_dict['dp_ucorrect'] += ucorr\n eval_dict['dp_lcorrect'] += lcorr\n eval_dict['dp_total'] += total\n eval_dict['dp_ucomplete_match'] += ucm\n eval_dict['dp_lcomplete_match'] += lcm\n eval_dict['dp_ucorrect_nopunc'] += ucorr_nopunc\n eval_dict['dp_lcorrect_nopunc'] += lcorr_nopunc\n eval_dict['dp_total_nopunc'] += total_nopunc\n eval_dict['dp_ucomplete_match_nopunc'] += ucm_nopunc\n eval_dict['dp_lcomplete_match_nopunc'] += lcm_nopunc\n eval_dict['dp_root_correct'] += corr_root\n eval_dict['dp_total_root'] += total_root\n eval_dict['dp_total_inst'] += num_inst\n\n eval_dict['dp_uas'] = eval_dict['dp_ucorrect'] * 100 / eval_dict['dp_total'] # considering w. punctuation\n eval_dict['dp_las'] = eval_dict['dp_lcorrect'] * 100 / eval_dict['dp_total'] # considering w. punctuation\n print_results(eval_dict, split, domain, str_res)\n return eval_dict\n\n\ndef print_results(eval_dict, split, domain, str_res='results'):\n print('----------------------------------------------------------------------------------------------------------------------------')\n print('Testing model on domain %s' % domain)\n print('--------------- Dependency Parsing - %s ---------------' % split)\n print(\n str_res + ' on ' + split + ' W. 
Punct: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%, ucm: %.2f%%, lcm: %.2f%% (epoch: %d)' % (\n eval_dict['dp_ucorrect'], eval_dict['dp_lcorrect'], eval_dict['dp_total'],\n eval_dict['dp_ucorrect'] * 100 / eval_dict['dp_total'],\n eval_dict['dp_lcorrect'] * 100 / eval_dict['dp_total'],\n eval_dict['dp_ucomplete_match'] * 100 / eval_dict['dp_total_inst'],\n eval_dict['dp_lcomplete_match'] * 100 / eval_dict['dp_total_inst'],\n eval_dict['epoch']))\n print(\n str_res + ' on ' + split + ' Wo Punct: ucorr: %d, lcorr: %d, total: %d, uas: %.2f%%, las: %.2f%%, ucm: %.2f%%, lcm: %.2f%% (epoch: %d)' % (\n eval_dict['dp_ucorrect_nopunc'], eval_dict['dp_lcorrect_nopunc'], eval_dict['dp_total_nopunc'],\n eval_dict['dp_ucorrect_nopunc'] * 100 / eval_dict['dp_total_nopunc'],\n eval_dict['dp_lcorrect_nopunc'] * 100 / eval_dict['dp_total_nopunc'],\n eval_dict['dp_ucomplete_match_nopunc'] * 100 / eval_dict['dp_total_inst'],\n eval_dict['dp_lcomplete_match_nopunc'] * 100 / eval_dict['dp_total_inst'],\n eval_dict['epoch']))\n print(str_res + ' on ' + split + ' Root: corr: %d, total: %d, acc: %.2f%% (epoch: %d)' % (\n eval_dict['dp_root_correct'], eval_dict['dp_total_root'],\n eval_dict['dp_root_correct'] * 100 / eval_dict['dp_total_root'], eval_dict['epoch']))\n print('\\n')\n\ndef write_results(args, data, data_domain, split, model, model_domain, eval_dict):\n str_file = args.full_model_name + '_' + split + '_model_domain_' + model_domain + '_data_domain_' + data_domain\n res_filename = str_file + '_res.txt'\n pred_filename = str_file + '_pred.txt'\n gold_filename = str_file + '_gold.txt'\n if eval_dict is not None:\n # save results dictionary into a file\n with open(res_filename, 'w') as f:\n json.dump(eval_dict, f)\n\n # save predictions and gold labels into files\n pred_writer = Writer(args.alphabets)\n gold_writer = Writer(args.alphabets)\n pred_writer.start(pred_filename)\n gold_writer.start(gold_filename)\n for batch in prepare_data.iterate_batch(data, args.batch_size, args.device):\n word, char, pos, ner, heads, arc_tags, auto_label, masks, lengths = batch\n out_arc, out_arc_tag, masks, lengths = model.forward(word, char, pos, mask=masks, length=lengths)\n heads_pred, arc_tags_pred, _ = model.decode(out_arc, out_arc_tag, mask=masks, length=lengths,\n leading_symbolic=prepare_data.NUM_SYMBOLIC_TAGS)\n lengths = lengths.cpu().numpy()\n word = word.data.cpu().numpy()\n pos = pos.data.cpu().numpy()\n ner = ner.data.cpu().numpy()\n heads = heads.data.cpu().numpy()\n arc_tags = arc_tags.data.cpu().numpy()\n heads_pred = heads_pred.data.cpu().numpy()\n arc_tags_pred = arc_tags_pred.data.cpu().numpy()\n # writing predictions\n pred_writer.write(word, pos, ner, heads_pred, arc_tags_pred, lengths, symbolic_root=True)\n # writing gold labels\n gold_writer.write(word, pos, ner, heads, arc_tags, lengths, symbolic_root=True)\n\n pred_writer.close()\n gold_writer.close()\n\ndef main():\n logger.info(\"Reading and creating arguments\")\n args = read_arguments()\n logger.info(\"Reading Data\")\n datasets = {}\n for split in args.splits:\n print(\"Splits are:\",split)\n dataset = prepare_data.read_data_to_variable(args.data_paths[split], args.alphabets, args.device,\n symbolic_root=True)\n datasets[split] = dataset\n if args.set_num_training_samples is not None:\n print('Setting train and dev to %d samples' % args.set_num_training_samples)\n datasets = rearrange_splits.rearranging_splits(datasets, args.set_num_training_samples)\n logger.info(\"Creating Networks\")\n num_data = sum(datasets['train'][1])\n 
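# assumption from usage: index 1 of the tuple from read_data_to_variable holds per-bucket sample counts, so the sum is the number of training instances\n 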
model, optimizer, dev_eval_dict, test_eval_dict, start_epoch = build_model_and_optimizer(args)\n best_model = deepcopy(model)\n best_optimizer = deepcopy(optimizer)\n\n logger.info('Training INFO of in domain %s' % args.domain)\n logger.info('Training on Dependecy Parsing')\n logger.info(\"train: gamma: %f, batch: %d, clip: %.2f, unk replace: %.2f\" % (args.gamma, args.batch_size, args.clip, args.unk_replace))\n logger.info('number of training samples for %s is: %d' % (args.domain, num_data))\n logger.info(\"dropout(in, out, rnn): (%.2f, %.2f, %s)\" % (args.p_in, args.p_out, args.p_rnn))\n logger.info(\"num_epochs: %d\" % (args.num_epochs))\n print('\\n')\n\n if not args.eval_mode:\n logger.info(\"Training\")\n num_batches = prepare_data.calc_num_batches(datasets['train'], args.batch_size)\n lr = args.learning_rate\n patient = 0\n decay = 0\n for epoch in range(start_epoch + 1, args.num_epochs + 1):\n print('Epoch %d (Training: rnn mode: %s, optimizer: %s, learning rate=%.6f, eps=%.1e, decay rate=%.2f (schedule=%d, decay=%d)): ' % (\n epoch, args.rnn_mode, args.opt, lr, args.epsilon, args.decay_rate, args.schedule, decay))\n model.train()\n total_loss = 0.0\n total_arc_loss = 0.0\n total_arc_tag_loss = 0.0\n total_train_inst = 0.0\n\n train_iter = prepare_data.iterate_batch_rand_bucket_choosing(\n datasets['train'], args.batch_size, args.device, unk_replace=args.unk_replace)\n start_time = time.time()\n batch_num = 0\n for batch_num, batch in enumerate(train_iter):\n batch_num = batch_num + 1\n optimizer.zero_grad()\n # compute loss of main task\n word, char, pos, ner_tags, heads, arc_tags, auto_label, masks, lengths = batch\n out_arc, out_arc_tag, masks, lengths = model.forward(word, char, pos, mask=masks, length=lengths)\n loss_arc, loss_arc_tag = model.loss(out_arc, out_arc_tag, heads, arc_tags, mask=masks, length=lengths)\n loss = loss_arc + loss_arc_tag\n\n # update losses\n num_insts = masks.data.sum() - word.size(0)\n total_arc_loss += loss_arc.item() * num_insts\n total_arc_tag_loss += loss_arc_tag.item() * num_insts\n total_loss += loss.item() * num_insts\n total_train_inst += num_insts\n # optimize parameters\n loss.backward()\n clip_grad_norm_(model.parameters(), args.clip)\n optimizer.step()\n\n time_ave = (time.time() - start_time) / batch_num\n time_left = (num_batches - batch_num) * time_ave\n\n # update log\n if batch_num % 50 == 0:\n log_info = 'train: %d/%d, domain: %s, total loss: %.2f, arc_loss: %.2f, arc_tag_loss: %.2f, time left: %.2fs' % \\\n (batch_num, num_batches, args.domain, total_loss / total_train_inst, total_arc_loss / total_train_inst,\n total_arc_tag_loss / total_train_inst, time_left)\n sys.stdout.write(log_info)\n sys.stdout.write('\\n')\n sys.stdout.flush()\n print('\\n')\n print('train: %d/%d, domain: %s, total_loss: %.2f, arc_loss: %.2f, arc_tag_loss: %.2f, time: %.2fs' %\n (batch_num, num_batches, args.domain, total_loss / total_train_inst, total_arc_loss / total_train_inst,\n total_arc_tag_loss / total_train_inst, time.time() - start_time))\n\n dev_eval_dict, test_eval_dict, best_model, best_optimizer, patient = in_domain_evaluation(args, datasets, model, optimizer, dev_eval_dict, test_eval_dict, epoch, best_model, best_optimizer, patient)\n if patient >= args.schedule:\n lr = args.learning_rate / (1.0 + epoch * args.decay_rate)\n optimizer = generate_optimizer(args, lr, model.parameters())\n print('updated learning rate to %.6f' % lr)\n patient = 0\n print_results(test_eval_dict['in_domain'], 'test', args.domain, 'best_results')\n print('\\n')\n for 
split in datasets.keys():\n eval_dict = evaluation(args, datasets[split], split, best_model, args.domain, epoch, 'best_results')\n write_results(args, datasets[split], args.domain, split, best_model, args.domain, eval_dict) # was model; use best_model to match the evaluation above\n\n else:\n logger.info(\"Evaluating\")\n epoch = start_epoch\n for split in ['train', 'dev', 'test','poetry','prose']:\n eval_dict = evaluation(args, datasets[split], split, model, args.domain, epoch, 'best_results')\n write_results(args, datasets[split], args.domain, split, model, args.domain, eval_dict)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/GraphParser.py","file_name":"GraphParser.py","file_ext":"py","file_size_in_byte":33169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"454204073","text":"import selenium\nimport requests\nimport pprintpp as pp\n\n\"\"\"\n# CURL TO GRAB TRELLO BOARDS (IDS)\ncurl 'https://api.trello.com/1/members/me/boards?key=(yourKey)&token=(yourToken)'\n\n# CURL TO GRAB SPECIFIC TRELLO BOARD LISTS\ncurl 'https://api.trello.com/1/boards/board_num_here/lists?key=key_here&token=token_here' | json_pp\n\"\"\"\n\napi_key='11111111'\napi_secret='2222222'\ntoken='333333'\nvarsity_board = '4444444'\nlgs_board = '55555555'\n\n# UPCOMING EVENTS LIST ID\nupComingEvents_id = \"66666666\"\n# EVENTS LIVE NOW/WEEKEND LIST ID\neventsLiveNow = \"777777777\"\n\ndef varsity_client():\n    response = requests.get(f\"https://api.trello.com/1/boards/{varsity_board}?fields=name,url&key={api_key}&token={token}\")\n    print(response.text)\n    print(dir(response))\n\ndef lgs_client():\n    requests.get(f\"https://api.trello.com/1/boards/{lgs_board}?fields=name,url&key={api_key}&token={token}\")\n\ndef args():\n    args_check = \"\"\n    while args_check != 'y':\n        commandline_arg = input(\"\\n\\n\\n\\nPress 'l' for LGS or 'v' for Varsity Trello.\\n\")\n        if (commandline_arg != 'l') and (commandline_arg != 'v'):\n            print(\"\\n\\n\\n\\n\\n\\n\\nPlease type only a 'l' or 'v'.\\nDon't be one of those users. You're not that user...\\nare you...? \\n:)\\n\")\n            continue\n        else:\n            print(\"You Entered: \" + commandline_arg)\n            args_check = input(\"If this is correct enter 'y', otherwise press Enter to try again.\\n\")\n    args_check = \"\" # reset so the second prompt loop actually runs\n    while args_check != 'y':\n        commandline_arg2 = int(input(\"\\n\\n\\n\\nEnter a number between 1 and 100.\\n\"))\n        if 1 <= commandline_arg2 <= 100: # was (>= 0 or <= 100), which is always true\n            print(\"You Entered: \" + str(commandline_arg2))\n            args_check = input(\"If this is correct enter 'y', otherwise press Enter to try again.\\n\")\n        else:\n            print(\"\\n\\n\\n\\n\\n\\n\\nPlease type only a Number between 1 - 100 (not 0 or 101 etc.).\\nDon't be one of those users. You're not that user...\\nare you...? 
\\n:)\\n\")\n continue\n print(\"You Entered: \" + commandline_arg2)\n args_check = input(\"If this is correct enter 'y', otherwise press Enter to try again.\\n\")\n \n return commandline_arg, commandline_arg2\n\ndef main():\n varsity_client()\n args()\n\nif __name__ == \"__main__\":\n main()","sub_path":"events_demo.py","file_name":"events_demo.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"645265857","text":"import os\n\nsource = '../src/Tween.js'\noutput = '../build/tween.min.js'\n\nos.system( 'java -jar compiler/compiler.jar --language_in=ECMASCRIPT5_STRICT --js ' + source + ' --js_output_file ' + output )\n\n# header\n\nwith open(output,'r') as f: text = f.read()\nwith open(output,'w') as f: f.write('// tween.js - http://github.com/sole/tween.js\\n' + text)\n","sub_path":"utils/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"218525274","text":"import xarray as xr\nimport numpy as np\n\ndef coriolis(lat): \n \"\"\"Compute the Coriolis parameter for the given latitude:\n ``f = 2*omega*sin(lat)``, where omega is the angular velocity \n of the Earth.\n \n Parameters\n ----------\n lat : array\n Latitude [degrees].\n \n from: http://www.meteo.mcgill.ca/~huardda/amelie/geowind.py copied and modified by rpn 23.5.2019\n Output: coriolis parameter in s^-1\n \"\"\"\n deg2rad = np.pi/180\n omega = 7.2921159e-05 # angular velocity of the Earth [rad/s]\n #return 2*omega*np.sin(lat/360.*2*np.pi)\n return 2*omega*np.sin(lat*deg2rad) \n\ndef import_gos_sla_adt_data(year='2016',month='12',days=['01','02','03','04','05']):\n \"\"\" import Absolute geostrophic velocity calculated from sea level height\n source: https://cds.climate.copernicus.eu\n NOTE: doesn't work well with only 1 day, try at least 2 days\n Code based on choosing data on https://cds.climate.copernicus.eu/cdsapp#!/search?type=dataset \n and then selecting \"Show API Request\"\n \"\"\"\n import cdsapi\n c = cdsapi.Client()\n c.retrieve('satellite-sea-level-global',\n {'variable':'all', 'year':year, 'month':month, \n 'day':days,'format':'tgz'},\n '/Users/North/Drive/Work/UniH_Work/DataAnalysis/Data/MET_132/Remote_Sensing/201612_dataset-satellite-sea-level-global.tar.gz')\n #c.retrieve('satellite-sea-level-global',\n # {'variable':'all', 'year':'2016', 'month':'11', \n # 'day':['17','18','19','20','21','22','23','24','25','27','28','29','30'],'format':'tgz'},\n # '/Users/North/Drive/Work/UniH_Work/DataAnalysis/Data/MET_132/Remote_Sensing/201611_dataset-satellite-sea-level-global.tar.gz')\n\n\ndef import_sst_avhrr_data(year='2016',month='12',days=['01','02','03','04','05'],sensor='NOAA'): \n # code based on choosing data on https://cds.climate.copernicus.eu/cdsapp#!/search?type=dataset \n # and then selecting \"Show API Request\"\n import cdsapi\n if sensor=='NOAA': sensor_in = 'avhrr_on_noaa_19'\n if sensor=='METOP': sensor_in = 'avhrr_on_metop_a'\n \n filename = str(year)+str(month)+'_dataset-satellite-sst-global.tar.gz'\n c = cdsapi.Client()\n c.retrieve('satellite-sea-surface-temperature',\n {'processinglevel': 'level_3c',\n 'sensor_on_satellite': sensor_in,\n 'version': '2_0',\n 'variable': 'all',\n 'year':year, 'month':month, \n 'day':days,'format':'tgz'}, '/Users/North/Drive/Work/UniH_Work/DataAnalysis/Data/MET_132/Remote_Sensing/downloaded_sst_nc_file_on_17.02.2021/AVHRR/'+filename)\n\n\ndef 
dll_dist(dlon, dlat, lon, lat):\n \"\"\"Converts lat/lon differentials into distances in meters\n\n PARAMETERS\n ----------\n dlon : xarray.DataArray longitude differentials\n dlat : xarray.DataArray latitude differentials\n lon : xarray.DataArray longitude values\n lat : xarray.DataArray latitude values\n\n RETURNS\n -------\n dx : xarray.DataArray distance inferred from dlon\n dy : xarray.DataArray distance inferred from dlat\n \"\"\"\n\n distance_1deg_equator = 111000.0\n dx = dlon * xr.ufuncs.cos(xr.ufuncs.deg2rad(lat)) * distance_1deg_equator\n dy = ((lon * 0) + 1) * dlat * distance_1deg_equator\n return dx, dy\n\ndef load_gos_data(gos_filenames):\n #import xgcm\n from xgcm import Grid\n from xgcm.autogenerate import generate_grid_ds\n # ====== load in all .nc files and combine into one xarray dataset\n gos_map = xr.open_mfdataset(gos_filenames) \n gos_map = gos_map.rename({'latitude': 'lat'}).rename({'longitude': 'lon'})\n gos_select = gos_map #.sel(time='2016-11-19',lon=slice(10,16),lat=slice(-28,-24))\n #gos_map.ugos\n #dx = gos_map.lon.diff('lon')\n #gos_map['rel_vort'] = gos_map.vgos.diff('lon')/gos_map.lon.diff('lon')\n\n #gos_select = gos_map #gos_map.sel(time='2016-11-19',lon=slice(10,16),lat=slice(-28,-24))\n # create grid for interpolation, differencing\n #grid = xgcm.Grid(gos_select)\n # for Satellite data:\n # https://xgcm.readthedocs.io/en/latest/autogenerate_examples.html\n ds_full = generate_grid_ds(gos_select, {'X':'lon', 'Y':'lat'})\n ds_full.vgos\n\n grid = Grid(ds_full, periodic=['X'])\n\n # compute the difference (in degrees) along the longitude and latitude for both the cell center and the cell face\n # need to specify the boundary_discontinutity in order to avoid the introduction of artefacts at the boundary\n dlong = grid.diff(ds_full.lon, 'X', boundary_discontinuity=360)\n dlonc = grid.diff(ds_full.lon_left, 'X', boundary_discontinuity=360)\n #dlonc_wo_discontinuity = grid.diff(ds_full.lon_left, 'X')\n dlatg = grid.diff(ds_full.lat, 'Y', boundary='fill', fill_value=np.nan)\n dlatc = grid.diff(ds_full.lat_left, 'Y', boundary='fill', fill_value=np.nan)\n\n # converted into approximate cartesian distances on a globe.\n ds_full.coords['dxg'], ds_full.coords['dyg'] = dll_dist(dlong, dlatg, ds_full.lon, ds_full.lat)\n ds_full.coords['dxc'], ds_full.coords['dyc'] = dll_dist(dlonc, dlatc, ds_full.lon, ds_full.lat)\n\n # Relative vorticity: ζ = ∂ v/∂ x – ∂ u/∂ y\n ds_full['dv_dx'] = grid.diff(ds_full.vgos, 'X') / ds_full.dxg\n ds_full['du_dy'] = grid.diff(ds_full.ugos, 'Y', boundary='fill', fill_value=np.nan)/ ds_full.dyg\n dv_dx = grid.interp(ds_full['dv_dx'],'Y', boundary='fill', fill_value=np.nan ) # get dv_dx and du_dy on same grid\n du_dy = grid.interp(ds_full['du_dy'],'X', boundary='fill', fill_value=np.nan )\n ds_full['Rel_Vort'] = dv_dx-du_dy\n\n # Vorticity Rossby Number = ζ / f\n ds_full['Ro'] = ds_full.Rel_Vort/coriolis(ds_full.Rel_Vort.lat_left)\n\n ds_full.coords['lon_left_180'] = np.append(np.arange(0.,180,0.25),np.arange(-180.,0.,0.25))\n ds_full.coords['lon_180'] = np.append(np.arange(0.125,180,0.25),np.arange(-180.,0.,0.25)) \n \n return ds_full\n\n\n#def import_PO.DAAC Drive_sst_data(sst_filenames):\n# # code source: #https://github.com/nasa/podaacpy/blob/master/examples/Using%20podaacpy%20to%20interact%20with%20PO.DAAC%20Drive.#ipynb\n# # data example: https://podaac-#tools.jpl.nasa.gov/drive/files/OceanTemperature/ghrsst/data/GDS2/L4/GLOB/JPL/MUR/v4.1/2016/011\n \n \n \n \n##################\n# Imports #\n##################\n## import the podaac 
package\n#import podaac.podaac as podaac\n## import the podaac_utils package\n#import podaac.podaac_utils as utils\n## import the mcc package\n#import podaac.mcc as mcc\n#from podaac import drive as drive\n#######################\n# Class instantiation #\n#######################\n## then create an instance of the Podaac class\n#p = podaac.Podaac()\n## then create an instance of the PodaacUtils class\n#u = utils.PodaacUtils()\n## then create an instance of the MCC class\n#m = mcc.MCC()\n#d = drive.Drive('podaac.ini',None,None)\n\n#result = p.granule_search(dataset_id='PODAAC-GHGMR-4FJ04',\n# start_time='2016-11-17T00:00:01Z',\n# end_time='2016-11-17T11:59:59Z',\n# bbox='-81,28,-67,40')\n\n#searchStr = 'totalResults'\n#numResultsStr = [ str(i) for i in result.strip().split() if searchStr in i ]\n#print(numResultsStr)\n##Here's the actual granule names\n#print(u.mine_granules_from_granule_search(granule_search_response=str(result)))\n##Now we simply need to reproduce the Drive URL's for the above granules.\n#granules = d.mine_drive_urls_from_granule_search(granule_search_response=(str(result)))\n#print(granules)\n##retrieve these granules from PO.DAAC Drive.\n##Note that the download_granules function actually decompresses\n##and removes the compressed archive files locally for us.\n#folder_name = '/Users/North/Drive/Work/UniH_Work/DataAnalysis/Data/MET_132/Remote_Sensing/'\n#d.download_granules(granule_collection=granules, path='.')\n\n\n\n\n\n\n\n\n\n\n#print(p.dataset_variables(dataset_id='PODAAC-GHGMR-4FJ04'))#\n\n#print(p.granule_metadata(dataset_id='PODAAC-GHGMR-4FJ04'), granule_name='20160111090000-JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1.nc')\n\n\n#from IPython.display import Image\n#from IPython.core.display import HTML \n#result = p.granule_preview(dataset_id='PODAAC-GHGMR-4FJ04')\n\n\n\n#from podaac import l2ss as l2ss\n#l = l2ss.L2SS()\n#granule_id = '20161117090000-JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1.nc'\n#query = {\n# \"email\": \"ryan.peter.north@uni-hamburg.de\",\n# \"query\":\n# [\n# {\n# \"urs_username\" : \"ryanpeternorth\",\n# \"urs_password\" : \"lB8d@fmUhvkEDhIWrRn\",\n# \"webdav_url\" : \"https://podaac-tools.jpl.nasa.gov/drive/files\",\n# \"compact\": \"true\",\n# \"datasetId\": \"PODAAC-GHGMR-4FJ04\",\n# \"bbox\": \"8,-30,20,-17\",\n# \"variables\": ['lat', 'lon', 'time', 'sea_surface_temperature', 'sst_dtime', 'rejection_flag'],\n# \"granuleIds\": [granule_id]\n# }\n# ]\n# }\n#l.granule_download(query_string=query)\n\n\n","sub_path":"src/met132_sea_level_functions.py","file_name":"met132_sea_level_functions.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"314598709","text":"from pandas import read_csv\n\n# load dataset\nfile = 'vgsales.csv'\nnames = names = ['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']\ndataset = read_csv(file, names=names)\n\n# print(dataset.shape)\n# print(dataset.describe)\n# print(dataset.head(10))\n# print(dataset.tail(10))\nqtde_wii = dataset[(dataset['Platform'] == 'Wii')]\n# print(len(qtde_wii))\n\nqtde_x360 = dataset[(dataset['Platform'] == 'X360')]\n# print(len(qtde_x360))\n\nqtde_ps4 = dataset[(dataset['Platform'] == 'PS4')]\n# print(len(qtde_ps4))\n\n# qtde_ps4.to_csv('ps4.csv')\n\nps4_ubisoft = dataset.query('Platform == \"PS4\" & Publisher == \"Ubisoft\"')\n#print(ps4_ubisoft.shape)\n#print(ps4_ubisoft.head())\n\ndataset2 = dataset.loc[1:3, ['Name', 
'Platform']]\n#print(dataset2.head())\n\n","sub_path":"exercicio5.py","file_name":"exercicio5.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"114558753","text":"#!/usr/bin/env python\n#\n# Copyright Daniel Liljeberg 2018\n\nimport os\nimport sys\nimport logging\nimport requests\nimport json\n\nfrom base64 import b64encode\n\n# Enable verified HTTPS requests on older Pythons\n# http://urllib3.readthedocs.org/en/latest/security.html\nif sys.version_info[0] == 2:\n try:\n requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()\n except AttributeError:\n # see https://github.com/certbot/certbot/issues/1883\n import urllib3.contrib.pyopenssl\n urllib3.contrib.pyopenssl.inject_into_urllib3()\n\n# Setup logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler())\n\n# Setup log level\nif os.environ.get('DEBUG'):\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n\n# Setup headers for our requests\ntry:\n authstr = 'Basic ' + b64encode(b':'.join((os.environ['ONAPP_EMAIL'], os.environ['ONAPP_KEY']))).strip()\n ONAPP_HEADERS = {\n 'Authorization': authstr,\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n }\n ONAPP_URL = \"https://{0}/\".format(os.environ['ONAPP_URL'])\nexcept KeyError:\n logger.error(\" + Unable to locate OnApp credentials in environment!\")\n sys.exit(1)\n\n# Method for deploying cert\ndef deploy_cert(args):\n domain, privkey_pem, cert_pem, fullchain_pem, chain_pem, timestamp = args\n logger.debug(' + ssl_certificate: {0}'.format(fullchain_pem))\n logger.debug(' + ssl_certificate_key: {0}'.format(privkey_pem))\n\n # Get the ID for the custom cert\n cdn_id = get_cdn_ssl_id(domain)\n if(cdn_id == None):\n return\n\n logger.debug(' + cdn_ssl_certificate_id: {0}'.format(cdn_id))\n \n # Update cert on onapp\n url = '{0}cdn_ssl_certificates/{1}.json'.format(ONAPP_URL, cdn_id)\n cert = open(fullchain_pem, 'r')\n key = open(privkey_pem, 'r')\n payload = {\n 'cdn_ssl_certificate': {\n 'name': domain,\n 'cert': cert.read().replace('\\n', '\\r\\n').strip(),\n 'key': key.read().replace('\\n', '\\r\\n').strip()\n }\n }\n cert.close()\n key.close()\n\n # Perform request to update cert\n r = requests.put(url, data=json.dumps(payload), headers=ONAPP_HEADERS)\n r.raise_for_status()\n logger.info(' + custom_ssl_update: {0}'.format(r.ok))\n return\n\n# Gets the SSL cert id\ndef get_cdn_ssl_id(domain):\n url = \"{0}cdn_ssl_certificates.json\".format(ONAPP_URL)\n r = requests.get(url, headers=ONAPP_HEADERS)\n r.raise_for_status()\n json = r.json()\n for index, item in enumerate(json):\n if item['cdn_ssl_certificate']['name'] == domain:\n return item['cdn_ssl_certificate']['id']\n \n return None\n\n# Main function to redirect to proper method\ndef main(argv):\n ops = {\n 'deploy_cert' : deploy_cert,\n }\n if argv[0] in ops:\n logger.info(\" + OnApp hook executing: {0}\".format(argv[0]))\n ops[argv[0]](argv[1:])\n\nif __name__ == '__main__':\n main(sys.argv[1:])","sub_path":"onapp_hook.py","file_name":"onapp_hook.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"423557038","text":"import time\r\nfrom platform import python_version\r\n\r\nimport nekos\r\nimport requests\r\nfrom PIL import Image\r\nfrom telethon import version\r\n\r\nfrom userbot import ALIVE_NAME, CMD_HELP, StartTime, catdef, catversion\r\n\r\nfrom ..utils 
import admin_cmd, edit_or_reply, sudo_cmd\r\n\r\n# NOTE: Config, Var, bot and borg are globals injected by the userbot plugin loader at runtime.\r\nDEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else \"cat\"\r\nCAT_IMG = Config.ALIVE_PIC\r\n\r\n\r\n@borg.on(admin_cmd(outgoing=True, pattern=\"alive$\"))\r\n@borg.on(sudo_cmd(pattern=\"alive$\", allow_sudo=True))\r\nasync def amireallyalive(alive):\r\n    if alive.fwd_from:\r\n        return\r\n    reply_to_id = alive.message\r\n    uptime = await catdef.get_readable_time((time.time() - StartTime))\r\n    _, check_sgnirts = check_data_base_heal_th()\r\n    hmm = bot.uid\r\n    if alive.reply_to_msg_id:\r\n        reply_to_id = await alive.get_reply_message()\r\n    if CAT_IMG:\r\n        cat_caption = f\"**✮ BOT EKDAM DAMDAAR CHAL RAHA ✮**\\n\\n\"\r\n        cat_caption += f\"**✧ Database :** `{check_sgnirts}`\\n\"\r\n        cat_caption += f\"**✧ Telethon version :** `{version.__version__}\\n`\"\r\n        cat_caption += f\"**✧GANGSTER :** `{catversion}`\\n\"\r\n        cat_caption += f\"**✧ Python Version :** `{python_version()}\\n`\"\r\n        cat_caption += f\"**✧ Uptime :** `{uptime}\\n`\"\r\n        cat_caption += f\"**✧ My Master:** [{DEFAULTUSER}](tg://user?id={hmm})\\n\"\r\n        await borg.send_file(\r\n            alive.chat_id, CAT_IMG, caption=cat_caption, reply_to=reply_to_id\r\n        )\r\n        await alive.delete()\r\n    else:\r\n        await edit_or_reply(\r\n            alive,\r\n            f\"**✮ MY BOT IS RUNNING SUCCESSFULLY ✮**\\n\\n\"\r\n            f\"**✧ Database :** `{check_sgnirts}`\\n\"\r\n            f\"**✧ Telethon Version :** `{version.__version__}\\n`\"\r\n            f\"**✧ GANGSTER:** `{catversion}`\\n\"\r\n            f\"**✧ Python Version :** `{python_version()}\\n`\"\r\n            f\"**✧ Uptime :** `{uptime}\\n`\"\r\n            f\"**✧ My Master:** [{DEFAULTUSER}](tg://user?id={hmm})\\n\",\r\n        )\r\n\r\n\r\n@borg.on(admin_cmd(outgoing=True, pattern=\"ialive$\"))\r\n@borg.on(sudo_cmd(pattern=\"ialive$\", allow_sudo=True))\r\nasync def amireallyalive_inline(alive):\r\n    if alive.fwd_from:\r\n        return\r\n    tgbotusername = Var.TG_BOT_USER_NAME_BF_HER\r\n    reply_to_id = alive.message\r\n    if alive.reply_to_msg_id:\r\n        reply_to_id = await alive.get_reply_message()\r\n    hmm = bot.uid\r\n    cat_caption = f\"**Catuserbot is Up and Running**\\n\"\r\n    cat_caption += f\"** -Telethon version :** `{version.__version__}\\n`\"\r\n    cat_caption += f\"** -Catuserbot Version :** `{catversion}`\\n\"\r\n    cat_caption += f\"** -Python Version :** `{python_version()}\\n`\"\r\n    cat_caption += f\"** -My peru Master:** [{DEFAULTUSER}](tg://user?id={hmm})\\n\"\r\n    results = await bot.inline_query(tgbotusername, cat_caption) # pylint:disable=E0602\r\n    await results[0].click(alive.chat_id, reply_to=reply_to_id, hide_via=True)\r\n    await alive.delete()\r\n\r\n\r\n@borg.on(admin_cmd(pattern=\"cat$\"))\r\n@borg.on(sudo_cmd(pattern=\"cat$\", allow_sudo=True))\r\nasync def _(event):\r\n    try:\r\n        await event.delete()\r\n    except BaseException:\r\n        pass\r\n    reply_to_id = event.message\r\n    if event.reply_to_msg_id:\r\n        reply_to_id = await event.get_reply_message()\r\n    with open(\"temp.png\", \"wb\") as f:\r\n        f.write(requests.get(nekos.cat()).content)\r\n    img = Image.open(\"temp.png\")\r\n    img.save(\"temp.webp\", \"webp\")\r\n    img.seek(0)\r\n    await bot.send_file(event.chat_id, open(\"temp.webp\", \"rb\"), reply_to=reply_to_id)\r\n\r\n\r\n# UniBorg Telegram UseRBot\r\n# Copyright (C) 2020 @UniBorg\r\n# This code is licensed under\r\n# the \"you can't use this for anything - public or private,\r\n# unless you know the two prime factors to the number below\" license\r\n# 543935563961418342898620676239017231876605452284544942043082635399903451854594062955\r\n# To those who copy this description and walk off with it:\r\n# keeping the credit would make us happy..!\r\n# uniborg\r\n\r\n\r\n
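# Lightweight database health probe used by the alive captions above: it runs a raw\r\n# \"SELECT 1\" and reports a (working?, human-readable status) pair.\r\ndef 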
check_data_base_heal_th():\r\n    # https://stackoverflow.com/a/41961968\r\n    is_database_working = False\r\n    output = \"No Database is set\"\r\n    if not Var.DB_URI:\r\n        return is_database_working, output\r\n    from userbot.plugins.sql_helper import SESSION\r\n\r\n    try:\r\n        # to check the database we execute a raw query\r\n        SESSION.execute(\"SELECT 1\")\r\n    except Exception as e:\r\n        output = f\"❌ {str(e)}\"\r\n        is_database_working = False\r\n    else:\r\n        output = \"Functioning Normally\"\r\n        is_database_working = True\r\n    return is_database_working, output\r\n\r\n\r\nCMD_HELP.update(\r\n    {\r\n        \"alive\": \"**Plugin :** `alive`\\\r\n    \\n\\n**Syntax : **`.alive` :\\\r\n    \\n**Usage : ** status of bot.\\\r\n    \\n\\n**Syntax : **`.ialive` :\\\r\n    \\n**Usage : ** inline alive.\\\r\n    \\n\\n**Syntax :** `.cat`\\\r\n    \\n**Usage : **Random cat stickers\"\r\n    }\r\n)\r\n","sub_path":"userbot/plugins/alive.py","file_name":"alive.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"588788926","text":"import math\ndef is_triangular(num):\n    # formula: T(n) = 1/2 * n * (n + 1)\n    # n^2 + n - 2num = 0\n    a = 1\n    b = 1\n    c = -2 * num\n    disc = math.sqrt(b**2 - 4 * a * c)\n    if disc - int(disc) != 0:\n        return False\n    return True\n\n\ndef find_num_of_factors(num):\n    counter = 1\n    for factor in range(1, num // 2 + 1):\n        if num % factor == 0:\n            counter += 1\n\n    return counter\n\n\nimport math\n\n# def find_numbers_of_factors(num):\n#     # find the canonical (prime) factorization of the number\n#     num_copy = num\n#     cur_factor = 2\n#     obj = {}\n#     while True:\n#         # if cur_factor > num_copy // 2:\n#         #     break\n#         if num_copy <= 1:\n#             break\n#         if num_copy % cur_factor == 0:\n#             num_copy /= cur_factor\n#             if cur_factor in obj:\n#                 obj[cur_factor] += 1\n#             else:\n#                 obj[cur_factor] = 1\n#         else:\n#             cur_factor += 1\n#\n#     print(obj)\n#     max = 0\n#     for key in obj:\n#         if obj[key] > max:\n#             max = obj[key]\n#     max += 1# account for the zero exponent\n#     A = len(obj) * max\n#     #print(max)\n#     print(A)\n#\n#     # use the combinatorics formula for arrangements A\n#     # result = math.factorial(sum) / math.factorial((sum - len(obj)))\n#     # result = str(int(result))\n#     # objLen = int(len(obj))\n#     # resultLen = int(len(result))\n#     #\n#     # result = int(result) - objLen * (resultLen - 1) * 10\n#     # print(result)\n#\n# print(find_numbers_of_factors(10040)) my attempt at optimizing find_num_of_factors via the canonical factorization\n\n\nthe_number = 1\nwhile True:\n    if is_triangular(the_number) and find_num_of_factors(the_number) > 500:\n        print(the_number)\n        break\n    else:\n        print(the_number)\n\n    the_number += 1\n\n# 76576500  the script ran for 2-3 hours","sub_path":"Проект Эйлера/12 (Highly divisible triangular number).py","file_name":"12 (Highly divisible triangular number).py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"416594655","text":"#import dependencies\nfrom bs4 import BeautifulSoup \nfrom splinter import Browser\nimport os\nimport pandas as pd\nimport time\nimport re\nfrom selenium import webdriver\n\ndef init_browser():\n    \n    executable_path = {\"executable_path\":\"C:\\webdrivers\\chromedriver.exe\"}\n    return Browser(\"chrome\", **executable_path, headless = False)\n\ndef scrape():\n    browser = init_browser()\n    mars_data = {}\n\n    url='https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\n    \n    browser.visit(url)\n    time.sleep(2)\n\n    
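# grab the rendered page source and hand it to BeautifulSoup\n    html = 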
browser.html\n soup = BeautifulSoup(html,\"html.parser\")\n\n #scrapping latest news about mars from nasa\n news_title = soup.find(\"div\",class_=\"content_title\").text\n news_paragraph = soup.find(\"div\", class_=\"article_teaser_body\").text\n mars_data['news_title'] = news_title\n mars_data['news_paragraph'] = news_paragraph \n \n #Mars Featured Image\n url_image = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(url_image)\n time.sleep(2)\n\n from urllib.parse import urlsplit\n base_url = \"{0.scheme}://{0.netloc}/\".format(urlsplit(url_image))\n \n \n\n #bring the full resolution image\n full_image = browser.find_by_id('full_image')\n full_image.click()\n \n \n # Find the more info button and click that\n browser.is_element_present_by_text('more info', wait_time=1)\n more_info_elem = browser.find_link_by_partial_text('more info')\n more_info_elem.click() \n \n #get image url using BeautifulSoup\n html_image = browser.html\n soup = BeautifulSoup(html_image, \"html.parser\")\n # find the relative image url\n img_url_rel = soup.select_one('figure.lede a img').get(\"src\")\n full_img_url = base_url + img_url_rel\n mars_data[\"featured_image\"] = full_img_url\n \n \n \n \n \n\n #get mars weather's latest tweet from the website\n #url_weather = \"https://twitter.com/marswxreport?lang=en\"\n #browser.visit(url_weather)\n #html = browser.html\n #weather_soup = BeautifulSoup(html, 'html.parser')\n #mars_weather_tweet = weather_soup.find('span', text = re.compile('2020-04-27')).text\n #mars_data[\"mars_weather\"] = mars_weather_tweet\n\n #Mars Facts\n\n fact_url = \"https://space-facts.com/mars/\"\n browser.visit(fact_url)\n time.sleep(2)\n table = pd.read_html(fact_url)\n \n\n df_mars_facts = table[0]\n df_mars_facts.columns = [\"attribute\", \"Values\"]\n mars_table = df_mars_facts.set_index([\"attribute\"])\n mars_html_table = mars_table.to_html()\n mars_html_table = mars_html_table.replace(\"\\n\", \"\")\n mars_data[\"mars_facts_table\"] = mars_html_table\n\n #Mars Hemisperes\n\n url_hemisphere = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(url_hemisphere)\n\n #Getting the base url\n hemisphere_base_url = \"{0.scheme}://{0.netloc}/\".format(urlsplit(url_hemisphere))\n hemisphere_img_urls = []\n hemisphere_img_urls\n\n #Cerberus-Hemisphere-image-url\n\n \n results = browser.find_by_xpath( \"//*[@id='product-section']/div[2]/div[1]/a/img\").click()\n time.sleep(2)\n\n cer_img = browser.find_by_id('wide-image-toggle')\n cer_img.click()\n\n\n cerberus_image = browser.html\n soup = BeautifulSoup(cerberus_image, \"html.parser\")\n cerberus_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n cerberus_img_url = hemisphere_base_url + cerberus_url\n cerberus_title = soup.find(\"h2\",class_=\"title\").text\n back_button = browser.visit(\"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\")\n cerberus = {\"image title\":cerberus_title, \"image url\": cerberus_img_url}\n hemisphere_img_urls.append(cerberus)\n\n\n #Schiaparelli-Hemisphere-image-url\n\n results1 = browser.find_by_xpath( \"//*[@id='product-section']/div[2]/div[2]/a/img\").click()\n time.sleep(2)\n\n sch_img = browser.find_by_id('wide-image-toggle')\n sch_img.click()\n\n\n sch_image = browser.html\n soup = BeautifulSoup(sch_image, \"html.parser\")\n sch_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n sch_img_url = hemisphere_base_url + sch_url\n sch_title = soup.find(\"h2\",class_=\"title\").text\n 
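# navigate back to the search results page before scraping the next hemisphere\n    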
back_button = browser.visit(\"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\")\n\n Schiaparelli = {\"image title\":sch_title, \"image url\": sch_img_url}\n hemisphere_img_urls.append(Schiaparelli)\n\n\n #Syrtis Major Hemisphere\n\n results2 = browser.find_by_xpath( \"//*[@id='product-section']/div[2]/div[3]/a/img\").click()\n time.sleep(2)\n\n syr_img = browser.find_by_id('wide-image-toggle')\n syr_img.click()\n\n\n syr_image = browser.html\n soup = BeautifulSoup(syr_image, \"html.parser\")\n syr_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n syr_img_url = hemisphere_base_url + syr_url\n syr_title = soup.find(\"h2\",class_=\"title\").text\n back_button = browser.visit(\"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\")\n Syrtis = {\"image title\":syr_title, \"image url\": syr_img_url}\n hemisphere_img_urls.append(Syrtis)\n\n\n #Valles Marineris Hemisphere\n\n results3 = browser.find_by_xpath( \"//*[@id='product-section']/div[2]/div[4]/a/img\").click()\n time.sleep(2)\n\n valles_img = browser.find_by_id('wide-image-toggle')\n valles_img.click()\n valles_image = browser.html\n soup = BeautifulSoup(valles_image, \"html.parser\")\n valles_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n valles_img_url = hemisphere_base_url + valles_url\n valles_title = soup.find(\"h2\",class_=\"title\").text\n back_button = browser.visit(\"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\")\n Valles = {\"image title\": valles_title, \"image url\": valles_img_url}\n hemisphere_img_urls.append(Valles)\n\n\n mars_data[\"hemisphere_img_url\"] = hemisphere_img_urls\n\n \n\n return mars_data","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":6081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"33971378","text":"# p11-1-qsort.py\n\ndef qsort(a) :\n n = len(a)\n if n <= 1 :\n return a\n pivot = a[-1]\n a1 = []\n a2 = []\n for i in range(n-1) :\n if a[i] < pivot :\n a1.append(a[i])\n else :\n a2.append(a[i])\n return qsort(a1) + [pivot] + qsort(a2)\n\nd = [6,8,3,9,10,1,2,4,7,5]\nprint(qsort(d))\n\n\n\n# :(\n# p11-2-qsort2.py\n\ndef qsort_sub(a, start, end) :\n if end <= start :\n return \n \n pivot = a[end]\n p_loc = start\n for i in range(start, end) :\n if a[i] <= pivot : \n a[i], a[p_loc] = a[p_loc], a[i]\n p_loc += 1\n a[p_loc], a[end] = a[end], a[p_loc]\n qsort_sub(a, start, p_loc-1)\n qsort_sub(a,p_loc+1, end)\n\ndef qsort2(a) :\n qsort_sub(a,0,len(a)-1)\n\nd = [6,8,3,9,10,1,2,4,7,5]\nqsort2(d)\nprint(d)\n\n\n# Problem\n# 11-1\n# Bubble sort\n\ndef bsort(a):\n n = len(a)\n\n for i in range(n-1) :\n for j in range(n-1-i) :\n if a[j + 1] < a[j] :\n a[j], a[j+1] = a[j+1], a[j]\n \n\nd = [6,8,3,9,10,1,2,4,7,5]\nbsort(d)\nprint(d)","sub_path":"program/p11.py","file_name":"p11.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"139013056","text":"from nmigen import *\nfrom nmigen.back.pysim import *\nfrom nmigen.asserts import *\nfrom nmigen.test.utils import *\nfrom nmigen.build import *\nfrom nmigen.build import ResourceError\nfrom nmigen.vendor.lattice_ecp5 import *\nfrom nmigen_boards.resources import *\nfrom functools import reduce\n\nimport itertools\nimport os\nimport subprocess\n\n__all__ = [\"TXUART\", \"VersaECP5Platform\"]\n\n\"\"\"\nRS-232 Transmitter adapted for line 
capturer\nSee http://zipcpu.com/tutorial/lsn-10-fifo.pdf for more details\n\"\"\"\n\nclass TXUART(Elaboratable):\n\tdef __init__(self):\n\t\tself.i_wr = Signal(1, reset=0)\n\t\tself.i_data = Signal(8, reset=0)\n\t\tself.o_busy = Signal(1, reset=0)\n\t\tself.o_uart_tx = Signal(1, reset=1)\n\tdef ports(self):\n\t\treturn [\n\t\t\tself.i_wr,\n\t\t\tself.i_data,\n\t\t\tself.o_busy,\n\t\t\tself.o_uart_tx\n\t\t]\n\tdef elaborate(self, platform):\n\t\tm = Module()\n\n\t\tCLOCKS_PER_BAUD = 4\n\n\t\tif platform is not None and platform != \"formal\":\n\t\t\tBAUD_RATE = 115200\n\t\t\tCLOCKS_PER_BAUD = int(platform.default_clk_frequency // BAUD_RATE)\n\t\t\t# self.o_uart_tx = platform.request('uart').tx.o\n\n\t\tcounter = Signal(range(CLOCKS_PER_BAUD), reset=0)\n\n\t\tdata_copy = Signal(8, reset=0)\n\n\t\tstate = Signal(4, reset=0)\n\n\t\tm.d.comb += self.o_busy.eq(state != 0)\n\n\t\twith m.FSM():\n\t\t\twith m.State('IDLE'):\n\t\t\t\tm.next = 'IDLE'\n\t\t\t\tm.d.sync += state.eq(0)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(1)\n\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\t\twith m.If(self.i_wr):\n\t\t\t\t\tm.next = 'START'\n\t\t\t\t\tm.d.sync += state.eq(1)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(0)\n\t\t\t\t\tm.d.sync += data_copy.eq(self.i_data)\n\t\t\twith m.State('START'):\n\t\t\t\tm.next = 'START'\n\t\t\t\tm.d.sync += state.eq(1)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(0)\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT0'\n\t\t\t\t\tm.d.sync += state.eq(2)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[0])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT0'):\n\t\t\t\tm.next = 'BIT0'\n\t\t\t\tm.d.sync += state.eq(2)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[0])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT1'\n\t\t\t\t\tm.d.sync += state.eq(3)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[1])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT1'):\n\t\t\t\tm.next = 'BIT1'\n\t\t\t\tm.d.sync += state.eq(3)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[1])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT2'\n\t\t\t\t\tm.d.sync += state.eq(4)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[2])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT2'):\n\t\t\t\tm.next = 'BIT2'\n\t\t\t\tm.d.sync += state.eq(4)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[2])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT3'\n\t\t\t\t\tm.d.sync += state.eq(5)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[3])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT3'):\n\t\t\t\tm.next = 'BIT3'\n\t\t\t\tm.d.sync += state.eq(5)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[3])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT4'\n\t\t\t\t\tm.d.sync += state.eq(6)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[4])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT4'):\n\t\t\t\tm.next = 'BIT4'\n\t\t\t\tm.d.sync += state.eq(6)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[4])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT5'\n\t\t\t\t\tm.d.sync += state.eq(7)\n\t\t\t\t\tm.d.sync += 
self.o_uart_tx.eq(data_copy[5])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT5'):\n\t\t\t\tm.next = 'BIT5'\n\t\t\t\tm.d.sync += state.eq(7)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[5])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT6'\n\t\t\t\t\tm.d.sync += state.eq(8)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[6])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT6'):\n\t\t\t\tm.next = 'BIT6'\n\t\t\t\tm.d.sync += state.eq(8)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[6])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'BIT7'\n\t\t\t\t\tm.d.sync += state.eq(9)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[7])\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('BIT7'):\n\t\t\t\tm.next = 'BIT7'\n\t\t\t\tm.d.sync += state.eq(9)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(data_copy[7])\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'STOP1'\n\t\t\t\t\tm.d.sync += state.eq(10)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(1)\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('STOP1'):\n\t\t\t\tm.next = 'STOP1'\n\t\t\t\tm.d.sync += state.eq(10)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(1)\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'STOP2'\n\t\t\t\t\tm.d.sync += state.eq(11)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(1)\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\t\t\twith m.State('STOP2'):\n\t\t\t\tm.next = 'STOP2'\n\t\t\t\tm.d.sync += state.eq(11)\n\t\t\t\tm.d.sync += self.o_uart_tx.eq(1)\n\t\t\t\tm.d.sync += counter.eq(counter + 1)\n\t\t\t\twith m.If(counter == CLOCKS_PER_BAUD - 1):\n\t\t\t\t\tm.next = 'IDLE'\n\t\t\t\t\tm.d.sync += state.eq(0)\n\t\t\t\t\tm.d.sync += self.o_uart_tx.eq(1)\n\t\t\t\t\tm.d.sync += counter.eq(0)\n\n\t\tif platform == 'formal':\n\t\t\t\"\"\"\n\t\t\tIndicators of when Past() is valid\n\t\t\t\"\"\"\n\t\t\tf_past_valid = Signal(1, reset=0)\n\t\t\tm.d.sync += f_past_valid.eq(1)\n\t\t\tf_pastn_valid = Signal(1, reset=0)\n\t\t\tf_pastn_ctr = Signal(range(CLOCKS_PER_BAUD), reset=0)\n\t\t\tm.d.sync += f_pastn_ctr.eq(f_pastn_ctr + 1)\n\t\t\twith m.If(f_pastn_ctr == CLOCKS_PER_BAUD - 1):\n\t\t\t\tm.d.sync += f_pastn_ctr.eq(f_pastn_ctr)\n\t\t\t\tm.d.sync += f_pastn_valid.eq(1)\n\n\t\t\t\"\"\"\n\t\t\tProperties of o_busy\n\t\t\t\"\"\"\n\t\t\t# o_busy should be asserted if and only if a transmission is taking place\n\t\t\tm.d.comb += Assert(self.o_busy == (state != 0))\n\n\t\t\t\"\"\"\n\t\t\tProperties of o_uart_tx\n\t\t\t\"\"\"\n\t\t\t# In each given state, o_uart_tx should have the corresponding output\n\t\t\twith m.Switch(state):\n\t\t\t\twith m.Case(0):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == 1)\n\t\t\t\twith m.Case(1):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == 0)\n\t\t\t\twith m.Case(2):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == data_copy[0])\n\t\t\t\twith m.Case(3):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == data_copy[1])\n\t\t\t\twith m.Case(4):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == data_copy[2])\n\t\t\t\twith m.Case(5):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == data_copy[3])\n\t\t\t\twith m.Case(6):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == data_copy[4])\n\t\t\t\twith m.Case(7):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == data_copy[5])\n\t\t\t\twith m.Case(8):\n\t\t\t\t\tm.d.comb += 
Assert(self.o_uart_tx == data_copy[6])\n\t\t\t\twith m.Case(9):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == data_copy[7])\n\t\t\t\twith m.Case(10):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == 1)\n\t\t\t\twith m.Case(11):\n\t\t\t\t\tm.d.comb += Assert(self.o_uart_tx == 1)\n\t\t\t\twith m.Default():\n\t\t\t\t\tm.d.comb += Assert(0) # This should never happen\n\n\t\t\t\"\"\"\n\t\t\tCounter properties\n\t\t\t\"\"\"\n\t\t\t# The counter should never reach or exceed CLOCKS_PER_BAUD\n\t\t\tm.d.comb += Assert(counter < CLOCKS_PER_BAUD)\n\t\t\t# When idle, the counter should be always zero\n\t\t\twith m.If(state == 0):\n\t\t\t\tm.d.comb += Assert(counter == 0)\n\t\t\t# At the beginning of a transmission, counter should start at zero\n\t\t\twith m.If(f_past_valid & (Past(state) == 0) & Past(self.i_wr)):\n\t\t\t\tm.d.comb += Assert(counter == 0)\n\t\t\t# During a transmission, counter should always count up by 1, modulo\n\t\t\t# CLOCKS_PER_BAUD\n\t\t\twith m.If(f_past_valid & (Past(state) != 0)):\n\t\t\t\tm.d.comb += Assert(counter == ((Past(counter) + 1) % CLOCKS_PER_BAUD))\n\n\t\t\t\"\"\"\n\t\t\tProperties of data_copy\n\t\t\t\"\"\"\n\t\t\t# When idle, on i_wr, data_copy should take the value of i_data on the next\n\t\t\t# clock cycle\n\t\t\twith m.If(f_past_valid & (Past(state) == 0) & Past(self.i_wr)):\n\t\t\t\tm.d.comb += Assert(data_copy == Past(self.i_data))\n\t\t\t# data_copy should remain stable during a transmission, even if i_data\n\t\t\t# changes\n\t\t\twith m.If(f_past_valid & (Past(state) != 0) & (state != 0)):\n\t\t\t\tm.d.comb += Assert(Stable(data_copy))\n\n\t\t\t\"\"\"\n\t\t\tState properties\n\t\t\t\"\"\"\n\t\t\t# The circuit should always be in a valid state\n\t\t\tm.d.comb += Assert(state < 12)\n\t\t\t# The circuit should initially be idle\n\t\t\twith m.If(~f_past_valid):\n\t\t\t\tm.d.comb += Assert(state == 0)\n\t\t\t# When idle, on i_wr, the circuit should transition to the START state on\n\t\t\t# the next clock cycle\n\t\t\twith m.If(f_past_valid & (Past(state) == 0) & Past(self.i_wr)):\n\t\t\t\tm.d.comb += Assert(state == 1)\n\t\t\t# Except for the IDLE state, the circuit should remain in each state for\n\t\t\t# exactly CLOCKS_PER_BAUD clock cycles, and the state transitions are\n\t\t\t# correct\n\t\t\twith m.If(f_pastn_valid & (Past(state, CLOCKS_PER_BAUD) != 0) & \\\n\t\t\t\t(Past(counter, CLOCKS_PER_BAUD) == 0)):\n\t\t\t\tfor i in range(1, CLOCKS_PER_BAUD):\n\t\t\t\t\tm.d.comb += Assert(Past(state, CLOCKS_PER_BAUD) == Past(state, i))\n\t\t\t\tm.d.comb += Assert(state == ((Past(state, CLOCKS_PER_BAUD) + 1) % 12))\n\n\t\t\t\"\"\"\n\t\t\tTwo of the above assertions pass 100 levels of BMC but fail induction,\n\t\t\tprobably because the number of clock cycles in which the circuit remains\n\t\t\tin the IDLE state is unbounded (and therefore no amount of base cases can\n\t\t\tmake the induction go through). 
Let us make a (likely) harmless assumption\n\t\t\tthat there is an upper bound on the amount of clock cycles in which the\n\t\t\tcircuit remains idle, say, 10 * CLOCKS_PER_BAUD\n\t\t\t\"\"\"\n\t\t\tf_past10n_valid = Signal(1, reset=0)\n\t\t\tf_past10n_ctr = Signal(range(10 * CLOCKS_PER_BAUD), reset=0)\n\t\t\tm.d.sync += f_past10n_ctr.eq(f_past10n_ctr + 1)\n\t\t\twith m.If(f_past10n_ctr == 10 * CLOCKS_PER_BAUD - 1):\n\t\t\t\tm.d.sync += f_past10n_ctr.eq(f_past10n_ctr)\n\t\t\t\tm.d.sync += f_past10n_valid.eq(1)\n\n\t\t\twith m.If(f_past10n_valid & reduce(lambda a, b: a & b, \\\n\t\t\t\t(((Past(state, i) == 0) & ~Past(self.i_wr, i)) \\\n\t\t\t\tfor i in range(1, 10 * CLOCKS_PER_BAUD + 1)))):\n\t\t\t\tm.d.comb += Assume(self.i_wr)\n\t\t\t# Aaaaaand ... with this assumption, our k-induction passes with k >= 66 ;-)\n\n\t\treturn m\n\nclass VersaECP5Platform(LatticeECP5Platform):\n\tdevice = \"LFE5UM-45F\"\n\tpackage = \"BG381\"\n\tspeed = \"8\"\n\tdefault_clk = \"clk100\"\n\tdefault_rst = \"rst\"\n\tresources = [\n\t\tResource(\"rst\", 0, PinsN(\"T1\", dir=\"i\"), Attrs(IO_TYPE=\"LVCMOS33\")),\n\t\tResource(\"clk100\", 0, DiffPairs(\"P3\", \"P4\", dir=\"i\"), Clock(100e6), Attrs(IO_TYPE=\"LVDS\")),\n\t\tResource(\"pclk\", 0, DiffPairs(\"A4\", \"A5\", dir=\"i\"), Attrs(IO_TYPE=\"LVDS\")),\n\n\t\t*LEDResources(pins=\"E16 D17 D18 E18 F17 F18 E17 F16\", attrs=Attrs(IO_TYPE=\"LVCMOS25\")),\n\n\t\tResource(\"alnum_led\", 0,\n\t\t\tSubsignal(\"a\", PinsN(\"M20\", dir=\"o\")),\n\t\t\tSubsignal(\"b\", PinsN(\"L18\", dir=\"o\")),\n\t\t\tSubsignal(\"c\", PinsN(\"M19\", dir=\"o\")),\n\t\t\tSubsignal(\"d\", PinsN(\"L16\", dir=\"o\")),\n\t\t\tSubsignal(\"e\", PinsN(\"L17\", dir=\"o\")),\n\t\t\tSubsignal(\"f\", PinsN(\"M18\", dir=\"o\")),\n\t\t\tSubsignal(\"g\", PinsN(\"N16\", dir=\"o\")),\n\t\t\tSubsignal(\"h\", PinsN(\"M17\", dir=\"o\")),\n\t\t\tSubsignal(\"j\", PinsN(\"N18\", dir=\"o\")),\n\t\t\tSubsignal(\"k\", PinsN(\"P17\", dir=\"o\")),\n\t\t\tSubsignal(\"l\", PinsN(\"N17\", dir=\"o\")),\n\t\t\tSubsignal(\"m\", PinsN(\"P16\", dir=\"o\")),\n\t\t\tSubsignal(\"n\", PinsN(\"R16\", dir=\"o\")),\n\t\t\tSubsignal(\"p\", PinsN(\"R17\", dir=\"o\")),\n\t\t\tSubsignal(\"dp\", PinsN(\"U1\", dir=\"o\")),\n\t\t\tAttrs(IO_TYPE=\"LVCMOS25\")),\n\t\t\n\t\t*SwitchResources(pins={0: \"H2\", 1: \"K3\", 2: \"G3\", 3: \"F2\" }, attrs=Attrs(IO_TYPE=\"LVCMOS15\")),\n\t\t*SwitchResources(pins={4: \"J18\", 5: \"K18\", 6: \"K19\", 7: \"K20\"}, attrs=Attrs(IO_TYPE=\"LVCMOS25\")),\n\n\t\tUARTResource(0,\n\t\t\trx=\"C11\", tx=\"A11\",\n\t\t\tattrs=Attrs(IO_TYPE=\"LVCMOS33\", PULLMODE=\"UP\")\n\t\t),\n\n\t\t*SPIFlashResources(0,\n\t\t\tcs=\"R2\", clk=\"U3\", miso=\"W2\", mosi=\"V2\", wp=\"Y2\", hold=\"W1\",\n\t\t\tattrs=Attrs(IO_STANDARD=\"LVCMOS33\")\n\t\t),\n\n\t\tResource(\"eth_clk125\", 0, Pins(\"L19\", dir=\"i\"), Clock(125e6), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\tResource(\"eth_clk125_pll\", 0, Pins(\"U16\", dir=\"i\"), Clock(125e6), Attrs(IO_TYPE=\"LVCMOS25\")), # NC by default\n\t\tResource(\"eth_rgmii\", 0,\n\t\t\tSubsignal(\"rst\", PinsN(\"U17\", dir=\"o\")),\n\t\t\tSubsignal(\"mdc\", Pins(\"T18\", dir=\"o\")),\n\t\t\tSubsignal(\"mdio\", Pins(\"U18\", dir=\"io\")),\n\t\t\tSubsignal(\"tx_clk\", Pins(\"P19\", dir=\"o\")),\n\t\t\tSubsignal(\"tx_ctl\", Pins(\"R20\", dir=\"o\")),\n\t\t\tSubsignal(\"tx_data\", Pins(\"N19 N20 P18 P20\", dir=\"o\")),\n\t\t\tSubsignal(\"rx_clk\", Pins(\"L20\", dir=\"i\")),\n\t\t\tSubsignal(\"rx_ctl\", Pins(\"U19\", dir=\"i\")),\n\t\t\tSubsignal(\"rx_data\", Pins(\"T20 U20 T19 R18\", 
dir=\"i\")),\n\t\t\tAttrs(IO_TYPE=\"LVCMOS25\")\n\t\t),\n\t\tResource(\"eth_sgmii\", 0,\n\t\t\tSubsignal(\"rst\", PinsN(\"U17\", dir=\"o\"), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\t\tSubsignal(\"mdc\", Pins(\"T18\", dir=\"o\"), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\t\tSubsignal(\"mdio\", Pins(\"U18\", dir=\"io\"), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\t\tSubsignal(\"tx\", DiffPairs(\"W13\", \"W14\", dir=\"o\")),\n\t\t\tSubsignal(\"rx\", DiffPairs(\"Y14\", \"Y15\", dir=\"i\")),\n\t\t),\n\n\t\tResource(\"eth_clk125\", 1, Pins(\"J20\", dir=\"i\"), Clock(125e6), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\tResource(\"eth_clk125_pll\", 1, Pins(\"C18\", dir=\"i\"), Clock(125e6), Attrs(IO_TYPE=\"LVCMOS25\")), # NC by default\n\t\tResource(\"eth_rgmii\", 1,\n\t\t\tSubsignal(\"rst\", PinsN(\"F20\", dir=\"o\")),\n\t\t\tSubsignal(\"mdc\", Pins(\"G19\", dir=\"o\")),\n\t\t\tSubsignal(\"mdio\", Pins(\"H20\", dir=\"io\")),\n\t\t\tSubsignal(\"tx_clk\", Pins(\"C20\", dir=\"o\")),\n\t\t\tSubsignal(\"tx_ctrl\", Pins(\"E19\", dir=\"o\")),\n\t\t\tSubsignal(\"tx_data\", Pins(\"J17 J16 D19 D20\", dir=\"o\")),\n\t\t\tSubsignal(\"rx_clk\", Pins(\"J19\", dir=\"i\")),\n\t\t\tSubsignal(\"rx_ctrl\", Pins(\"F19\", dir=\"i\")),\n\t\t\tSubsignal(\"rx_data\", Pins(\"G18 G16 H18 H17\", dir=\"i\")),\n\t\t\tAttrs(IO_TYPE=\"LVCMOS25\")\n\t\t),\n\t\tResource(\"eth_sgmii\", 1,\n\t\t\tSubsignal(\"rst\", PinsN(\"F20\", dir=\"o\"), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\t\tSubsignal(\"mdc\", Pins(\"G19\", dir=\"o\"), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\t\tSubsignal(\"mdio\", Pins(\"H20\", dir=\"io\"), Attrs(IO_TYPE=\"LVCMOS25\")),\n\t\t\tSubsignal(\"tx\", DiffPairs(\"W17\", \"W18\", dir=\"o\")),\n\t\t\tSubsignal(\"rx\", DiffPairs(\"Y16\", \"Y17\", dir=\"i\")),\n\t\t),\n\n\t\tResource(\"ddr3\", 0,\n\t\t\tSubsignal(\"rst\", PinsN(\"N4\", dir=\"o\")),\n\t\t\tSubsignal(\"clk\", DiffPairs(\"M4\", \"N5\", dir=\"o\"), Attrs(IO_TYPE=\"LVDS\")),\n\t\t\tSubsignal(\"clk_en\", Pins(\"N2\", dir=\"o\")),\n\t\t\tSubsignal(\"cs\", PinsN(\"K1\", dir=\"o\")),\n\t\t\tSubsignal(\"we\", PinsN(\"M1\", dir=\"o\")),\n\t\t\tSubsignal(\"ras\", PinsN(\"P1\", dir=\"o\")),\n\t\t\tSubsignal(\"cas\", PinsN(\"L1\", dir=\"o\")),\n\t\t\tSubsignal(\"a\", Pins(\"P2 C4 E5 F5 B3 F4 B5 E4 C5 E3 D5 B4 C3\", dir=\"o\")),\n\t\t\tSubsignal(\"ba\", Pins(\"P5 N3 M3\", dir=\"o\")),\n\t\t\tSubsignal(\"dqs\", DiffPairs(\"K2 H4\", \"J1 G5\", dir=\"io\"), Attrs(IO_TYPE=\"LVDS\")),\n\t\t\tSubsignal(\"dq\", Pins(\"L5 F1 K4 G1 L4 H1 G2 J3 D1 C1 E2 C2 F3 A2 E1 B1\", dir=\"io\")),\n\t\t\tSubsignal(\"dm\", Pins(\"J4 H5\", dir=\"o\")),\n\t\t\tSubsignal(\"odt\", Pins(\"L2\", dir=\"o\")),\n\t\t\tAttrs(IO_TYPE=\"LVCMOS15\")\n\t\t)\n\t]\n\tconnectors = [\n\t\tConnector(\"expcon\", 1, \"\"\"\n\t\t- - - B19 B12 B9 E6 D6 E7 D7 B11 B6 E9 D9 B8 C8 D8 E8 C7 C6\n\t\t- - - - - - - - - - - - - - - - - - - -\n\t\t\"\"\"), # X3\n\t\tConnector(\"expcon\", 2, \"\"\"\n\t\tA8 - A12 A13 B13 C13 D13 E13 A14 C14 D14 E14 D11 C10 A9 B10 D12 E12 - -\n\t\tB15 - C15 - D15 - E15 A16 B16 - C16 D16 B17 - C17 A17 B18 A7 A18 -\n\t\t\"\"\"), # X4\n\t]\n\n\t@property\n\tdef file_templates(self):\n\t\treturn {\n\t\t\t**super().file_templates,\n\t\t\t\"{{name}}-openocd.cfg\": r\"\"\"\n\t\t\tinterface ftdi\n\t\t\t{# FTDI descriptors is identical between non-5G and 5G recent Versa boards #}\n\t\t\tftdi_vid_pid 0x0403 0x6010\n\t\t\tftdi_channel 0\n\t\t\tftdi_layout_init 0xfff8 0xfffb\n\t\t\treset_config none\n\t\t\tadapter_khz 25000\n\t\t\t# ispCLOCK device (unusable with openocd and must be bypassed)\n\t\t\t#jtag newtap ispclock tap -irlen 8 
-expected-id 0x00191043\n\t\t\t# ECP5 device\n\t\t\t{% if \"5G\" in platform.device -%}\n\t\t\tjtag newtap ecp5 tap -irlen 8 -expected-id 0x81112043 ; # LFE5UM5G-45F\n\t\t\t{% else -%}\n\t\t\tjtag newtap ecp5 tap -irlen 8 -expected-id 0x01112043 ; # LFE5UM-45F\n\t\t\t{% endif %}\n\t\t\t\"\"\"\n\t\t}\n\n\tdef toolchain_program(self, products, name):\n\t\topenocd = os.environ.get(\"OPENOCD\", \"openocd\")\n\t\twith products.extract(\"{}-openocd.cfg\".format(name), \"{}.svf\".format(name)) \\\n\t\t\tas (config_filename, vector_filename):\n\t\t\tsubprocess.check_call([openocd,\n\t\t\t\t\"-f\", config_filename,\n\t\t\t\t\"-c\", \"transport select jtag; init; svf -quiet {}; exit\".format(vector_filename)\n\t\t\t])\n\nif __name__ == \"__main__\":\n\t\"\"\"\n\tSimulation\n\t\"\"\"\n\tm = Module()\n\tm.submodules.txuart = txuart = TXUART()\n\n\tsim = Simulator(m)\n\n\tmsg = \"Hello World!\\n\"\n\n\tdef process():\n\t\tfor i in range(25):\n\t\t\tyield\n\t\tfor c in msg:\n\t\t\tbyte = ord(c)\n\t\t\tyield txuart.i_wr.eq(1)\n\t\t\tyield txuart.i_data.eq(byte)\n\t\t\tyield\n\t\t\tyield txuart.i_wr.eq(0)\n\t\t\tyield txuart.i_data.eq(0)\n\t\t\tfor i in range(50):\n\t\t\t\tyield\n\n\tsim.add_clock(1e-8)\n\tsim.add_sync_process(process)\n\n\twith sim.write_vcd('txuart.vcd', 'txuart.gtkw', traces=txuart.ports()):\n\t\tsim.run()\n\n\t\"\"\"\n\tFormal Verification\n\t\"\"\"\n\tclass TXUARTTest(FHDLTestCase):\n\t\tdef test_txuart(self):\n\t\t\t# Yes, under our current assumptions on how long the transmitter can stay idle\n\t\t\t# before it receives its next i_wr, it requires at least 66 steps to pass\n\t\t\t# induction ;-)\n\t\t\tself.assertFormal(TXUART(), mode='prove', depth=66)\n\tTXUARTTest().test_txuart()","sub_path":"fv-beginner/ex-10-fifo/txuart.py","file_name":"txuart.py","file_ext":"py","file_size_in_byte":16923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"289251571","text":"import numpy as np\nimport sunpy.map\nfrom sunpy import sun\n\nimport astropy.time\nimport astropy.units as u\n\n\nimport logging\n\n\ndef make_sunpy(evtdata, hdr):\n\t\"\"\" Make a sunpy map based on the NuSTAR data.\n\t\n\tParameters\n\t----------\n evtdata: FITS data structure\n\t\tThis should be an hdu.data structure from a NuSTAR FITS file.\n\n\thdr: FITS header containing the astrometric information\n\t\n\tReturns\n -------\n\n\tnustar_map:\n\t\tA sunpy map objecct\n\t\n\t\"\"\"\n\n\t# Parse Header keywords\n\tfor field in hdr.keys():\n\t\tif field.find('TYPE') != -1:\n\t\t\tif hdr[field] == 'X':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\txval = field[5:8]\n\t\t\tif hdr[field] == 'Y':\n\t\t\t\tprint(hdr[field][5:8])\n\t\t\t\tyval = field[5:8]\n\t\t\n\tmin_x= hdr['TLMIN'+xval]\n\tmin_y= hdr['TLMIN'+yval]\n\tmax_x= hdr['TLMAX'+xval]\n\tmax_y= hdr['TLMAX'+yval]\n\n\tdelx = hdr['TCDLT'+xval]\n\tdely = hdr['TCDLT'+yval]\n\n\tx = evtdata['X'][:]\n\ty = evtdata['Y'][:]\n\tmet = evtdata['TIME'][:]*u.s\n\tmjdref=hdr['MJDREFI']\n\tmid_obs_time = astropy.time.Time(mjdref*u.d+met.mean(), format = 'mjd')\n\n\t# Use the native binning for now\n\n\t# Assume X and Y are the same size\n\tresample = 1.0\n\tscale = delx * resample\n\tbins = (max_x - min_x) / (resample)\n\n\tH, yedges, xedges = np.histogram2d(y, x, bins=bins, range = [[min_y,max_y], [min_x, max_x]])\n\n\n\tdict_header = {\n\t\"DATE-OBS\": mid_obs_time.iso,\n\t\"CDELT1\": scale,\n\t\"NAXIS1\": bins,\n\t\"CRVAL1\": 0.,\n\t\"CRPIX1\": bins*0.5,\n\t\"CUNIT1\": \"arcsec\",\n\t\"CTYPE1\": \"HPLN-TAN\",\n\t\"CDELT2\": 
scale,\n\t\"NAXIS2\": bins,\n\t\"CRVAL2\": 0.,\n\t\"CRPIX2\": bins*0.5 + 0.5,\n\t\"CUNIT2\": \"arcsec\",\n\t\"CTYPE2\": \"HPLT-TAN\",\n\t\"HGLT_OBS\": 0,\n\t\"HGLN_OBS\": 0,\n\t\"RSUN_OBS\": sun.solar_semidiameter_angular_size(mid_obs_time).value,\n\t\"RSUN_REF\": sun.constants.radius.value,\n\t\"DSUN_OBS\": sun.sunearth_distance(mid_obs_time).value\n\t}\n\t# For some reason the DSUN_OBS crashed the save...\n\n\theader = sunpy.map.MapMeta(dict_header)\n\n\tnustar_map = sunpy.map.Map(H, header)\n\t\n\treturn nustar_map\n\n\n","sub_path":"nustar_pysolar/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"403969802","text":"from getinput import get_input\nimport itertools\nimport textwrap\n\n\ndef parse_input(s):\n return list(int(line) for line in s.splitlines(keepends=False))\n\n\ndef part_1(input_str):\n changes = parse_input(input_str)\n return sum(changes)\n\n\ndef part_2(input_str):\n changes = parse_input(input_str)\n freqs = set()\n freq = 0\n cursor = 0\n while freq not in freqs:\n freqs.add(freq)\n freq += changes[cursor]\n cursor = (cursor + 1) % len(changes)\n return freq\n\n\ndef test_input():\n return textwrap.dedent(\"\"\"\\\n \"\"\")\n\n\ndef main():\n input_str = get_input(1)\n print('Part 1:', part_1(input_str))\n print('Part 2:', part_2(input_str))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2018/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"593730963","text":"from django.shortcuts import render\nfrom django.urls import path, re_path\nfrom rest_framework import routers\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\n\nrouter.register(r'get-all-courses', views.GetAllCoursesView, basename='get-all-courses')\nrouter.register(r'get-courses', views.CourseView, basename='Courses')\nrouter.register(r'get-lessons', views.LessonView, basename='lessons')\nrouter.register(r'get-questions', views.QuestionView, basename='questions')\nrouter.register(r'get-answers', views.AnswerView, basename='answers')\n\nurlpatterns = [\n    path('courses', views.CrudCourseView.as_view()),\n    re_path(r'^courses/(?P<pk>[0-9]+)/$', views.CrudCourseView.as_view()),\n    path('lessons', views.CrudLessonView.as_view()),\n    re_path(r'^lessons/(?P<pk>[0-9]+)/$', views.CrudLessonView.as_view()),\n    path('questions', views.CrudQuestionView.as_view()),\n    re_path(r'questions/(?P<pk>[0-9]+)/$', views.CrudQuestionView.as_view()),\n    path('answers', views.CrudAnswerView.as_view()),\n    re_path(r'answers/(?P<pk>[0-9]+)/$', views.CrudAnswerView.as_view()),\n    path('available-courses', views.GetAvailableCourses.as_view()),\n    path('available-lessons', views.GetAvailableLesson.as_view()),\n]\n\nurlpatterns += router.urls","sub_path":"apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"170613370","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.http import HttpResponse\nfrom .models import Feedback\nfrom .forms import FeedbackForm\n\n# Create your views here.\n\n\ndef home(request):\n    context = {\n        'categories': Feedback.CATEGORIES,\n    }\n    return render(request, 'feedback/feedback.html', context)\n\n\ndef feedback_form(request):\n    form = FeedbackForm({'status': '未处理'})\n    if request.method == 'POST':\n        form = FeedbackForm(request.POST, request.FILES)\n        if form.is_valid():\n            # data = dict(form.cleaned_data)\n            # feedback = Feedback(**form.cleaned_data)\n            # feedback.status = '待处理'\n            # feedback.save()\n            if request.FILES['screenshot']:\n                upload_file(request.FILES['screenshot'])\n\n            return redirect('/')\n        # return HttpResponse('数据验证失败')\n    return render(request, 'feedback/feedback-form.html', {'form':form})\n\n\ndef get_feedback_data(request):\n    if request.method == 'GET':\n        data = dict(request.GET)\n        return HttpResponse(str(data))\n\n\n# write the uploaded file to the local disk\ndef upload_file(f):\n    with open('uploads\\{}'.format(f.name), 'wb+') as file:\n        for chunk in f.chunks():\n            file.write(chunk)\n\n\ndef feedback_list(request):\n    \"\"\"Feedback list page\"\"\"\n    if 'admin' in request.session:\n        q = request.GET.get('q', '')\n        items = Feedback.objects.filter(subject__contains=q).order_by('-posted_time')\n        return render(request, 'feedback/feedback-list.html', {'items':items})\n    else:\n        return redirect(reverse('feedback:login'))\n\n\ndef feedback_editor(request,fid):\n    # feedback = Feedback.objects.get(pk=fid)\n    feedback = get_object_or_404(Feedback, pk=fid)\n    return render(request, 'feedback/feedback-editor.html', {'item': feedback})\n\n\ndef feedback_edit(request, fid):\n    fb = get_object_or_404(Feedback, pk=fid)\n    form = FeedbackForm(initial={\n        'subject': fb.subject,\n        'category': fb.category,\n        'username': fb.username,\n        'email': fb.email,\n        'description': fb.description,\n        'subscription': fb.subscription,\n        'status': fb.status,\n        'posted_time': fb.posted_time,\n    })\n    if request.method == 'POST':\n        form = FeedbackForm(request.POST, request.FILES)\n        if form.is_valid():\n            for k, v in form.cleaned_data.items():\n                setattr(fb, k, v)\n            fb.save()\n            
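# after a successful save, send the admin back to the list view\n            return 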
redirect(reverse('feedback:feedback_list'))\n return render(request, 'feedback/feedback-edit.html', {'form':form})\n\n\ndef feedback_delete(request,fid):\n fd = get_object_or_404(Feedback, pk=fid)\n fd.delete()\n return redirect(reverse('feedback:feedback_list'))\n\n\ndef login(request):\n if request.method == 'POST':\n username = request.POST.get('username', None)\n pwd = request.POST.get('pwd', None)\n if username == 'Tom' and pwd =='123456':\n request.session['admin'] = username\n return redirect(reverse('feedback:feedback_list'))\n else:\n return redirect(reverse('feedback:login'))\n return render(request, 'feedback/login.html')\n\n\ndef logout(request):\n if 'admin' in request.session:\n request.session.flush()\n return redirect(reverse('feedback:login'))\n\n\n","sub_path":"Django_moudle_sample/tutorial/feedback/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"161590114","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom pprint import pprint\r\n\r\nclass Pixiv():\r\n\r\n def __init__(self, search, page):\r\n self.search = search\r\n self.page = page\r\n self.result = set()\r\n self.headers = {\r\n 'X-Requested-With': 'XMLHttpRequest',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/56.0.2924.87 Safari/537.36'}\r\n\r\n @property\r\n def cookies(self):\r\n with open(\"cook.txt\", 'r') as f:\r\n _cookies = {}\r\n for row in f.read().split(';'):\r\n k, v = row.strip().split('=', 1)\r\n _cookies[k] = v\r\n print(\"success?\"+'\\n')\r\n return _cookies\r\n\r\n def run(self):\r\n fmt = 'https://www.pixiv.net/search.php?word={}&order=date_d&p={}'\r\n urls = [fmt.format(self.search, p) for p in range(1, self.page)]\r\n total = 1\r\n print(\"Inside the run function\"+'\\n')\r\n for url in urls:\r\n print(url+ \"\\n\")\r\n req = requests.get(url, headers=self.headers, cookies=self.cookies).text\r\n print(url+\"after request\"+\"\\n\")\r\n #print(req)\r\n bs = BeautifulSoup(req, 'lxml').find('ul', class_=\"_image-items\")\r\n bs2 = BeautifulSoup(req, 'lxml').find('body')\r\n \r\n #print(bs.prettify())\r\n \r\n for b in bs.find_all('li', class_=\"image-item\"):\r\n #print(b.prettify())\r\n try:\r\n href = b.find('a',href=True)\r\n tit = b.find('h1')\r\n\r\n href2 = \"https://www.pixiv.net\"+href['href']\r\n \r\n print (href2)\r\n print (tit['title'])\r\n\r\n \r\n #print(\"THIS IS THE END?\" + href.prettify())\r\n star = b.find('ul', class_=\"count-list\").find('li').find('a').text\r\n self.result.add((\"https://www.pixiv.net{}\".format(href), int(star)))\r\n #print(total)\r\n total += 1\r\n except:\r\n pass\r\n # pprint(sorted(self.result, key=lambda v: v[1], reverse=True)) # 按star数降序排序\r\n\r\nif __name__ == \"__main__\":\r\n print('Pixiv Crawler - get artwork links that are only accessible by Pixiv Premium user')\r\n spider = Pixiv(\"link\", 6)\r\n spider.run()\r\n","sub_path":"pixiv_premium.py","file_name":"pixiv_premium.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"304113737","text":"#!/usr/bin/python \n# -*- coding: utf-8 -*-f\nimport ConfigParser\n\nimport cgi, cgitb\nimport Cookie, os, time \n\ncgitb.enable()\nform=cgi.FieldStorage()\n\nconfig= ConfigParser.RawConfigParser()\n\n# Uncomment add if you create new section or uncomment read when you want to 
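# Pixiv keyword-search crawler: replays a logged-in browser session by loading\r\n# raw cookies from cook.txt (a single 'name=value; name2=value2' line copied from the browser).\r\n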
update\nconfig.add_section('NumDevice')\nconfig.add_section('Device1')\n\n#config.read(r'setting/devices.ini')\n\nconfig.set('NumDevice','amount','4')\n\nconfig.set('Device1','name','Temp.')\nconfig.set('Device1','api-key','ABCDEF')\nconfig.set('Device1','alert_type','max')\nconfig.set('Device1','decision_point','50')\nconfig.set('Device1','status','on')\n\n# use 'ab' mode for append , use 'wb' mode for renew\nwith open(r'setting/devices.ini', 'wb') as configfile:\n config.write(configfile)\n","sub_path":"configDevices.py","file_name":"configDevices.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"279043390","text":"import re\n\n'''Matching Multiple Groups with the Pipe\nThis will return either Batman or Tina Fey and the first occurrence in the matching text will be returned as the matching object'''\nheroRegex = re.compile(r'Batman|Tina Fey.')\nmo1 = heroRegex.search('Batman and Tina Fey')\nprint(mo1.group()) # returns Batman the first occurrence\n\nmo2 = heroRegex.search('Tina Fey and Batman')\nprint(mo2.group()) # returns Tina Fey the first occurence\n\nbatRegex = re.compile(r'Bat(man|mobile|copter|bat)')\nmo = batRegex.search('Batmobile lost a wheel')\nprint(mo.group())\nprint(mo.group(1))\n\n\n# the following lines does optional matching\nbatRegex = re.compile(r'Bat(wo)?man')\nmo1 = batRegex.search('The Adventures of Batman')\nprint(mo1.group())\n\nmo2 = batRegex.search('The Adventures of Batwoman')\nprint(mo2.group())\n\n'''letting the regex look for phone numbers that do or do not have area code by making the area code optional\nthe '#' actually matches nothing or one of the group preceeding this question mark '''\n\nphoneNumbRegex = re.compile(r'(\\d\\d\\d-)?\\d\\d\\d-\\d\\d\\d\\d\\d')\nmo1 = phoneNumbRegex.search('My phone number is 234-602-06819')\nprint(mo1.group())\n\nmo2 = phoneNumbRegex.search('My phone number is 602-06819')\nprint(mo2.group())\n\n'''Matching zero or more with asterisk'''\n\nbatRegex = re.compile(r'Bat(wo)*man')\nmo1 = batRegex.search('The Adventures of Batman')\nprint(mo1.group())\n\nmo2 = batRegex.search('The Adventure of Batwoman')\nprint(mo2.group())\n\nmo3 = batRegex.search('The Adventures of Batwowowowoman')\nprint(mo3.group())\n\n'''Matching one or more with plus '+' '''\nbatRegex = re.compile(r'Bat(wo)+man')\nmo1 = batRegex.search('The Adventures of Batwoman')\nprint(mo1.group())\n\nmo2 = batRegex.search('The Adventures of Batwowowowoman')\nprint(mo2.group())\n\n'''mo3 = batRegex.search('The Adventures of Batman')\nprint(mo3.group())\nThe above statement will not match because atleast one word of wo must be in our definition'''\n\n'''Matching Specific Repetitions with Curly Brackets'''\nhaRegex = re.compile(r'(Ha){0,3}') # specifying the bounds\nmo1 = haRegex.search('HaHaHa')\nprint(mo1.group())\n\n'''Writing somethng like what am about to write below will not work, this should give an attribute error since Ha is to be matched atleast trice'''\nmo2 = haRegex.search('Ha')\nprint(mo2.group())\n\n'''Greedy and NonGreedy Match'''\ngreedyHaRegex = re.compile(r'(Ha){3,5}')\nmo1 = greedyHaRegex.search('HaHaHaHaHa')\nprint(mo1.group())\n\nnonGreedyHaRegex = re.compile(r'(Ha){3,5}?') # following the curly bracket with a question mark does the nongreedy match\nmo2 = nonGreedyHaRegex.search('HaHaHaHaHa')\nprint(mo2.group())\n\n# Never forget that the question mark in regular expression could mean a nongreedy match or an optional matchd\n\n'''The findall() method 
do not just return the first match text but returns a list of every string that matches our definition'''\n\nphoneNumbRegex = re.compile(r'\\D\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d\\d') # has no groups\nmo1 = phoneNumbRegex.search('Cell:+234-602-06819 Work: +234-178-59562')\nprint(mo1.group())\n\nmo2 = phoneNumbRegex.findall('Cell:+234-602-06819 Work: +234-178-59562')\nprint(mo2) # As long as there are no groups in the regular expression\n\nphoneNumbRegex = re.compile(r'(\\D\\d\\d\\d)-(\\d\\d\\d)-(\\d\\d\\d\\d\\d)') # has groups\nmo3 = phoneNumbRegex.findall('Cell:+234-602-06819 Work: +234-178-59562') #returns list of tuples\nprint(mo3)\n\n# Character Classes\nxmasRegex = re.compile(r'\\d+\\s\\w+')\n''' The findall() method returns all matching strings of the regex pattern in a list.'''\nmo1 = xmasRegex.findall('12 drummer, 11 pipers, 10 lords, 9 ladies, 8 maids, 7 swans, 6 geese, 5 rings, 4 birds, 3 hens, 2 doves, 1 partridge')\nprint(mo1)\n\n# Making Your Own Character Classes\nvowelRegex = re.compile(r'[aeiouAEIOU]') # defining your own characters using square brackets\nmo1 = vowelRegex.findall('Robocop eats baby food. BABY FOOD.') # will match any vowel both upper case and lower case\nprint(mo1)\n\nconsonantRegex = re.compile(r'[^aeiouAEIOU]') # Placing ^ will make a negative character class\nmo2 = consonantRegex.findall('Robocop eats baby food. BABY FOOD') # matching every character that is not a vowel\nprint(mo2)\n\n#The Caret and Dollar Sign Character\nbeginsWithHello = re.compile(r'^Hello')\nmo1 = beginsWithHello.search('Hello World!')\nprint(mo1)\nmo2 = beginsWithHello.search('He said hello.')\nprint(mo2) # the text does not begin with a Hello, so it returns none\n\nendsWithHello = re.compile(r'\\d$') # matches string that ends with numeric character 0-9\nmo1 = endsWithHello.search('My age is 23')\nprint(mo1)\nmo2 = endsWithHello.search('My age is twenty three')\nprint(mo2) # since the string is not ending with a number it returns none\n\nwholeStringIsNum = re.compile(r'^\\d+$')\nmo1 = wholeStringIsNum.search('1234567890')\nprint(mo1)\n\nmo2 = wholeStringIsNum.search('1234xyz567890')\nprint(mo2)\n\n# The Wildcard Character\n'''The wildcard character will match any character except for a newline and note that it will only match one character'''\natRegex = re.compile(r'.at')\nmo1 = atRegex.findall('The cat in the hat sat on the flat mat')\nprint(mo1)\n\n# Matching Everything with Dot-Star\nnameRegex = re.compile(r'First Name: (.*) Last Name: (.*)') # note that dot-star is greedy, will always want to match as much as possible\nmo = nameRegex.search('First Name: Ibrahim Last Name: Lawal')\nprint(mo.group(1))\nprint(mo.group(2))\nprint(mo.group())\n\n# For the nongreedy mode:\nnonGreedyRegex = re.compile(r'<.*?>')\nmo = nonGreedyRegex.search(' for dinner.>') # matches the shortest possible strings\nprint(mo.group())\n\ngreedyRegex = re.compile(r'<.*>')\nmo = greedyRegex.search(' for dinner.>') # matches the longest possible strings\nprint(mo.group())\n\n# Making Newlines with the Dot Character\n'''The dot-star will match everything except newline. 
By passing re.DOTALL to the re.compile you match everything including newline too'''\nnoNewLineRegex = re.compile(r'.*')\nnonGreedyHaRegex.search('Serve the public trust.\\nProtect the innocent.\\nUphold the law.').group()\n\nnewLineRegex = re.compile('.*', re.DOTALL)\nnewLineRegex.search('Serve the public trust.\\nProtect the innocent.\\nUphold the law.').group()\n\n# case insensitive matching, regular expresssions matches texts with the case specified\nregex1 = re.compile('ROBOCOP')\nregex2 = re.compile('robocop')\nregex3 = re.compile('ROBOcop')\n\n# to make it case insensitive you can pass in re.IGNORE or re.I as second arguement to the re.compile\n\nrobocop = re.compile(r'robocop')\nrobocop.search('RoboCop is part man, part machine')","sub_path":"chapter7/regexPipe.py","file_name":"regexPipe.py","file_ext":"py","file_size_in_byte":6521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"354986488","text":"import QuantLib as ql\nimport pandas as pd\nimport sys\nsys.path.append('D:\\\\programs\\\\多因子策略开发\\\\单因子研究')\nsys.path.append('D:\\\\programs\\\\多因子策略开发\\\\掘金多因子开发测试\\\\工具')\n# 引入工具函数和学习器\nfrom utils import get_factor_from_wind_v2, get_trading_date_from_now\n\n\nclass MasterStratery(object):\n def __init__(self, code_list, date):\n self.code_list = code_list # 选股的股票代码\n self.date = date # 选股的日期\n\n def _get_data(self):\n df = None # 实现具体选股用到的数据\n return df\n\n def select_code(self):\n df = self._get_data()\n code_list = None # 根据选股用到的数据具体选股\n return code_list\n\n\nclass 彼得_林奇基层调查选股策略说明(MasterStratery):\n '''选股条件:\n1.公司的资产负债率小于等于 25%;\n2.公司每股净现金大于 0;\n3.当前股价与每股自由现金流量比小于10;\n4.公司的存货成长率小于其营收增长率;\n5.(长期盈余成长率+股息率)/市盈率大于等于 2;'''\n def _get_data(self):\n from single_factor import DebetToAsset, CFPS, MarketValueToFreeCashFlow, NetProfitGrowRateV2, DividendYield, PE, InventoryTurnRatio\n factor_list = [DebetToAsset, CFPS, MarketValueToFreeCashFlow, NetProfitGrowRateV2, DividendYield, PE]\n factor_InvTurn_now = [InventoryTurnRatio]\n factor_InvTurn_one_year = [InventoryTurnRatio]\n date_one_year = get_trading_date_from_now(self.date, -1, ql.Years)\n df = get_factor_from_wind_v2(self.code_list, factor_list, self.date)\n # 存货增长率与营收增长率的比较判断数据,使用存货周转率判断\n df_invturn_now = get_factor_from_wind_v2(self.code_list, factor_InvTurn_now, self.date)\n df_invturn_now.rename(columns={'存货周转率': '存货周转率_今年'}, inplace=True)\n df_invturn_one_year = get_factor_from_wind_v2(self.code_list, factor_InvTurn_one_year, date_one_year)\n df_invturn_one_year.rename(columns={'存货周转率': '存货周转率_去年'}, inplace=True)\n df = pd.concat([df, df_invturn_now, df_invturn_one_year], axis=1)\n df = df.dropna()\n return df\n\n def select_code(self):\n df = self._get_data()\n df = df[df['资产负债率'] < 0.25]\n df = df[df['每股现金流CFPS'] > 0.0]\n df = df[df['市值/企业自由现金流'] < 10.0]\n df = df[((df['净利润增长率'] + df['股息率指标'])/df['市盈率PE']) >= 2.0]\n df = df[df['存货周转率_今年'] > df['存货周转率_去年']]\n code_list = list(df.index.values)\n return code_list\n\n\nclass 史蒂夫路佛价值选股法(MasterStratery):\n '''选股条件:\n1.市净率低于全市场平均值。\n2.以五年平均盈余计算的PE 低于全市场平均值。\n3.股息收益率不低于全市场平均值。\n4.股价现金流量比低于全市场平均值。\n5.长期借款占总资本比率低于50%'''\n def _get_data(self):\n from single_factor import PB, PE, DividendYield, PriceFreeCashFlowPerShare, LongTermLiabilityToWorkCapital\n factor_list = [PB, DividendYield, PriceFreeCashFlowPerShare, LongTermLiabilityToWorkCapital]\n df = get_factor_from_wind_v2(self.code_list, factor_list, self.date)\n # 五年PE值获取\n df_PE = []\n for i in range(5):\n date_temp = get_trading_date_from_now(self.date, -i, ql.Years)\n df_temp = 
get_factor_from_wind_v2(self.code_list, [PE], date_temp)\n df_temp.rename(columns={'市盈率PE': '市盈率_'+str(i)}, inplace=True)\n df_PE.append(df_temp)\n df = pd.concat([df]+df_PE, axis=1)\n df = df.dropna()\n return df\n\n def select_code(self):\n df = self._get_data()\n PE_median = (df['市盈率_0'] + df['市盈率_1'] + df['市盈率_2'] + df['市盈率_3'] + df['市盈率_4']).median()\n df = df[(df['市盈率_0'] + df['市盈率_1'] + df['市盈率_2'] + df['市盈率_3'] + df['市盈率_4']) < PE_median]\n df = df[df['PB市净率指标'] < df['PB市净率指标'].median()]\n df = df[df['股息率指标'] >= df['股息率指标'].median()]\n df = df[df['股价_每股企业自由现金流'] < df['股价_每股企业自由现金流'].median()]\n df = df[df['长期负债/营运资金'] < 0.5]\n code_list = list(df.index.values)\n return code_list","sub_path":"掘金多因子开发测试/大师选股策略/master_strategy.py","file_name":"master_strategy.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"624926743","text":"from math import log # Math.log(number) 如果 number 为正,则此函数返回该数字的自然对数。如果 number 为负,则函数返回 NaN。如果 number 为 0,则此功能返回 -∞\n\nimport operator # 本模块主要包括一些Python内部操作符对应的函数。这些函数主要分为几类:对象比较、逻辑比较、算术运算和序列操作\n\ndef calcShannonEnt(dataSet):\n #dataset 为list 并且里面每一个list的最后一个元素为label\n # 如[[1,1,'yes'],\n # [1,1,'yes'],\n # [1,0,'no'],\n # [0,0,'no'],\n # [0,1,'no']]\n \n # 获得list的长度 即实例总数\n numEntried = len(dataSet)\n \n # 创建一个字典,来存储数据集合中不同label的数量 如 dataset包含3 个‘yes’ 2个‘no’ (用键-值对来存储)\n labelCounts = {}\n\n # 对上面数据集的每一个样本进行for遍历\n for featVec in dataSet:\n \n #获得list里面每一个子list的最后一个位置的内容,也就是每个子list的label值\n currentLabel = featVec[-1]\n\n # 如果当前标签在字典键值中不存在\n if currentLabel not in labelCounts.keys():labelCounts[currentLabel] = 0\n #若已经存在 该键所对应的值加1\n labelCounts[currentLabel] += 1\n # 初值熵值为0.0\n ShannonEnt = 0.0\n\n #对于每一个label\n for key in labelCounts:\n # 概率probability,也就是这个分类出现的次数除以总共的分类数量\n prob = float(labelCounts[key])/numEntried\n # -= 减法赋值运算符 c -= a 等效于 c = c - a\n ShannonEnt -= prob * log(prob,2)\n return ShannonEnt\n\n\n############# 利用createDataSet()简单鉴定鱼数据集#################\ndef createDataSet():\n dataSet = [[1, 1, 'yes'],\n [1, 1, 'yes'],\n [1, 0, 'no'],\n [0, 1, 'no'],\n [0, 1, 'no']]\n labels = ['no surfacing','filippers']\n return dataSet, labels\n\n\n\n##############按照给定特征划分数据集##############################\n# 定义划分数据集函数\n#参数:待划分的数据集、划分数据集的列、划分数据集的列的对应值\ndef splitDataSet(dataSet, axis, value):\n #声明retDataSet 是一个列表\n retDataSet = []\n \n # 遍历每个子list\n for featVec in dataSet:\n # 如果第axis列的值是某个特征value\n if featVec[axis] == value:\n \n #featVec[:axis] 返回的是一个列表,其元素是featVec这个列表的索引从0到axis-1的元素\n # 也就是不包括axis这个索引上的值,若axis为0,则返回空列表\n reducedFeatVec = featVec[:axis]\n \n # 其中featVec[axis + 1: ]返回的是一个列表,其元素是featVec这个列表的索引从axis + 1开始的所有元素\n #featVec[:axis]和featVec[axis+1 :]组合起来了,就是要把axis这一列剔除掉,因为这一列是某个特征所在的列\n # 把抽取出该特征以后的所有特征组成一个列表\n reducedFeatVec.extend(featVec[axis+1 :])\n \n # 补充:方法extend和append的区别:\n #例子:\n #\n # >>>a = [1, 2, 4]\n #\n #>>>b = [5, 6, 7]\n #\n #>>>a.extend(b)\n #\n #[1, 2, 4, 5, 6, 7]\n #\n #>>>a = [1, 2, 4]\n #\n #>>>a.append(b)\n #\n #[1, 2, 4, [5, 6, 7]]\n \n # 创建抽取该特征以后的dataset\n retDataSet.append(reducedFeatVec)\n return retDataSet\n\n\n\n##################选择最好的数据集划分方式#############################################\n##################该函数实现选取特征,划分数据集,计算得出最好的划分数据集的特征##############\ndef chooseBestFeatureToSplit(dataSet):\n # 取出list中的第一个元素 再取长度-1 就为特征的个数\n # 比如a=[[1,1,'Yes'],[1,0,'No']] 那么a[0] = [1,1,'Yes'],然后len(a[0])=3,然后3-1=2,就是有两个特征\n numFeatures = len(dataSet[0]) - 1\n \n # 调用函数计算熵 Entropy(S),计算数据集中的原始香农熵\n baseEntropy = calcShannonEnt(dataSet)\n \n \n 
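# A quick worked example of the quantities computed below, assuming the 5-sample\n    # fish dataset from createDataSet() above (values shown are illustrative):\n    #   base entropy H(S) = -(2/5)*log2(2/5) - (3/5)*log2(3/5) ~= 0.971\n    #   splitting on feature 0 gives subsets ('yes','yes','no') and ('no','no'), so\n    #   H(S|f0) = (3/5)*0.918 + (2/5)*0.0 ~= 0.551 and the information gain is ~= 0.420\n    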
bestInfoGain = 0.0;bestFeature = -1\n # 为属性的索引值。由于从0开始。所以初始值设为-1\n \n \n for i in range(numFeatures):\n # 返回 dataset所有元素 中的 第1元素 并且为list\n featList = [example[i] for example in dataSet]\n \n # 在这里作用相当于matlab的 unique() 去除重复元素\n # python中的集合(set)数据类型,与列表类型相似,唯一不同的是 set类型中元素不可重复\n uniqueVals = set(featList)\n \n newEntropy = 0.0\n for value in uniqueVals:\n \n # 调用函数返回属性i下值为value的子集\n subDataSet = splitDataSet(dataSet, i, value)\n \n #计算每个类别的熵\n prob = len(subDataSet)/float(len(dataSet))\n newEntropy += prob * calcShannonEnt(subDataSet)\n \n # 求信息增益\n infoGain = baseEntropy - newEntropy\n if (infoGain > bestInfoGain):\n bestInfoGain = infoGain\n bestFeature = i\n # 返回分类能力最好的属性索引值\n return bestFeature\n\n\n\n#################多数表决的方法决定该叶子节点的分类####################################\n\ndef majorityCnt(classList):\n \n #声明 classCount是一个字典\n classCount = {}\n \n # 遍历字典的每一个元素\n for vote in classList:\n \n #如果这个元素(其实就是特征)不在字典的键里面,那么就是0,如果在,那么就加1\n if vote not in classCount.keys() : classCount[vote] = 0\n classCount[vote] += 1\n \n #classCount.items()将classCount字典分解为元组列表,operator.itemgetter(1)按照第二个元素的次序对元组进行排序,reverse=True是降序\n sortedClassCount = sorted(classCount.items(),key = operator.itemgetter(1), reverse=True)\n \n #找到次数最多的那个特征,把特征返回来\n return sortedClassCount[0][0]\n\n\n################程序清单3-4 创建树的函数代码,给节点做标注#################################################\n\n# #输入参数:数据集和标签列表\ndef createTree(dataSet,labels):\n \n # 取dataSet每个实例的最后一个元素,也即label,包含了所有类标签\n classList = [example[-1] for example in dataSet]\n \n # 类别完全相同则停止划分,取第一个就行了,第一个的个数等于所有的个数\n # 当计算在最后一列数据中与第一个值相同的元素个数与最后一列数据个数相同时,直接返回第一个元素值,意思是所有类标签都相同\n if classList.count(classList[0]) == len(classList):\n return classList[0] #当所有类都相等时停止分裂\n \n if len(dataSet[0]) == 1: #停止分裂时,没有更多的特征在数据集\n return majorityCnt(classList)\n \n # 返回最佳特征值划分的索引\n bestFeat = chooseBestFeatureToSplit(dataSet)\n # 得到最佳特征值索引的标签\n bestFeatLabel = labels[bestFeat]\n # 使用字典类型存储树的信息\n myTree = {bestFeatLabel:{}}\n \n # 从标签列表中删除最好特征值对应的那个标签\n del(labels[bestFeat])\n \n # 得到最佳特征值对应的数据集中的那一列数据组成列表\n featValues = [example[bestFeat] for example in dataSet]\n \n # 唯一化\n uniqueVals = set(featValues)\n \n # 遍历唯一化列表\n for value in uniqueVals:\n \n # 复制类标签,当函数��数是列表类型时,参数是按照引用方式传递的,保证每次调用函数时都不改变原始列表的内容,就是开一块新内存。\n subLabels = labels[:]\n \n # 等号前第一个中括号是指字典键值,键值可任意类型;第二个中括号是第一个键值延伸的嵌套的字典类型键值;在等号后,先把原数据集按特征值分开,然后递归调用该函数\n myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)\n #返回最终的字典信息\n return myTree\n\n\n\n################### 程序清单 3-8 使用决策树的分类函数 ##################################\ndef classify(inputTree, featLabels, testVec):\n firstStr = list(inputTree.keys())[0]\n secondDict = inputTree[firstStr]\n featIndex = featLabels.index(firstStr)\n for key in secondDict.keys():\n if testVec[featIndex] == key:\n if type(secondDict[key]).__name__ == 'dict':\n classLabel = classify(secondDict[key], featLabels, testVec)\n else:\n classLabel = secondDict[key]\n return classLabel\n\n\n############# 程序清单 3-9 使用pickle模块存储决策树 ######################################\ndef storeTree(inputTree, filename):\n import pickle\n fw = open(filename,'wb')\n pickle.dump(inputTree,fw)\n fw.close\n\ndef grabTree(filename) :\n import pickle\n fr = open(filename,'rb')\n return pickle.load(fr)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Decision Tree/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":9387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
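# A minimal usage sketch for the ID3 helpers in the trees.py record above, assuming
# that file is importable as `trees` (the module name here is an assumption).
# createTree mutates the label list it is given, so pass a copy and keep the
# original list around for classify().
import trees

dataSet, labels = trees.createDataSet()
tree = trees.createTree(dataSet, labels[:])
print(trees.classify(tree, labels, [1, 1]))  # expected: 'yes' on the toy fish dataset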
+{"seq_id":"97373436","text":"mode_of_travel = input(\"please tell me are you walking, biking or ride a car?\")\ndistance = float(input(\"please tell me the distance in km\"))\ntime_to_get_going = input(\"please tell me how long it takes to get going in minutes\")\ntime_to_find_parking = input(\"please tell me how long it takes to find a parking spot in minutes\")\n\nwalking_speed = 5 # in km/h\nbiking_speed = 15 # in km/h\ncar_speed = 60 # in km/h\n\nhour_time_to_get_going_int = int(time_to_get_going) / 60\nhour_time_to_find_parking_int = int(time_to_find_parking) / 60\n\nif mode_of_travel == \"walking\":\n time = distance / 5\nelif mode_of_travel == \"biking\":\n time = distance / 15\nelif mode_of_travel == \"ride a car\":\n time = distance / 60\n\nhours_to_get_somewhere = (time + hour_time_to_get_going_int + hour_time_to_find_parking_int)\nminutes_to_get_somewhere = (hours_to_get_somewhere % 1) * 60\n\nprint(\"hours to get somewhere\", hours_to_get_somewhere-(hours_to_get_somewhere % 1))\nprint(\"minutes to get somewhere\", minutes_to_get_somewhere)\n","sub_path":"week_2/time_to_get_somewhere.py","file_name":"time_to_get_somewhere.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"36055936","text":"# Time Complexity :O(Log(n))\n# Space Complexity :O(1)\n# Did this code successfully run on Leetcode : yes\n# Any problem you faced while coding this : forgot the = in line 16 once :'D\n\n\n# Your code here along with comments explaining your approach\n# Python code to implement iterative Binary \n# Search. \n \n# It returns location of x in given array arr \n# if present, else returns -1 \ndef binarySearch(arr, l, r, x): \n start = l\n end = r\n while start <= end:\n mid = (start+end)//2\n if arr[mid] == x:\n return mid\n elif arr[mid] > x:\n end = mid-1\n else:\n start = mid+1\n return -1\n \n \n \n# Test array \narr = [ 2, 3, 4, 10, 40 ] \nx = 2\n \n# Function call \nresult = binarySearch(arr, 0, len(arr)-1, x) \n \nif result != -1: \n print(\"Element is present at index % d\" % result )\nelse: \n print( \"Element is not present in array\" )\n","sub_path":"Exercise_1.py","file_name":"Exercise_1.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"270212831","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport logging\nimport re\nimport urllib2\nfrom OpenSSL import crypto\nfrom django.utils.datetime_safe import strftime\nfrom django.utils.translation import gettext_lazy as _\nfrom backend.dictionaries import knowledge\n\nlog = logging.getLogger(\"backend\")\n\nclass Scope:\n root = 0\n leaf = 1\n middle = 2\n each = 3\n\n class Static:\n def __call__(self, scope):\n if scope == Scope.root:\n return \"root\"\n if scope == Scope.leaf:\n return \"leaf\"\n if scope == Scope.middle:\n return \"middle\"\n if scope == Scope.each:\n return \"each\"\n return \"unknown\"\n\n __unicode__ = Static()\n\nclass Severity:\n none = 0\n notice = 1\n warning = 2\n error = 3\n\n class Static:\n def __call__(self, severity):\n if severity == Severity.none:\n return \"none\"\n if severity == Severity.notice:\n return \"notice\"\n if severity == Severity.warning:\n return \"warning\"\n if severity == Severity.error:\n return \"error\"\n return \"unknown\"\n\n get_name = Static()\n __unicode__ = Static()\n\nclass Validator:\n\n valid = True\n errors = None\n scope = None\n\n def __init__(self, scope):\n self.valid = 
True\n self.errors = list()\n self.scope = scope\n\n def validate(self, certificate):\n raise NotImplementedError(\"The method validate must be overridden\")\n\n def reset(self):\n self.errors = list()\n\n def add_error(self, error, severity = Severity.error, description = None):\n self.valid = False\n self.errors.append((severity, error, description))\n\n def get_errors(self):\n return self.errors\n\nclass IsNotInvalidated(Validator):\n\n def validate(self, certificate):\n log.debug(\"entered validate function in isNotInvalidated\")\n\n if certificate.issuer.valid == -1:\n self.add_error(_(\"Certificate issuer %(name)s is deprecated\") % {\"name\": certificate.issuer.name}, Severity.error)\n if certificate.size.valid == -1:\n self.add_error(_(\"Using certificate key length %(name)s is deprecated\") % {\"name\": certificate.size.name}, Severity.error)\n if certificate.type.valid == -1:\n self.add_error(_(\"Using certificate of type %(name)s is deprecated\") % {\"name\": certificate.type.name}, Severity.error)\n if certificate.algorithm.valid == -1:\n self.add_error(_(\"Using certificate algorithm %(name)s is deprecated\") % {\"name\": certificate.algorithm.name}, Severity.error)\n for usage in certificate.get_usages():\n if usage.valid == -1:\n self.add_error(_(\"Certificate usage %(name)s is deprecated\") % {\"name\": usage.name}, Severity.error)\n for crl in certificate.get_crls():\n if crl.valid == -1:\n self.add_error(_(\"Certificate CLR %(uri)s is deprecated\") % {\"uri\": crl.uri}, Severity.warning)\n return self.valid\n\nclass IsNotExpired(Validator):\n\n def validate(self, certificate):\n log.debug(\"entered validate function in IsNotExpired\")\n\n now = datetime.datetime.now()\n if (certificate.start - now) > datetime.timedelta(days=0):\n self.add_error(_(\"Certificate is not valid until %(date)s\") % {\"date\": strftime(certificate.start, \"%Y-%m-%d\")}, Severity.error)\n if (certificate.until - now) < datetime.timedelta(days=0):\n self.add_error(_(\"Certificate has expired\"), Severity.error)\n if (certificate.until - now) < datetime.timedelta(days=60):\n self.add_error(_(\"Certificate will expire %(date)s\") % {\"date\": strftime(certificate.until, \"%Y-%m-%d\")}, Severity.warning)\n return self.valid\n\nclass IsValidSignature(Validator) :\n\n def validate(self, certificate):\n log.debug(\"entered validate function in IsValidSignature\")\n\n # If this is a root certificate,\n # then we cannot verify any more\n if certificate.parent is None:\n # TODO: We can check if it is considered a trusted root certificate here\n return self.valid\n\n # Load current certificate as X509\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate.blob)\n # Load parent certificate as X509\n auth = crypto.load_certificate(crypto.FILETYPE_PEM, certificate.parent.blob)\n # Get certificate signature\n sign = cert.digest(cert.get_signature_algorithm())\n\n # Verify the signature\n try:\n crypto.verify(auth, sign, certificate.blob, cert.get_signature_algorithm())\n except AttributeError:\n self.add_error(_(\"Invalid certificate digital signature.\"), Severity.error)\n except crypto.Error:\n self.add_error(_(\"Invalid certificate digital signature.\"), Severity.error)\n\n return self.valid\n\nclass IsNotRevoked(Validator):\n\n def validate(self, certificate):\n log.debug(\"entered validate function in isNotRevoked\")\n for crl in certificate.get_crls():\n file = urllib2.urlopen(url=crl.uri, timeout=10)\n data = file.read()\n # TODO Validate the CRL against the issuer?\n list = 
crypto.load_crl(crypto.FILETYPE_ASN1, data)\n\n if list.get_revoked():\n for revoked in list.get_revoked(): # If get_reason() is None it is not the same as Unspecified!\n if revoked.get_serial():\n if certificate.serial == revoked.get_serial():\n reason = \"unknown\"\n date = datetime.datetime.strptime(revoked.get_rev_date(), \"%Y%m%d%H%M%SZ\")\n\n if revoked.get_reason() is not None:\n reason = unicode(revoked.get_reason())\n\n self.add_error(_(\"Certificate was revoked at %(date)s with reason \\\"%(reason)s\\\"\") % {\"date\": strftime(date, \"%Y-%m-%d\"), \"reason\": reason}, Severity.error, knowledge[\"revoked_certificates\"])\n\n return self.valid\n\nclass IsValidExpiration(Validator):\n\n def validate(self, certificate):\n log.debug(\"entered validate function in isValidExpiration\")\n leaf = current = certificate\n while True:\n log.debug(\"Valid until \" + unicode(current.until))\n parent = current.parent\n if parent is None:\n break\n if current.until > parent.until:\n if parent.until < leaf.until:\n self.add_error(_(\"%(parent)s expires on %(date)s invalidating %(leaf)s\") % {\"parent\": parent.subject.name, \"leaf\": leaf.subject.name, \"date\": strftime(parent.until, \"%Y-%m-%d\")}, Severity.warning, knowledge[\"shortened_expiration\"])\n else:\n self.add_error(_(\"%(parent)s expires before %(name)s\") % {\"parent\": parent.subject.name, \"name\": current.subject.name}, Severity.notice, knowledge[\"reversed_expiration\"])\n current = parent\n return self.valid\n\nclass IsValidSetup(Validator):\n\n def validate(self, certificate):\n # TODO Connect to server and test different cipherspecs...\n return self.valid\n\nclass IsNotSelfSigned(Validator):\n\n def validate(self, certificate):\n log.debug(\"entered validate function in IsNotSelfSigned\")\n\n if certificate.subject == certificate.issuer:\n self.add_error(_(\"The certificate %(name)s is self-signed\") % {\"name\": certificate.subject.name}, Severity.notice, knowledge[\"self_signed\"])\n\n return self.valid\n\nclass IsIssuedToCorrectDomain(Validator):\n\n def validate(self, certificate):\n log.debug(\"entered validate function in IsIssuedToCorrectDomain\")\n\n if not len(certificate.get_connections()) > 0:\n return self.valid\n\n valid = False\n connection = certificate.get_connections()[0]\n\n # First check common name\n if re.match(certificate.subject.name.replace(\".\", r\"\\.\").replace(\"*\", r\".*\") + \"$\", connection.hostname):\n valid = True\n\n # Then check alternative name extensions\n if not valid:\n for identity in certificate.get_identities():\n if re.match(identity.value.replace(\".\", r\"\\.\").replace(\"*\", r\".*\") + \"$\", connection.hostname):\n valid = True\n break\n\n if not valid:\n self.add_error(_(\"Certificate is not issued to the domain %(domain)s\") % {\"domain\": connection.hostname}, Severity.warning, knowledge[\"hostname_mismatch\"])\n\n return self.valid\n\nclass IsNotIncompleteChain(Validator):\n\n def validate(self, certificate):\n log.debug(\"entered validate function in IsNotIncompleteChain\")\n\n if not certificate.issuer == certificate.subject:\n self.add_error(_(\"Could not complete certificate chain\"), Severity.warning, knowledge[\"incomplete_chain\"])\n\n return self.valid","sub_path":"backend/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":9015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"233382038","text":"import os\nimport csv\nimport cv2\nimport itertools\nimport numpy as np\nimport 
sklearn\nimport sklearn.model_selection\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\nfrom keras.layers.convolutional import Convolution2D\n\n# Constants\nMAX_ANGLE = 25.\nDIR_SEPARATOR = \"\\\\\"\nDATA_LOCATION = \"data/\"\nCH, ROW, COL = 3, 160, 320 # Original image format\n\n# Hyperparameters\ncrop_top, crop_down = 70, 26\nh = 20.\nbsize = 32\nepochs = 3\ncenter_only = False\nvalidation_share = 0.2\ndropout = 0.1\n\n# Find all csv files and IMG folders in subfolders inside DATA_LOCATION\ncsv_locations = []\nimage_locations = []\nfor root, dirs, files in os.walk(DATA_LOCATION):\n for f in files:\n if f.endswith(\".csv\"):\n csv_locations.append(os.path.join(root, f))\n image_locations.append(os.path.join(root, \"IMG/\"))\n\n# Read each line from all found csv files into a \"samples\" list\nsamples = []\nfor i in range(len(csv_locations)):\n with open(csv_locations[i]) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append([image_locations[i], line])\n\nprint(\"Num Samples:\", len(samples))\n\n# Augmentation configuration\n# A list of all possible combinations of the lists \"samples\", \"flipped\" and\n# \"camera\" is created to be used during data generation and easy shuffling.\n# \"samples\" is overwritten and now contains the tuples of 3 items\ncamera = [-1, 0, 1] # right, center, left camera\nif center_only:\n camera = [0]\nflipped = [True, False]\nsamples = list(itertools.product(samples, camera, flipped))\nprint(\"Num Augmented Samples:\", len(samples))\n\n# Splitting the data into training and validation set\ntrain_samples, validation_samples = sklearn.model_selection.train_test_split(samples, test_size=validation_share)\nprint(\"Num Samples in Training Set:\", len(train_samples))\nprint(\"Num Samples in Validation Set:\", len(validation_samples))\n\ndef generator(samples, batch_size=32):\n \"\"\"\n Data generator used for sampling batches of size \"batch_size\" from the\n tuple configuration \"sample, camera, is_flipped\" where\n - sample is the line of the csv file from the car simulator\n - camera indicates which camera should be used from the data\n - is_flipped indicates if the image and steering angle should be flipped\n \"\"\"\n num_samples = len(samples)\n # Loop forever so the generator never terminates\n while 1:\n # shuffle the data at the beginning\n sklearn.utils.shuffle(samples)\n # take batches until the data set is exhausted\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample, camera, is_flipped in batch_samples:\n # extract the image location\n image_location = batch_sample[0]\n sample_data = batch_sample[1]\n name = image_location + sample_data[camera%3].split(DIR_SEPARATOR)[-1]\n # read image\n image = cv2.imread(name)\n # read steering angle (target)\n angle = float(sample_data[3])\n # depending of the camera, adjust the steering angle\n # (see writeup for more details)\n if camera != 0:\n alpha = np.radians(angle * MAX_ANGLE)\n beta = np.arctan(np.tan(alpha) + float(camera) / h)\n angle = np.degrees(beta) / MAX_ANGLE\n # flip image and steering angle\n if is_flipped:\n image = cv2.flip(image, 1)\n angle *= -1\n images.append(image)\n angles.append(angle)\n\n X_train = np.array(images)\n y_train = np.array(angles)\n yield X_train, y_train\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, 
batch_size=bsize)\nvalidation_generator = generator(validation_samples, batch_size=bsize)\n\ndef normalize_image(x):\n \"\"\"\n Normalization layer for batches of images\n (Extra function so that saving and loading the model works properly)\n \"\"\"\n from keras.backend import tf as ktf\n return ktf.map_fn(lambda img: ktf.image.per_image_standardization(img), x)\n\n# Neural network model based on Nvidia's network\nmodel = Sequential()\n# Preprocess incoming data\n# Crop image to road area\nmodel.add(Cropping2D(cropping=((crop_top, crop_down), (0, 0)),\n input_shape=(ROW, COL, CH)))\n# Center around zero with small standard deviation\nmodel.add(Lambda(normalize_image))\n# Convolutional layers\nmodel.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))\nmodel.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))\nmodel.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(Flatten())\n# Fully connected layers with dropout\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(1))\n\n# Train using adam optimizer\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, samples_per_epoch=len(train_samples),\n validation_data=validation_generator,\n nb_val_samples=len(validation_samples), nb_epoch=epochs)\n# Save the model\nmodel.save('model.h5')\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"333131427","text":"__author__ = 'yihanjiang'\nfrom load_mnist import *\n\n\n\n\n\ndef main():\n # load MNIST data\n images, labels =load_mnist('training', digits=[0],\n path = \"/Users/yihanjiang/Desktop/Academic_knowledge/Sreeram_Lab_related/Sparse_Neuron_Network/code/mnist\")\n pl.imshow(images[0], cmap = pl.cm.gray)\n pl.show()\n\n # set parameters\n learning_rate = 0.01\n reg_lambda = 0.01\n\n # load model\n\n\n # train model\n\n #\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"train_mnist.py","file_name":"train_mnist.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"2706121","text":"import pickle\nimport sys\nimport numpy\nimport numpy as np\nimport os\n\nthre = 0.01\n \ndict = {\n 0: 'R color',\n 1: 'G color',\n 2: 'B color'\n}\n \n\npickle.dump( dict, open( \"dict.pickle\", \"wb\" ) )\n\ntest = pickle.load( open( \"dict.pickle\", \"rb\" ) )\n\nprint(test)\n\ndir = sys.argv[1]\nshadername = sys.argv[2]\n\nsave_trace_dict_file = os.path.join(dir, 'trace_dict_%s.pickle' % shadername)\n\nif len(sys.argv) > 3:\n dict_file = sys.argv[3]\n trace_dict = pickle.load( open( dict_file, \"rb\" ) )\nelse:\n if os.path.exists(save_trace_dict_file):\n trace_dict = pickle.load( open( save_trace_dict_file, \"rb\" ) )\n else:\n trace_dict = {}\n\nchannelwise_file = os.path.join(dir, 'encoder_channelwise_taylor_vals_%s.npy' % shadername)\nchannelwise_score = np.load(channelwise_file)\n\nassert channelwise_score.shape[0] == 48\n\nind_txt_file = os.path.join(dir, 'encoder_max_channelwise_ind_%s.txt' % shadername)\nind_strs = open(ind_txt_file).read().split('\\n')\n\n\n\ncount = 0\nprocessed = True\n\nsummary_str = 
''\n\nfor line in ind_strs:\n if line.startswith('encoder channel '):\n current_ch = int(line[16:-1])\n processed = False\n assert current_ch == count\n count += 1\n else:\n if len(line):\n assert not processed\n \n processed = True\n \n summary_str += '\\nencoder channel %d:\\n' % current_ch\n \n inds = [int(val) for val in line.split(',')]\n \n current_score = np.sort(channelwise_score[current_ch])[::-1]\n \n for i in range(len(inds)):\n if current_score[i] < thre:\n break\n \n if not inds[i] in trace_dict.keys():\n print('We are scanning channel %d now!' % current_ch)\n print('need manual input for trace %d in shader %s' % (inds[i], shadername))\n ans = input()\n trace_dict[inds[i]] = ans\n pickle.dump( trace_dict, open( save_trace_dict_file, \"wb\" ) )\n \n summary_str += '%d %.3f (%s)\\n' % (inds[i], current_score[i], trace_dict[inds[i]])\n\nsummary_str = 'Looking for at most 20 feature with contribution score > %f\\n\\n' % thre + summary_str\nopen(os.path.join(dir, 'auto_generated_summary_%s.txt' % shadername), 'w').write(summary_str)\n \n \n ","sub_path":"CAN24_AN/generate_unified_channelwise_summary.py","file_name":"generate_unified_channelwise_summary.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"139771349","text":"a=int(input())\nfor k in range(0,a):\n a=input().split(' ')\n qi=int(a[0])\n zhong=int(a[1])\n result=[]\n for i in range(qi,zhong+1):\n iszi=1\n for j in range(2,i):\n if i%j==0:\n iszi=0\n break\n if iszi==1:\n result.append(i)\n if 1 in result:\n result.remove(1)\n print(\" \".join(str(i) for i in result),end=\" \")\n print()","sub_path":"Code/CodeRecords/2194/60776/297609.py","file_name":"297609.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"524911177","text":"import requests\nfrom requests import session\nimport html as html2\nfrom lxml import etree\nfrom lxml import html\nimport re\nimport xml.etree.ElementTree as ET\nfrom django.conf import settings\nfrom functions.my_database import Database\n\ndefault_url = settings.MES_DEFAULT_URL\ndefault_badge = settings.MES_DEFAULT_BADGE\ndefault_username = settings.MES_DEFAULT_USERNAME\n\n\nclass Page:\n def __init__(self,page):\n\n self.text = page.text\n\n self.p = html.fromstring(page.content)\n p = html.fromstring(page.content) \n try:\n self.title = p.find(\".//title\").text\n except:\n self.title = 'none'\n #print(self.title)\n try:\n self.action = default_url + p.xpath('.//form[@name=\"DefaultFormName\"]')[0].get('action')\n except:\n try:\n self.action = p.xpath(\"//meta[translate(@http-equiv, 'REFSH','refsh')='refresh']/@content\")[0].split(\";\")[1][5:]\n except:\n self.action = ''\n\n self.redirect = page.url\n\n self.params = {}\n for item in p.xpath('.//form[@name=\"DefaultFormName\"]/descendant::input'):\n self.params[item.name]= item.value or ''\n\n # add other params\n self.params['_FORM_SUBMIT_BUTTON'] = 'SubmitButton'\n self.params['Accessibility'] ='N'\n \n if self.title == 'MES Workstation':\n self.params['GlobalActionsList'] = '6'\n self.params['_FORM_SUBMIT_BUTTON'] = 'GlobalActionsGoBtn'\n\n if self.title == 'Search Jobs':\n self.params['SearchByChoice'] = '1'\n self.params['_FORM_SUBMIT_BUTTON'] = 'SearchJobsGoBtn'\n\n if self.title in ['Move and Complete Assembly: Transaction','Move Assembly: Transaction']:\n self.params['_FORM_SUBMIT_BUTTON'] = 'FinishButton'\n self.params['FromStep'] = '1'\n if 
'OverCompletion' in self.params: del self.params['OverCompletion']\n\n \n self.header = {\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding':'gzip, deflate',\n 'Accept-Language':'en-US,en;q=0.5',\n 'Connection':'keep-alive',\n 'Content-Length':'411',\n 'Content-Type':'application/x-www-form-urlencoded', \n 'Host':default_url.split(\"//\")[-1].split(\"/\")[0],\n 'Referer': self.action,\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0'\n }\n def get_search_results(self):\n try:\n return self.p.xpath('.//td[@headers=\"JobColumn\"]/text()')[0]\n except:\n return True\n\n def current_job_status(self):\n status = self.p.xpath('.//td[@headers=\"JobStatusColumn\"]/span/text()')\n if status:\n return status[0]\n \n status = self.p.xpath('.//td/span[@id=\"JobsAdvTable:JobStatus:0\"]/text()')\n\n if status:\n return status[0]\n else:\n return \"error\" \n\n def move_and_complete_data(self):\n self.params['JobOpsAdvTable:length'] = self.get_number_of_ops() # number_of_ops,\n self.params['JobsAdvTable:selected'] = '0'\n self.params['JobsAdvTable:length'] = '1'\n return self.params\n\n def pick_release(self):\n self.params['event'] = 'invokePickRelease'\n self.params['source'] = 'invokePickRelease'\n self.params['_FORM_SUBMIT_BUTTON'] = ''\n self.params['JobOpsAdvTable:selected'] = 0\n self.params['JobOpActionsList'] = 1\n return self.params\n\n def get_number_of_ops(self):\n return len(self.p.xpath('.//span[starts-with(@id,\"JobOpsAdvTable:QtyQueue:\") ]'))\n\n def error(self):\n error = self.p.xpath('.//table[@id=\"FwkErrorBeanId\"]/descendant::div[@class=\"x3z\"]/text()')\n error2 = self.p.xpath('.//td[@class=\"errorText\"]/text()')\n if error:\n return error[0]\n elif error2:\n return error2[0]\n\n return False\n\n def get_completed_qty(self):\n return self.p.xpath('.//span[@id=\"JobsAdvTable:JobQtyCompleted:0\"]/text()')[0]\n\ndef mes(**args):\n #print(args)\n \n \"\"\" \n\n params needed:\n 1. jobs ({job1, job2})\n 2. type (move or complete)\n \n\n optional params:\n 1. quantity\n 2. to op (900)\n 3. badge\n 4. username\n\n examples:\n\n mes(jobs='379963,9696',type='move', move_to=9000)\n mes(jobs='379963',type='complete')\n mes(jbos='1234567',type='complete',username='asdf',badge='123')\n\n\n todo\n\n * allow from op\n * allow scrap\n * allow clock in/out\n\n \"\"\"\n\n # required params\n jobs = args['jobs']\n transaction_type = args['type']\n\n # optional params\n badge = args['badge'] if 'badge' in args else default_badge\n quantity = args['quantity'] if 'quantity' in args else 999999999999999999999\n username = args['username'] if 'username' in args else default_username\n\n # some usernames are tied to printers. need to block them\n if 'jit' in username:\n username = default_username\n\n with session() as s:\n\n \"\"\"\n 1. open oracle\n 2. follow redirect to oracle login page\n 3. attempt to login\n 4. redirect to main menu\n 5. open MES screen\n 6. open job search screen\n\n \"\"\"\n try:\n page = Page(s.get(default_url))\n except: \n return \"no oracle\"\n\n page = Page(s.get(page.action, data=page.params, headers = page.header))\n\n data = page.params\n\n data['usernameField'] = username\n data['passwordField'] = 'bimba'\n\n page = Page(s.post(page.action, data=page.params, headers = page.header))\n \n if page.title == \"Login\": return \"Error: failed to login.
    \"\n\n page = Page(s.get(page.redirect, data=page.params, headers = page.header)) \n page = Page(s.get(default_url + '/OA_HTML/RF.jsp?function_id=28044&resp_id=50452&resp_appl_id=706&security_group_id=0&lang_code=US'))\n\n if page.title != \"MES Workstation\": return \"Error: failed to open MES.
    \"\n\n search_page = Page(s.post(page.action, data=page.params, headers = page.header))\n\n if search_page.title != \"Search Jobs\": return \"Error: failed to open job search.
    \"\n\n\n # search for job\n result = ''\n for job in jobs.split(','):\n # get job info: org id, etc?\n me = Database()\n me.oracle_connect()\n\n try:\n org = me.run_url_f_r('get_job_org.sql',[['jobname',job]]).fetchall()[0][0]\n except:\n # can't do a return and continue together :)\n result += job + \" Error: invalid job.
    \"\n continue\n\n me.close()\n\n # set job specific data\n data = search_page.params\n data['JobNameCriteria'] = job\n data['OrgId'] = org\n\n page = Page(s.post(search_page.action, data=data, headers = search_page.header))\n\n # some error checks\n if page.title != \"Search Jobs\":\n result = result + job + \" could not be found.\" + '
    '\n continue\n\n if page.get_search_results() == \"No results found.\":\n result = result + job + \" could not be found\" + '
    '\n continue\n\n if page.current_job_status() != \"Released\": \n result = result + job + \" is \"+ page.current_job_status() + ' and cannot be processed.
    '\n continue\n \n\n # do transaction\n if transaction_type == 'complete':\n \n \"\"\"\n \n to complete a job\n 1. scroll through jobs to find the op we need\n 2. click move and complete button\n 3. click submit\n \n \"\"\"\n\n me = Database()\n me.oracle_connect()\n data = me.run_url_f_r('get_active_ops.sql',[['jobname',job],['opname',9999999999]]).fetchall()\n me.close()\n\n for op in data:\n my_quantity = min(int(op[3]),int(quantity))\n from_op_seq = op[2]\n from_op_row = op[4]\n from_dept_id = op[6]\n from_dept = op[5]\n from_op_code = op[7]\n\n data = page.move_and_complete_data()\n\n data['JobOpsAdvTable:selected'] = from_op_row%10 # number is <10. so take mod 10\n \n # if we are on the 2nd page\n if round(from_op_row//10) > 0:\n for i in range(round(from_op_row//10)):\n # here we need to click the next button \n sub_data = page.params\n sub_data['source'] = 'JobOpsAdvTable'\n sub_data['partialTargets'] = 'JobOpsAdvTable'\n sub_data['partial'] = 'true'\n sub_data['value'] = 10* from_op_row//10 + 1\n #sub_data['size'] = 10\n sub_data['JobNameCriteria'] = job\n sub_data['OrgId'] = org\n sub_data['_FORM_SUBMIT_BUTTON'] = ''\n sub_data['event'] = 'goto'\n sub_data['JobOpActionsList'] = 1\n sub_data['JobOpsAdvTable:selected'] = 0\n\n next_page = Page(s.post(page.action, data = sub_data, headers = page.header))\n # update params\n for param in next_page.params:\n sub_data[param] = next_page.params[param]\n data[param] = sub_data[param]\n \n data['JobOpActionsList'] = round(from_op_row//10)\n data['value'] = ''\n data['partialTargets'] = ''\n data['source'] = ''\n data['event'] = ''\n data['size'] = ''\n data['partial'] = ''\n\n data['JobNameCriteria'] = job\n data['MyBadge'] = badge\n data['OrgId'] = org \n data['JobOpsAdvTable:selected'] = from_op_row%10 # number is <10. so take mod 10\n data['_FORM_SUBMIT_BUTTON'] = 'JobOpAction31'\n \n page = Page(s.post(page.action, data=data, headers=page.header))\n # if there are no errors..\n if page.error() != False:\n result += job + \" Error: \" + page.error()\n continue\n\n data = page.params\n data['op'] = from_op_seq\n data['TxnQty'] = my_quantity\n data['FromOpSeq'] = from_op_seq\n data['FromDeptID'] = from_dept_id\n data['FromDeptHidden'] = from_dept \n data['FromOpSeqHidden'] = from_op_seq\n data['FromOpCodeHidden'] = from_op_code\n\n page = Page(s.post(page.action, data=data, headers=page.header)) \n \n # if there are no errors..\n if page.error() != False:\n result += job + \" Error: \" + page.error() + \"
    \"\n continue\n \n # print pick release. this should only be done if its user has a certain login name.\n if page.title == \"Search Jobs\" and 'pick' in username.lower():\n #print(\"picking\")\n pick = Page(s.post(page.action, data=page.pick_release(), headers = page.header))\n\n if pick.error() != False:\n result += job + \" Message: \" + pick.error() + \"
    \"\n \n # log job status\n result += job + \" is \" + page.current_job_status()\n result += \" with \" + page.get_completed_qty() + \" completed.
    \" if page.current_job_status() == 'Released' and page.get_completed_qty() != '0' else \"
    \" \n\n # skip back to next iteration of loop\n continue\n\n elif transaction_type == 'move' and 'move_to' in args:\n move_to = int(args['move_to'])\n\n \"\"\"\n use oracle db to get job information. in somecases the info is on page 2+\n of the job screen in MES. Its much easier to just grab it from db intead\n of scrolling through pages in MES.. as faster.\n\n \"\"\"\n me = Database()\n me.oracle_connect()\n data = me.run_url_f_r('get_active_ops.sql',[['jobname',job],['opname',move_to]]).fetchall()\n me.close()\n\n # verify op is valid. Somehow oracle will let us move to an op that does not exist :D\n if len(data) == 0:\n result += job + \" Error: Invalid Operation.
    \"\n continue\n\n if move_to != data[-1][2]:\n result += job + ' Error: Entered Operation is Invalid
    '\n continue\n\n to_op_data = data[-1]\n\n for op in data[:-1]: \n if op[2] < move_to:\n \n my_quantity = min(int(op[3]),int(quantity))\n from_op_seq = op[2]\n from_op_row = op[4]\n from_dept_id = op[6]\n from_dept = op[5]\n from_op_code = op[7]\n to_op = to_op_data[2]\n to_dept = to_op_data[5]\n to_dept_id = to_op_data[6]\n to_op_code = to_op_data[7]\n\n data = page.move_and_complete_data()\n\n # if we are on the 2nd page\n if round(from_op_row//10) > 0:\n for i in range(round(from_op_row//10)):\n # here we need to click the next button \n sub_data = page.params\n sub_data['source'] = 'JobOpsAdvTable'\n sub_data['partialTargets'] = 'JobOpsAdvTable'\n sub_data['partial'] = 'true'\n sub_data['value'] = 10* from_op_row//10 + 1\n #sub_data['size'] = 10\n sub_data['JobNameCriteria'] = job\n sub_data['OrgId'] = org\n sub_data['_FORM_SUBMIT_BUTTON'] = ''\n sub_data['event'] = 'goto'\n sub_data['JobOpActionsList'] = 1\n sub_data['JobOpsAdvTable:selected'] = 0\n\n next_page = Page(s.post(page.action, data = sub_data, headers = page.header))\n # update params\n for param in next_page.params:\n sub_data[param] = next_page.params[param]\n data[param] = sub_data[param]\n \n data['JobOpActionsList'] = round(from_op_row//10)\n data['value'] = ''\n data['partialTargets'] = ''\n data['source'] = ''\n data['event'] = ''\n data['size'] = ''\n data['partial'] = ''\n\n data['JobOpsAdvTable:selected'] = from_op_row%10 # number is <10. so take mod 10\n data['_FORM_SUBMIT_BUTTON'] = 'JobOpAction15'\n data['JobNameCriteria'] = job\n data['MyBadge'] = badge\n data['OrgId'] = org\n \n page = Page(s.post(page.action, data = data, headers = page.header))\n if page.error() != False:\n result += job +' Error: ' + str(page.error()) + \"
    \"\n continue\n\n data = page.params\n data['ToDeptID'] = to_dept_id\n data['ToDeptHidden'] = to_dept\n data['ToOpCodeHidden'] = to_op_code or ''\n data['ToOpSeqHidden'] = to_op\n data['ToOpSeq'] = to_op\n data['TxnQty'] = my_quantity\n data['FromOpSeq'] = from_op_seq\n data['FromDeptID'] = from_dept_id\n data['FromDeptHidden'] = from_dept \n data['FromOpSeqHidden'] = from_op_seq\n data['FromOpCodeHidden'] = from_op_code or ''\n data['ToStep'] = 1\n\n page = Page(s.post(page.action, data = data, headers = page.header))\n if page.error() != False:\n result += job +' Error: ' + str(page.error()) + \"
    \"\n continue\n\n # log job status\n result += job + \" has moved.\"\n result += \" with \" + page.get_completed_qty() + \" completed.
    \" if page.current_job_status() == 'Released' and page.get_completed_qty() != '0' else \"
    \"\n continue\n \n elif transaction_type == 'scrap':\n # do scrap transactions here\n print(\"error\")\n\n else:\n result += job + ' Error: transaction type incorrectly specified.'\n\n s.close()\n #print(result)\n return result\n","sub_path":"functions/mes.py","file_name":"mes.py","file_ext":"py","file_size_in_byte":18165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"588361858","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport sys\nimport xml.etree.cElementTree as ET\nimport numpy as np\nimport tensorflow as tf\nimport glob\nimport cv2\nimport os\nfrom pktool import rovoc_parse, thetaobb2pointobb, mkdir_or_exist,simpletxt_parse,get_files\n\n\nsys.path.append('../../')\n\nfrom libs.label_name_dict.label_dict import LabelMap\n# from utils.tools import makedirs, view_bar\nfrom libs.configs import cfgs\n# 'sdc','sdc-multidet'\ndataset_Name = 'sdc-multidet'\ncfgs.DATASET_NAME = dataset_Name\n\ntf.app.flags.DEFINE_string('VOC_dir', '/data2/pd/sdc/multidet/v0/', 'Voc dir')\ntf.app.flags.DEFINE_string('txt_dir', 'labels', 'xml dir')\ntf.app.flags.DEFINE_string('image_dir', 'images', 'image dir')\ntf.app.flags.DEFINE_string('save_dir', '/data2/pd/sdc/multidet/v0/tfrecord/', 'save name')\ntf.app.flags.DEFINE_string('img_format', '.png', 'format of image')\ntf.app.flags.DEFINE_string('dataset', dataset_Name, 'dataset')\nFLAGS = tf.app.flags.FLAGS\n\ndef int64_feature(values):\n if not isinstance(values,(tuple,list)):\n values = [values]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef read_xml_gtbox_and_label(xml_path):\n \"\"\"\n :param xml_path: the path of voc xml\n :return: a list contains gtboxes and labels, shape is [num_of_gtboxes, 9],\n and has [x1, y1, x2, y2, x3, y3, x4, y4, label] in a per row\n \"\"\"\n # label_map = LabelMap(cfgs)\n tree = ET.parse(xml_path)\n root = tree.getroot()\n img_width = None\n img_height = None\n box_list = []\n for child_of_root in root:\n # if child_of_root.tag == 'filename':\n # assert child_of_root.text == xml_path.split('/')[-1].split('.')[0] \\\n # + FLAGS.img_format, 'xml_name and img_name cannot match'\n\n if child_of_root.tag == 'size':\n for child_item in child_of_root:\n if child_item.tag == 'width':\n img_width = int(child_item.text)\n if child_item.tag == 'height':\n img_height = int(child_item.text)\n\n if child_of_root.tag == 'object':\n label = None\n for child_item in child_of_root:\n if child_item.tag == 'name':\n label = 1#label_map.name2label()[child_item.text]\n if child_item.tag == 'bndbox':\n tmp_box = []\n for node in child_item:\n tmp_box.append(float(node.text))\n assert label is not None, 'label is none, error'\n tmp_box.append(label)\n box_list.append(tmp_box)\n\n gtbox_label = np.array(box_list, dtype=np.int32)\n\n return img_height, img_width, gtbox_label\n\n\ndef convert_pascal_to_tfrecord():\n \"\"\"convert txt (points + label format) to tfrecord\n VOC_dir:\n --trainval\n --images\n --labels\n --test\n --images\n --labels\n \"\"\"\n allNeedConvert = ['trainval','test']\n for train_or_test in allNeedConvert:\n\n label_Path = FLAGS.VOC_dir + \"{}/\".format(train_or_test)+ FLAGS.txt_dir\n image_path = FLAGS.VOC_dir + \"{}/\".format(train_or_test)+ FLAGS.image_dir\n # xml_path = 
os.path.join(FLAGS.VOC_dir, FLAGS.xml_dir)\n # image_path = os.path.join(FLAGS.VOC_dir, FLAGS.image_dir)\n print(image_path)\n save_path = os.path.join(FLAGS.save_dir, FLAGS.dataset + '_' + train_or_test + '.tfrecord')\n mkdir_or_exist(FLAGS.save_dir)\n\n writer = tf.python_io.TFRecordWriter(path=save_path)\n\n txtFullPathList,_ = get_files(label_Path,_ends=['*.txt'])\n for count, txt in enumerate(txtFullPathList):\n (txtPath,tmpTxtName) = os.path.split(txt)\n (txt_name,extension) = os.path.splitext(tmpTxtName)\n\n img_name = txt_name + FLAGS.img_format\n img_path = image_path + '/' + img_name\n\n if not os.path.exists(img_path):\n print('{} is not exist!'.format(img_path))\n continue\n ships = simpletxt_parse(txt,space=' ',boxType='points')\n label_map = LabelMap(cfgs)\n # print(label_map.name2label())\n gtboxes_and_label=[]\n for ship in ships:\n gtbox_label=[0,0,0,0,0,0,0,0,0]\n gtbox_label[:8]=ship['points']\n gtbox_label[8] = label_map.name2label()[ship['label']]\n gtboxes_and_label.append(gtbox_label)\n img_height, img_width=1024,1024\n gtboxes_and_label=np.array(gtboxes_and_label, dtype=np.int32)\n\n img = cv2.imread(img_path)[:, :, ::-1]\n img=np.array(img, dtype=np.int32)\n img_raw = img.tobytes()\n num_objects = gtboxes_and_label.shape[0]\n # shape = gtboxes_and_label.shape\n # gtboxes_and_label=gtboxes_and_label.tobytes()\n feature = tf.train.Features(feature={\n # do not need encode() in linux\n 'img_name': _bytes_feature(img_name.encode()),\n # 'img_name': _bytes_feature(img_name),\n 'img_height': _int64_feature(img_height),\n 'img_width': _int64_feature(img_width),\n 'img': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),\n 'num_objects':tf.train.Feature(int64_list=tf.train.Int64List(value=[num_objects])),\n 'gtboxes_and_label': _bytes_feature(gtboxes_and_label.tostring())\n })\n example = tf.train.Example(features=feature)\n\n writer.write(example.SerializeToString())\n\n print('Conversion is complete!save path:{}'.format(train_or_test,save_path))\n writer.close()\n\n\nif __name__ == '__main__':\n\n convert_pascal_to_tfrecord()\n","sub_path":"dataloader/dataset/convert_data_to_tfrecord.py","file_name":"convert_data_to_tfrecord.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"358824947","text":"from django.urls import path\n\nfrom . import views\nfrom . 
import api\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('add-todo', views.add_todo, name='add_todo'),\n path('in-progress', views.in_progress, name='progress'),\n path('completed', views.completed, name='completed'),\n path('pending', views.pending, name='pending'),\n path('edit/', views.edit_todo, name='edit_todo'),\n path('delete/', views.delete_todo, name='delete_todo'),\n path('todos', api.todos, name='todos'),\n path('todo/', api.todo, name='todo'),\n]","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"199841866","text":"filew = open(\"HIGHEST.txt\", 'w')\nfilew.write('TP 123')\nfilew.close()\n\ndef bestPlayer():\n file = open(\"HIGHEST.txt\", 'r')\n player = file.readline()\n file.close()\n return player\n\ndef currHighestScore():\n bestplayer = bestPlayer()\n print(bestplayer)\n if bestplayer[-3] != ' ':\n return int(bestplayer[-3:])\n elif bestplayer[-2] != ' ':\n return int(bestplayer[-2:])\n else:\n return int(bestplayer[-1])\n\nprint(currHighestScore())\n\ndef scoreReading():\n numberOfPlayers = int(input(\"How many players do you want to input?\"))\n curr = 0\n while curr != numberOfPlayers:\n player = input(\"Input player name: \")\n score = int(input(\"Input player score: \"))\n if score > currHighestScore():\n filew = open(\"HIGHEST.txt\", 'w')\n filew.write(player+' '+str(score))\n print(currHighestScore())\n \n else:\n print(player+\" has not beat the current highest score of\",currHighestScore(),\". Try again!\")\n curr += 1\n\nscoreReading()\n","sub_path":"H2ComputingSpecimen1/Qn1.py","file_name":"Qn1.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"125346170","text":"\r\n\"\"\"\r\n This function takes two arguments l1 and l2\r\n each is a list with two elements containing the\r\n x coordinates of a line.\r\n It returns a True if the lines overlap,\r\n and False otherwise. 
The function considers that\r\n    if either end of the lines is at the same point,\r\n    they overlap.\r\n\"\"\"\r\n\r\ndef doLinesOverlap(l1, l2):\r\n    l1.sort()\r\n    l2.sort()\r\n    x1, x2 = tuple(l1)\r\n    x3, x4 = tuple(l2)\r\n    x1overlaps = x3 <= x1 and x1 <= x4\r\n    x2overlaps = x3 <= x2 and x2 <= x4\r\n    # also catch the case where l2 lies entirely inside l1, which the\r\n    # two endpoint checks above would otherwise miss\r\n    l2inside = x1 <= x3 and x4 <= x2\r\n\r\n    return x1overlaps or x2overlaps or l2inside\r\n\r\nif __name__ == '__main__':\r\n    l1 = list(map(int, input('Please enter the first line (form x1 x2): ').split(' ')))\r\n    l2 = list(map(int, input('Please enter the second line (form x1 x2): ').split(' ')))\r\n\r\n    result = doLinesOverlap(l1, l2)\r\n    if result:\r\n        print('lines overlap')\r\n    else:\r\n        print('lines do not overlap')\r\n","sub_path":"question_a/line_comparator/line_comparator.py","file_name":"line_comparator.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"73617169","text":"from django import forms\nfrom imagr_user.models import ImagrUser\nfrom registration.forms import RegistrationForm\n\n\nclass ImagrUserRegistrationForm(RegistrationForm):\n    def clean_username(self):\n        \"\"\"Validate that the username is alphanumeric and is not already in use.\n        \"\"\"\n        existing = ImagrUser.objects.filter(\n            username__iexact=self.cleaned_data['username']\n        )\n        if existing.exists():\n            raise forms.ValidationError(\n                \"A user with that username already exists.\"\n            )\n        else:\n            return self.cleaned_data['username']\n\n    def save(self, commit=True):\n        # Save password in hashed format\n        user = super(ImagrUserRegistrationForm, self).save(commit=False)\n        user.set_password(self.cleaned_data['password'])\n        if commit:\n            user.save()\n        return user\n","sub_path":"cfpydev-imagr/imagr_user/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"604472222","text":"#\n# @lc app=leetcode.cn id=239 lang=python3\n#\n# [239] 滑动窗口最大值\n#\n\n# @lc code=start\nclass Solution:\n    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n        \n        if not nums or k==0:\n            return []\n        n=len(nums)\n        m=max(nums[:k])\n        result=[m]\n        for i in range(1,n-k+1):\n            if nums[i-1]==m:\n                m=max(nums[i:i+k])\n            else:\n                m=max(m,nums[i+k-1])\n            result.append(m)\n        return result\n# @lc code=end\n\n","sub_path":"Week_01/239.滑动窗口最大值.py","file_name":"239.滑动窗口最大值.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"30389202","text":"import aiohttp\nimport asyncio\nfrom bs4 import BeautifulSoup, SoupStrainer\nimport motor.motor_asyncio\n\nimport json\nimport os\nimport time\nimport sys\nimport configparser\nfrom contextlib import contextmanager\n\n\"\"\"\n\nhttps://www.zhihu.com/question/28070036/answers/created?page=1\n\nhttps://aiohttp.readthedocs.io/en/stable/index.html\nhttps://motor.readthedocs.io/en/stable/index.html\n\n\"\"\"\n\nif sys.platform == 'linux':\n    import uvloop\n    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n\nif sys.platform == 'win32':\n    loop = asyncio.ProactorEventLoop()\n    asyncio.set_event_loop(loop)\n\nasync def get_answers(collection, question_id, total=0, offset=0, limit=20):\n    headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36' }\n    qa_link = f'https://www.zhihu.com/api/v4/questions/{question_id}/answers'\n    qa_include = 
'data[*].is_normal,admin_closed_comment,reward_info,is_collapsed,annotation_action,annotation_detail,collapse_reason,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_settings,comment_permission,created_time,updated_time,review_info,relevant_info,question,excerpt,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp;data[*].mark_infos[*].url;data[*].author.follower_count,badge[?(type=best_answerer)].topics'\n\n for x in range(offset, total, limit):\n print('x:', x)\n params = {'offset': x, 'limit': limit, 'include': qa_include, 'sort_by': 'created'}\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(qa_link, headers=headers, timeout=10, params=params) as resp:\n result = await resp.json()\n data = result['data']\n print(len(data))\n db_result = await collection.insert_many(data)\n print(db_result.inserted_ids)\n except Exception as e:\n print(e)\n\ndef main():\n client = motor.motor_asyncio.AsyncIOMotorClient('mongodb://localhost:32768')\n collection = client['zhihu_data']['zhihu_answers']\n loop = asyncio.get_event_loop()\n loop.run_until_complete(get_answers(collection, '28070036', 120))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"cache/zhihu-spider-py/async_spider.py","file_name":"async_spider.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"413133608","text":"import torch\nimport nltk\nimport string\nfrom torch.autograd import Variable\nimport numpy as np\n\n## functions for loading data from disk\n\ndef process_tokenize_text(text):\n punc_remover = str.maketrans('', '', string.punctuation)\n processed_text = text.lower().translate(punc_remover)\n tokens = nltk.word_tokenize(processed_text)\n return tokens\n\ndef read_embedding(embed_pt_filepath):\n embed_tuple = torch.load(embed_pt_filepath)\n word2index, w2v_tensor, dim = embed_tuple\n return word2index, w2v_tensor\n\n\ndef find_max_seq_length(text):\n max_len = -1\n for tokens in text:\n curr_len = len(tokens)\n if curr_len > max_len:\n max_len = curr_len\n return max_len\n\n\ndef read_text_tensor(batch_text, word_vocab):\n out_text = []\n max_len = find_max_seq_length(batch_text)\n for sent_tokens in batch_text:\n S = len(sent_tokens)\n sent = []\n for i in range(S):\n token = sent_tokens[i]\n sent.append( word_vocab.get_index(token) )\n # pad the right end till the max length of the mini batch\n for i in range(S, max_len):\n sent.append( word_vocab.pad_index )\n out_text.append(sent)\n return torch.LongTensor(out_text)\n\ndef read_labels_tensor(rel_labels, rel_vocab, cuda=False):\n N = len(rel_labels)\n labels_list = []\n for i in range(N):\n token = rel_labels[i]\n labels_list.append( rel_vocab.get_index(token) )\n return torch.LongTensor(labels_list)\n\ndef read_dataset(datapath, word_vocab, rel_vocab):\n questions = []\n rel_labels = []\n # read questions and label from the datapath - could be train, dev, testls\n with open(datapath) as f:\n for line in f:\n line_items = line.split(\"\\t\")\n # add relation\n relation = line_items[1]\n rel_labels.append(relation)\n # add text\n qText = line_items[3]\n tokens = process_tokenize_text(qText)\n questions.append(tokens)\n\n dataset = {\"word_vocab\": word_vocab, \"rel_vocab\": rel_vocab, \"size\": len(rel_labels),\n \"questions\": np.array(questions), \"rel_labels\": np.array(rel_labels)}\n return 
dataset\n\n","sub_path":"simple_qa_rnn/utils/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"592650807","text":"from selenium import webdriver\n\nhyperlink = \"https://www.edureka.co\"\ndriver = webdriver.Chrome(executable_path=\"D:\\pythonProject\\driver\\chromedriver.exe\")\ndriver.get(hyperlink)\n\nnavigationStart = driver.execute_script(\"return window.performance.timing.navigationStart\")\nresponseStart = driver.execute_script(\"return window.performance.timing.responseStart\")\ndomComplete = driver.execute_script(\"return window.performance.timing.domComplete\")\n\nbackendPerformance_calc = responseStart - navigationStart\nfrontendPerformance_calc = domComplete - responseStart\n\nprint(\"Back End: %s\" % backendPerformance_calc)\nprint(\"Front End: %s\" % frontendPerformance_calc)\n\ndriver.quit()","sub_path":"Projectdemo/testcase/test page lioad.py","file_name":"test page lioad.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"282490234","text":"from dps import cfg\nfrom dps.utils import Config\nfrom dps.rl import (\n RLContext, Agent, StochasticGradientDescent,\n BuildEpsilonSoftmaxPolicy, BuildLstmController,\n PolicyGradient, RLUpdater, AdvantageEstimator, PolicyEntropyBonus,\n ValueFunction, PolicyEvaluation_State, Retrace, ValueFunctionRegularization,\n BasicAdvantageEstimator, ConstrainedPolicyEvaluation_State, DifferentiableLoss\n)\n\n\ndef A2C(env):\n with RLContext(cfg.gamma) as context:\n actor = cfg.build_policy(\n env, name=\"actor\",\n exploration_schedule=cfg.exploration_schedule,\n val_exploration_schedule=cfg.val_exploration_schedule\n )\n\n context.set_behaviour_policy(actor)\n context.set_validation_policy(actor)\n\n if cfg.value_weight:\n value_function = ValueFunction(1, actor, \"critic\")\n\n if cfg.split:\n actor_agent = Agent(\"actor_agent\", cfg.build_controller, [actor])\n critic_agent = Agent(\"critic_agent\", cfg.build_controller, [value_function])\n agents = [actor_agent, critic_agent]\n else:\n agent = Agent(\"agent\", cfg.build_controller, [actor, value_function])\n agents = [agent]\n\n values_from_returns = Retrace(\n actor, value_function, lmbda=cfg.v_lmbda, importance_c=cfg.v_importance_c,\n to_action_value=False, from_action_value=False,\n name=\"RetraceV\"\n )\n\n if cfg.value_epsilon:\n ConstrainedPolicyEvaluation_State(\n value_function, values_from_returns,\n epsilon=cfg.value_epsilon, weight=cfg.value_weight,\n n_samples=cfg.value_n_samples, direct=cfg.value_direct\n )\n else:\n policy_eval = PolicyEvaluation_State(value_function, values_from_returns, weight=cfg.value_weight)\n\n if cfg.value_reg_weight:\n ValueFunctionRegularization(policy_eval, weight=cfg.value_reg_weight)\n\n action_values_from_returns = Retrace(\n actor, value_function, lmbda=cfg.q_lmbda, importance_c=cfg.q_importance_c,\n to_action_value=True, from_action_value=False,\n name=\"RetraceQ\"\n )\n\n advantage_estimator = AdvantageEstimator(\n action_values_from_returns, value_function)\n else:\n agent = Agent(\"agent\", cfg.build_controller, [actor])\n agents = [agent]\n\n # Build an advantage estimator that estimates advantage from current set of rollouts.\n advantage_estimator = BasicAdvantageEstimator(\n actor, q_importance_c=cfg.q_importance_c, v_importance_c=cfg.v_importance_c)\n\n PolicyGradient(\n actor, advantage_estimator, 
epsilon=cfg.epsilon,\n importance_c=cfg.policy_importance_c, weight=cfg.policy_weight)\n PolicyEntropyBonus(actor, weight=cfg.entropy_weight)\n\n if env.has_differentiable_loss and cfg.use_differentiable_loss:\n DifferentiableLoss(env, actor)\n\n optimizer = StochasticGradientDescent(agents=agents, alg=cfg.optimizer_spec)\n context.set_optimizer(optimizer)\n\n return RLUpdater(env, context)\n\n\nconfig = Config(\n exp_name=\"A2C\",\n get_updater=A2C,\n n_controller_units=64,\n batch_size=16,\n n_val_rollouts=100,\n optimizer_spec=\"adam\",\n opt_steps_per_update=1,\n sub_batch_size=0,\n epsilon=None,\n lr_schedule=1e-4,\n\n exploration_schedule=0.1,\n val_exploration_schedule=0.0,\n\n value_weight=1.0,\n value_epsilon=None,\n value_n_samples=0,\n value_direct=False,\n value_reg_weight=0.0,\n\n build_policy=BuildEpsilonSoftmaxPolicy(),\n build_controller=BuildLstmController(),\n\n policy_weight=1.0,\n entropy_weight=0.01,\n\n split=False,\n q_lmbda=1.0,\n v_lmbda=1.0,\n policy_importance_c=0,\n q_importance_c=None,\n v_importance_c=None,\n max_grad_norm=None,\n gamma=1.0,\n\n use_differentiable_loss=False,\n render_n_rollouts=4,\n)\n\n\nactor_critic_config = config.copy(\n exp_name=\"ActorCritic\",\n split=True\n)\n\n\nppo_config = config.copy(\n exp_name=\"PPO\",\n opt_steps_per_update=10,\n epsilon=0.2,\n value_weight=0.0,\n)\n\n\n# Same config that is used in the test.\ntest_config = config.copy(\n exp_name=\"TestA2C\",\n opt_steps_per_update=20,\n sub_batch_size=0,\n epsilon=0.2,\n n_controller_units=32,\n value_weight=0.0,\n split=False,\n)\n\n\nreinforce_config = config.copy(\n exp_name=\"REINFORCE\",\n epsilon=0.0,\n opt_steps_per_update=1,\n value_weight=0.0,\n)\n","sub_path":"dps/rl/algorithms/a2c.py","file_name":"a2c.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"111348997","text":"# https://github.com/tensorflow/tensorflow/issues/27023\r\nimport warnings\r\n\r\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\r\n\r\nimport random\r\nimport string\r\nimport tempfile\r\nimport os\r\nimport contextlib\r\nimport json\r\nimport urllib.request\r\nimport hashlib\r\nimport time\r\nimport subprocess as sp\r\nimport multiprocessing as mp\r\nimport platform\r\nimport base64\r\n\r\nimport av\r\nimport pytest\r\nfrom tensorflow.io import gfile\r\nimport imageio\r\nimport numpy as np\r\n\r\nimport blobfile as bf\r\nfrom blobfile import ops, azure\r\n\r\nGCS_TEST_BUCKET = \"csh-test-3\"\r\nAS_TEST_ACCOUNT = \"cshteststorage2\"\r\nAS_TEST_CONTAINER = \"testcontainer2\"\r\n\r\nAZURE_VALID_CONTAINER = (\r\n f\"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}\"\r\n)\r\nAZURE_INVALID_CONTAINER = f\"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}-does-not-exist\"\r\nAZURE_INVALID_ACCOUNT = f\"https://{AS_TEST_ACCOUNT}-does-not-exist.blob.core.windows.net/{AS_TEST_CONTAINER}\"\r\nGCS_VALID_BUCKET = f\"gs://{GCS_TEST_BUCKET}\"\r\nGCS_INVALID_BUCKET = f\"gs://{GCS_TEST_BUCKET}-does-not-exist\"\r\n\r\n\r\n@pytest.fixture(scope=\"session\", autouse=True)\r\ndef setup_gcloud_auth():\r\n # only run this for our docker tests, this tells gcloud to use the credentials supplied by the\r\n # test running script\r\n if platform.system() == \"Linux\":\r\n sp.run(\r\n [\r\n \"gcloud\",\r\n \"auth\",\r\n \"activate-service-account\",\r\n 
f\"--key-file={os.environ['GOOGLE_APPLICATION_CREDENTIALS']}\",\r\n ]\r\n )\r\n yield\r\n\r\n\r\n@contextlib.contextmanager\r\ndef chdir(path):\r\n original_path = os.getcwd()\r\n os.chdir(path)\r\n yield\r\n os.chdir(original_path)\r\n\r\n\r\n@contextlib.contextmanager\r\ndef _get_temp_local_path():\r\n with tempfile.TemporaryDirectory() as tmpdir:\r\n assert isinstance(tmpdir, str)\r\n path = os.path.join(tmpdir, \"file.name\")\r\n yield path\r\n\r\n\r\n@contextlib.contextmanager\r\ndef _get_temp_gcs_path():\r\n path = f\"gs://{GCS_TEST_BUCKET}/\" + \"\".join(\r\n random.choice(string.ascii_lowercase) for i in range(16)\r\n )\r\n gfile.mkdir(path)\r\n yield path + \"/file.name\"\r\n gfile.rmtree(path)\r\n\r\n\r\n@contextlib.contextmanager\r\ndef _get_temp_as_path():\r\n random_id = \"\".join(random.choice(string.ascii_lowercase) for i in range(16))\r\n path = (\r\n f\"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}/\"\r\n + random_id\r\n )\r\n yield path + \"/file.name\"\r\n sp.run(\r\n [\r\n \"az\",\r\n \"storage\",\r\n \"blob\",\r\n \"delete-batch\",\r\n \"--account-name\",\r\n AS_TEST_ACCOUNT,\r\n \"--source\",\r\n AS_TEST_CONTAINER,\r\n \"--pattern\",\r\n f\"{random_id}/*\",\r\n ],\r\n check=True,\r\n shell=platform.system() == \"Windows\",\r\n )\r\n\r\n\r\ndef _write_contents(path, contents):\r\n if \".blob.core.windows.net\" in path:\r\n with tempfile.TemporaryDirectory() as tmpdir:\r\n assert isinstance(tmpdir, str)\r\n account, container, blob = azure.split_url(path)\r\n filepath = os.path.join(tmpdir, \"tmp\")\r\n with open(filepath, \"wb\") as f:\r\n f.write(contents)\r\n sp.run(\r\n [\r\n \"az\",\r\n \"storage\",\r\n \"blob\",\r\n \"upload\",\r\n \"--account-name\",\r\n account,\r\n \"--container-name\",\r\n container,\r\n \"--name\",\r\n blob,\r\n \"--file\",\r\n filepath,\r\n ],\r\n check=True,\r\n shell=platform.system() == \"Windows\",\r\n stdout=sp.DEVNULL,\r\n stderr=sp.DEVNULL,\r\n )\r\n else:\r\n with gfile.GFile(path, \"wb\") as f:\r\n f.write(contents)\r\n\r\n\r\ndef _read_contents(path):\r\n if \".blob.core.windows.net\" in path:\r\n with tempfile.TemporaryDirectory() as tmpdir:\r\n assert isinstance(tmpdir, str)\r\n account, container, blob = azure.split_url(path)\r\n filepath = os.path.join(tmpdir, \"tmp\")\r\n sp.run(\r\n [\r\n \"az\",\r\n \"storage\",\r\n \"blob\",\r\n \"download\",\r\n \"--account-name\",\r\n account,\r\n \"--container-name\",\r\n container,\r\n \"--name\",\r\n blob,\r\n \"--file\",\r\n filepath,\r\n ],\r\n check=True,\r\n shell=platform.system() == \"Windows\",\r\n stdout=sp.DEVNULL,\r\n stderr=sp.DEVNULL,\r\n )\r\n with open(filepath, \"rb\") as f:\r\n return f.read()\r\n else:\r\n with gfile.GFile(path, \"rb\") as f:\r\n return f.read()\r\n\r\n\r\ndef test_basename():\r\n testcases = [\r\n (\"/\", \"\"),\r\n (\"a/\", \"\"),\r\n (\"a\", \"a\"),\r\n (\"a/b\", \"b\"),\r\n (\"\", \"\"),\r\n (\"gs://a\", \"\"),\r\n (\"gs://a/\", \"\"),\r\n (\"gs://a/b/\", \"\"),\r\n (\"gs://a/b\", \"b\"),\r\n (\"gs://a/b/c/test.filename\", \"test.filename\"),\r\n (\"https://a.blob.core.windows.net/b\", \"\"),\r\n (\"https://a.blob.core.windows.net/b/\", \"\"),\r\n (\"https://a.blob.core.windows.net/b/c/\", \"\"),\r\n (\"https://a.blob.core.windows.net/b/c\", \"c\"),\r\n (\"https://a.blob.core.windows.net/b/c/test.filename\", \"test.filename\"),\r\n ]\r\n for input_, desired_output in testcases:\r\n actual_output = bf.basename(input_)\r\n assert desired_output == actual_output\r\n\r\n\r\ndef test_dirname():\r\n testcases = [\r\n (\"a\", 
\"\"),\r\n (\"a/b\", \"a\"),\r\n (\"a/b/c\", \"a/b\"),\r\n (\"a/b/c/\", \"a/b/c\"),\r\n (\"a/b/c/////\", \"a/b/c\"),\r\n (\"\", \"\"),\r\n (\"gs://a\", \"gs://a\"),\r\n (\"gs://a/\", \"gs://a\"),\r\n (\"gs://a/////\", \"gs://a\"),\r\n (\"gs://a/b\", \"gs://a\"),\r\n (\"gs://a/b/c/test.filename\", \"gs://a/b/c\"),\r\n (\"gs://a/b/c/\", \"gs://a/b\"),\r\n (\"gs://a/b/c/////\", \"gs://a/b\"),\r\n (\r\n \"https://a.blob.core.windows.net/container\",\r\n \"https://a.blob.core.windows.net/container\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/\",\r\n \"https://a.blob.core.windows.net/container\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/////\",\r\n \"https://a.blob.core.windows.net/container\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b\",\r\n \"https://a.blob.core.windows.net/container\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/c/test.filename\",\r\n \"https://a.blob.core.windows.net/container/b/c\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/c/\",\r\n \"https://a.blob.core.windows.net/container/b\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/c//////\",\r\n \"https://a.blob.core.windows.net/container/b\",\r\n ),\r\n ]\r\n for input_, desired_output in testcases:\r\n actual_output = bf.dirname(input_)\r\n assert desired_output == actual_output, f\"{input_}\"\r\n\r\n\r\ndef test_join():\r\n testcases = [\r\n (\"a\", \"b\", \"a/b\"),\r\n (\"a/b\", \"c\", \"a/b/c\"),\r\n (\"a/b/\", \"c\", \"a/b/c\"),\r\n (\"a/b/\", \"c/\", \"a/b/c/\"),\r\n (\"a/b/\", \"/c/\", \"/c/\"),\r\n (\"\", \"\", \"\"),\r\n # this doesn't work with : in the second path\r\n (\r\n \"gs://a/b/c\",\r\n \"d0123456789-._~!$&'()*+,;=@\",\r\n \"gs://a/b/c/d0123456789-._~!$&'()*+,;=@\",\r\n ),\r\n (\"gs://a\", \"b\", \"gs://a/b\"),\r\n (\"gs://a/b\", \"c\", \"gs://a/b/c\"),\r\n (\"gs://a/b/\", \"c\", \"gs://a/b/c\"),\r\n (\"gs://a/b/\", \"c/\", \"gs://a/b/c/\"),\r\n (\"gs://a/b/\", \"/c/\", \"gs://a/c/\"),\r\n (\"gs://a/b/\", \"../c\", \"gs://a/c\"),\r\n (\"gs://a/b/\", \"../c/\", \"gs://a/c/\"),\r\n (\"gs://a/b/\", \"../../c/\", \"gs://a/c/\"),\r\n (\r\n \"https://a.blob.core.windows.net/container\",\r\n \"b\",\r\n \"https://a.blob.core.windows.net/container/b\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b\",\r\n \"c\",\r\n \"https://a.blob.core.windows.net/container/b/c\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/\",\r\n \"c\",\r\n \"https://a.blob.core.windows.net/container/b/c\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/\",\r\n \"c/\",\r\n \"https://a.blob.core.windows.net/container/b/c/\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/\",\r\n \"/c/\",\r\n \"https://a.blob.core.windows.net/container/c/\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/\",\r\n \"../c\",\r\n \"https://a.blob.core.windows.net/container/c\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/\",\r\n \"../c/\",\r\n \"https://a.blob.core.windows.net/container/c/\",\r\n ),\r\n (\r\n \"https://a.blob.core.windows.net/container/b/\",\r\n \"../../c/\",\r\n \"https://a.blob.core.windows.net/container/c/\",\r\n ),\r\n ]\r\n for input_a, input_b, desired_output in testcases:\r\n actual_output = bf.join(input_a, input_b)\r\n assert desired_output == actual_output, f\"{input_a} {input_b}\"\r\n\r\n # this should raise an error because the behavior is confusing\r\n with pytest.raises(bf.Error):\r\n bf.join(\"gs://test/a/b\", 
\"c:d\")\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_get_url(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n _write_contents(path, contents)\r\n url, _ = bf.get_url(path)\r\n assert urllib.request.urlopen(url).read() == contents\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\n@pytest.mark.parametrize(\"streaming\", [True, False])\r\ndef test_read_write(ctx, streaming):\r\n contents = b\"meow!\\npurr\\n\"\r\n with ctx() as path:\r\n path = bf.join(path, \"a folder\", \"a.file\")\r\n bf.makedirs(bf.dirname(path))\r\n with bf.BlobFile(path, \"wb\", streaming=streaming) as w:\r\n w.write(contents)\r\n with bf.BlobFile(path, \"rb\", streaming=streaming) as r:\r\n assert r.read() == contents\r\n with bf.BlobFile(path, \"rb\", streaming=streaming) as r:\r\n lines = list(r)\r\n assert b\"\".join(lines) == contents\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_append(ctx):\r\n contents = b\"meow!\\n\"\r\n additional_contents = b\"purr\\n\"\r\n with ctx() as path:\r\n with bf.LocalBlobFile(path, \"ab\") as w:\r\n w.write(contents)\r\n with bf.LocalBlobFile(path, \"ab\") as w:\r\n w.write(additional_contents)\r\n with bf.BlobFile(path, \"rb\") as r:\r\n assert r.read() == contents + additional_contents\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_stat(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n _write_contents(path, contents)\r\n s = bf.stat(path)\r\n assert s.size == len(contents)\r\n assert 0 <= abs(time.time() - s.mtime) <= 5\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_remove(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n _write_contents(path, contents)\r\n assert bf.exists(path)\r\n bf.remove(path)\r\n assert not bf.exists(path)\r\n\r\n\r\n@pytest.mark.parametrize(\r\n # don't test local path because that has slightly different behavior\r\n \"ctx\",\r\n [_get_temp_gcs_path, _get_temp_as_path],\r\n)\r\ndef test_rmdir(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n dirpath = bf.dirname(path)\r\n # this is an error for a local path but not for a blob path\r\n bf.rmdir(bf.join(dirpath, \"fakedirname\"))\r\n new_dirpath = bf.join(dirpath, \"dirname\")\r\n bf.makedirs(new_dirpath)\r\n assert bf.exists(new_dirpath)\r\n bf.rmdir(new_dirpath)\r\n assert not bf.exists(new_dirpath)\r\n\r\n # double delete is fine\r\n bf.rmdir(new_dirpath)\r\n\r\n # implicit dir\r\n new_filepath = bf.join(dirpath, \"dirname\", \"name\")\r\n _write_contents(new_filepath, contents)\r\n with pytest.raises(OSError):\r\n # not empty dir\r\n bf.rmdir(new_dirpath)\r\n bf.remove(new_filepath)\r\n bf.rmdir(new_dirpath)\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_makedirs(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n dirpath = bf.join(path, \"x\", \"x\", \"x\")\r\n bf.makedirs(dirpath)\r\n assert bf.exists(dirpath)\r\n _write_contents(bf.join(dirpath, \"testfile\"), contents)\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_isdir(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n assert not 
bf.isdir(path)\r\n _write_contents(path, contents)\r\n assert not bf.isdir(path)\r\n dirpath = path + \".dir\"\r\n bf.makedirs(dirpath)\r\n assert bf.isdir(dirpath)\r\n assert not bf.isdir(dirpath[:-1])\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_listdir(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n dirpath = bf.dirname(path)\r\n a_path = bf.join(dirpath, \"a\")\r\n with bf.BlobFile(a_path, \"wb\") as w:\r\n w.write(contents)\r\n b_path = bf.join(dirpath, \"b\")\r\n with bf.BlobFile(b_path, \"wb\") as w:\r\n w.write(contents)\r\n bf.makedirs(bf.join(dirpath, \"c\"))\r\n assert sorted(list(bf.listdir(dirpath))) == [\"a\", \"b\", \"c\"]\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_listdir_sharded(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n dirpath = bf.dirname(path)\r\n with bf.BlobFile(bf.join(dirpath, \"a\"), \"wb\") as w:\r\n w.write(contents)\r\n with bf.BlobFile(bf.join(dirpath, \"aa\"), \"wb\") as w:\r\n w.write(contents)\r\n with bf.BlobFile(bf.join(dirpath, \"b\"), \"wb\") as w:\r\n w.write(contents)\r\n with bf.BlobFile(bf.join(dirpath, \"ca\"), \"wb\") as w:\r\n w.write(contents)\r\n bf.makedirs(bf.join(dirpath, \"c\"))\r\n with bf.BlobFile(bf.join(dirpath, \"c/a\"), \"wb\") as w:\r\n w.write(contents)\r\n # this should also test shard_prefix_length=2 but that takes too long\r\n assert sorted(list(bf.listdir(dirpath, shard_prefix_length=1))) == [\r\n \"a\",\r\n \"aa\",\r\n \"b\",\r\n \"c\",\r\n \"ca\",\r\n ]\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\n@pytest.mark.parametrize(\"topdown\", [False, True])\r\ndef test_walk(ctx, topdown):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n dirpath = bf.dirname(path)\r\n a_path = bf.join(dirpath, \"a\")\r\n with bf.BlobFile(a_path, \"wb\") as w:\r\n w.write(contents)\r\n bf.makedirs(bf.join(dirpath, \"c/d\"))\r\n b_path = bf.join(dirpath, \"c/d/b\")\r\n with bf.BlobFile(b_path, \"wb\") as w:\r\n w.write(contents)\r\n expected = [\r\n (dirpath, [\"c\"], [\"a\"]),\r\n (bf.join(dirpath, \"c\"), [\"d\"], []),\r\n (bf.join(dirpath, \"c\", \"d\"), [], [\"b\"]),\r\n ]\r\n if not topdown:\r\n expected = list(reversed(expected))\r\n assert list(bf.walk(dirpath, topdown=topdown)) == expected\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\n@pytest.mark.parametrize(\"parallel\", [False, True])\r\ndef test_glob(ctx, parallel):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n dirpath = bf.dirname(path)\r\n a_path = bf.join(dirpath, \"ab\")\r\n with bf.BlobFile(a_path, \"wb\") as w:\r\n w.write(contents)\r\n b_path = bf.join(dirpath, \"bb\")\r\n with bf.BlobFile(b_path, \"wb\") as w:\r\n w.write(contents)\r\n\r\n def assert_listing_equal(path, desired):\r\n desired = sorted([bf.join(dirpath, p) for p in desired])\r\n actual = sorted(list(bf.glob(path, parallel=parallel)))\r\n assert actual == desired, f\"{actual} != {desired}\"\r\n\r\n assert_listing_equal(bf.join(dirpath, \"*b\"), [\"ab\", \"bb\"])\r\n assert_listing_equal(bf.join(dirpath, \"a*\"), [\"ab\"])\r\n assert_listing_equal(bf.join(dirpath, \"ab*\"), [\"ab\"])\r\n assert_listing_equal(bf.join(dirpath, \"*\"), [\"ab\", \"bb\"])\r\n assert_listing_equal(bf.join(dirpath, \"bb\"), [\"bb\"])\r\n\r\n path = bf.join(dirpath, \"test.txt\")\r\n with 
bf.BlobFile(path, \"wb\") as w:\r\n w.write(contents)\r\n path = bf.join(dirpath, \"subdir\", \"test.txt\")\r\n bf.makedirs(bf.dirname(path))\r\n with bf.BlobFile(path, \"wb\") as f:\r\n f.write(contents)\r\n path = bf.join(dirpath, \"subdir\", \"subsubdir\", \"test.txt\")\r\n if \"://\" not in path:\r\n # implicit directory\r\n bf.makedirs(bf.dirname(path))\r\n with bf.BlobFile(path, \"wb\") as f:\r\n f.write(contents)\r\n\r\n assert_listing_equal(bf.join(dirpath, \"*/test.txt\"), [\"subdir/test.txt\"])\r\n assert_listing_equal(bf.join(dirpath, \"*/*.txt\"), [\"subdir/test.txt\"])\r\n if \"://\" in path:\r\n # local glob doesn't handle ** the same way as remote glob\r\n assert_listing_equal(\r\n bf.join(dirpath, \"**.txt\"),\r\n [\"test.txt\", \"subdir/test.txt\", \"subdir/subsubdir/test.txt\"],\r\n )\r\n else:\r\n assert_listing_equal(bf.join(dirpath, \"**.txt\"), [\"test.txt\"])\r\n assert_listing_equal(bf.join(dirpath, \"*/test\"), [])\r\n assert_listing_equal(bf.join(dirpath, \"subdir/test.txt\"), [\"subdir/test.txt\"])\r\n\r\n # directories\r\n assert_listing_equal(bf.join(dirpath, \"*\"), [\"ab\", \"bb\", \"subdir\", \"test.txt\"])\r\n assert_listing_equal(bf.join(dirpath, \"subdir\"), [\"subdir\"])\r\n assert_listing_equal(bf.join(dirpath, \"subdir/\"), [\"subdir\"])\r\n assert_listing_equal(bf.join(dirpath, \"*/\"), [\"subdir\"])\r\n assert_listing_equal(bf.join(dirpath, \"*dir\"), [\"subdir\"])\r\n assert_listing_equal(bf.join(dirpath, \"subdir/*dir\"), [\"subdir/subsubdir\"])\r\n assert_listing_equal(bf.join(dirpath, \"subdir/*dir/\"), [\"subdir/subsubdir\"])\r\n assert_listing_equal(bf.join(dirpath, \"su*ir/*dir/\"), [\"subdir/subsubdir\"])\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_rmtree(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n root = bf.dirname(path)\r\n destroy_path = bf.join(root, \"destroy\")\r\n bf.makedirs(destroy_path)\r\n save_path = bf.join(root, \"save\")\r\n bf.makedirs(save_path)\r\n\r\n # implicit dir\r\n if not \"://\" in path:\r\n bf.makedirs(bf.join(destroy_path, \"adir\"))\r\n with bf.BlobFile(bf.join(destroy_path, \"adir/b\"), \"wb\") as w:\r\n w.write(contents)\r\n\r\n # explicit dir\r\n bf.makedirs(bf.join(destroy_path, \"bdir\"))\r\n with bf.BlobFile(bf.join(destroy_path, \"bdir/b\"), \"wb\") as w:\r\n w.write(contents)\r\n\r\n bf.makedirs(bf.join(save_path, \"somedir\"))\r\n with bf.BlobFile(bf.join(save_path, \"somefile\"), \"wb\") as w:\r\n w.write(contents)\r\n\r\n def assert_listing_equal(path, desired):\r\n actual = list(bf.walk(path))\r\n # ordering of os walk is weird, only compare sorted order\r\n assert sorted(actual) == sorted(desired), f\"{actual} != {desired}\"\r\n\r\n assert_listing_equal(\r\n root,\r\n [\r\n (root, [\"destroy\", \"save\"], []),\r\n (destroy_path, [\"adir\", \"bdir\"], []),\r\n (bf.join(destroy_path, \"adir\"), [], [\"b\"]),\r\n (bf.join(destroy_path, \"bdir\"), [], [\"b\"]),\r\n (save_path, [\"somedir\"], [\"somefile\"]),\r\n (bf.join(save_path, \"somedir\"), [], []),\r\n ],\r\n )\r\n\r\n bf.rmtree(destroy_path)\r\n\r\n assert_listing_equal(\r\n root,\r\n [\r\n (root, [\"save\"], []),\r\n (save_path, [\"somedir\"], [\"somefile\"]),\r\n (bf.join(save_path, \"somedir\"), [], []),\r\n ],\r\n )\r\n\r\n\r\ndef test_copy():\r\n contents = b\"meow!\"\r\n with _get_temp_local_path() as local_path1, _get_temp_local_path() as local_path2, _get_temp_local_path() as local_path3, _get_temp_gcs_path() as gcs_path1, 
_get_temp_gcs_path() as gcs_path2, _get_temp_as_path() as as_path1, _get_temp_as_path() as as_path2:\r\n with pytest.raises(FileNotFoundError):\r\n bf.copy(gcs_path1, gcs_path2)\r\n with pytest.raises(FileNotFoundError):\r\n bf.copy(as_path1, as_path2)\r\n\r\n _write_contents(local_path1, contents)\r\n\r\n testcases = [\r\n (local_path1, local_path2),\r\n (local_path1, gcs_path1),\r\n (gcs_path1, gcs_path2),\r\n (gcs_path2, as_path1),\r\n (as_path1, as_path2),\r\n (as_path2, local_path3),\r\n ]\r\n\r\n for src, dst in testcases:\r\n h = bf.copy(src, dst, return_md5=True)\r\n assert h == hashlib.md5(contents).hexdigest()\r\n assert _read_contents(dst) == contents\r\n with pytest.raises(FileExistsError):\r\n bf.copy(src, dst)\r\n bf.copy(src, dst, overwrite=True)\r\n assert _read_contents(dst) == contents\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_exists(ctx):\r\n contents = b\"meow!\"\r\n with ctx() as path:\r\n assert not bf.exists(path)\r\n _write_contents(path, contents)\r\n assert bf.exists(path)\r\n\r\n\r\n@contextlib.contextmanager\r\ndef environ_context():\r\n env = os.environ.copy()\r\n yield\r\n os.environ = env\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"backend_ctx\",\r\n [\r\n (\"local\", _get_temp_local_path),\r\n (\"google\", _get_temp_gcs_path),\r\n (\"azure\", _get_temp_as_path),\r\n ],\r\n)\r\ndef test_backends_env_var(backend_ctx):\r\n backend, ctx = backend_ctx\r\n contents = b\"meow!\"\r\n\r\n with environ_context():\r\n with ctx() as path:\r\n _write_contents(path, contents)\r\n\r\n with bf.BlobFile(path, \"rb\") as f:\r\n f.read()\r\n\r\n os.environ[\"BLOBFILE_BACKENDS\"] = \"\"\r\n\r\n with pytest.raises(bf.Error):\r\n with bf.BlobFile(path, \"rb\") as f:\r\n f.read()\r\n\r\n os.environ[\"BLOBFILE_BACKENDS\"] = backend\r\n\r\n with bf.BlobFile(path, \"rb\") as f:\r\n f.read()\r\n\r\n\r\ndef test_more_exists():\r\n testcases = [\r\n (AZURE_INVALID_CONTAINER, False),\r\n (AZURE_INVALID_CONTAINER + \"/\", False),\r\n (AZURE_INVALID_CONTAINER + \"//\", False),\r\n (AZURE_INVALID_CONTAINER + \"/invalid.file\", False),\r\n (GCS_INVALID_BUCKET, False),\r\n (GCS_INVALID_BUCKET + \"/\", False),\r\n (GCS_INVALID_BUCKET + \"//\", False),\r\n (GCS_INVALID_BUCKET + \"/invalid.file\", False),\r\n (AZURE_INVALID_ACCOUNT, False),\r\n (AZURE_INVALID_ACCOUNT + \"/\", False),\r\n (AZURE_INVALID_ACCOUNT + \"//\", False),\r\n (AZURE_INVALID_ACCOUNT + \"/invalid.file\", False),\r\n (AZURE_VALID_CONTAINER, True),\r\n (AZURE_VALID_CONTAINER + \"/\", True),\r\n (AZURE_VALID_CONTAINER + \"//\", False),\r\n (AZURE_VALID_CONTAINER + \"/invalid.file\", False),\r\n (GCS_VALID_BUCKET, True),\r\n (GCS_VALID_BUCKET + \"/\", True),\r\n (GCS_VALID_BUCKET + \"//\", False),\r\n (GCS_VALID_BUCKET + \"/invalid.file\", False),\r\n (f\"/does-not-exist\", False),\r\n (f\"/\", True),\r\n ]\r\n for path, should_exist in testcases:\r\n assert bf.exists(path) == should_exist\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"base_path\", [AZURE_INVALID_ACCOUNT, AZURE_INVALID_CONTAINER, GCS_INVALID_BUCKET]\r\n)\r\ndef test_invalid_paths(base_path):\r\n for suffix in [\"\", \"/\", \"//\", \"/invalid.file\", \"/invalid/dir/\"]:\r\n path = base_path + suffix\r\n print(path)\r\n if path.endswith(\"/\"):\r\n expected_error = IsADirectoryError\r\n else:\r\n expected_error = FileNotFoundError\r\n list(bf.glob(path))\r\n if suffix == \"\":\r\n for pattern in [\"*\", \"**\"]:\r\n try:\r\n list(bf.glob(path + pattern))\r\n except bf.Error as e:\r\n 
assert \"Wildcards cannot be used\" in e.message\r\n else:\r\n for pattern in [\"*\", \"**\"]:\r\n list(bf.glob(path + pattern))\r\n with pytest.raises(FileNotFoundError):\r\n list(bf.listdir(path))\r\n assert not bf.exists(path)\r\n assert not bf.isdir(path)\r\n with pytest.raises(expected_error):\r\n bf.remove(path)\r\n if suffix in (\"\", \"/\"):\r\n try:\r\n bf.rmdir(path)\r\n except bf.Error as e:\r\n assert \"Cannot delete bucket\" in e.message\r\n else:\r\n bf.rmdir(path)\r\n with pytest.raises(NotADirectoryError):\r\n bf.rmtree(path)\r\n with pytest.raises(FileNotFoundError):\r\n bf.stat(path)\r\n\r\n if base_path == AZURE_INVALID_ACCOUNT:\r\n with pytest.raises(bf.Error):\r\n bf.get_url(path)\r\n else:\r\n bf.get_url(path)\r\n\r\n with pytest.raises(FileNotFoundError):\r\n bf.md5(path)\r\n with pytest.raises(bf.Error):\r\n bf.makedirs(path)\r\n list(bf.walk(path))\r\n with tempfile.TemporaryDirectory() as tmpdir:\r\n local_path = os.path.join(tmpdir, \"test.txt\")\r\n with pytest.raises(expected_error):\r\n bf.copy(path, local_path)\r\n with open(local_path, \"w\") as f:\r\n f.write(\"meow\")\r\n with pytest.raises(expected_error):\r\n bf.copy(local_path, path)\r\n for streaming in [False, True]:\r\n with pytest.raises(expected_error):\r\n with bf.BlobFile(path, \"rb\", streaming=streaming) as f:\r\n f.read()\r\n with pytest.raises(expected_error):\r\n with bf.BlobFile(path, \"wb\", streaming=streaming) as f:\r\n f.write(b\"meow\")\r\n\r\n\r\n@pytest.mark.parametrize(\"buffer_size\", [1, 100])\r\n@pytest.mark.parametrize(\"ctx\", [_get_temp_gcs_path, _get_temp_as_path])\r\ndef test_read_stats(buffer_size, ctx):\r\n with ctx() as path:\r\n contents = b\"meow!\"\r\n\r\n with bf.BlobFile(path, \"wb\") as w:\r\n w.write(contents)\r\n\r\n with bf.BlobFile(path, \"rb\", buffer_size=buffer_size) as r:\r\n r.read(1)\r\n\r\n if buffer_size == 1:\r\n assert r.raw.bytes_read == 1 # type: ignore\r\n else:\r\n assert r.raw.bytes_read == len(contents) # type: ignore\r\n\r\n with bf.BlobFile(path, \"rb\", buffer_size=buffer_size) as r:\r\n r.read(1)\r\n r.seek(4)\r\n r.read(1)\r\n\r\n if buffer_size == 1:\r\n assert r.raw.requests == 2 # type: ignore\r\n assert r.raw.bytes_read == 2 # type: ignore\r\n else:\r\n assert r.raw.requests == 1 # type: ignore\r\n assert r.raw.bytes_read == len(contents) # type: ignore\r\n\r\n\r\n@pytest.mark.parametrize(\"ctx\", [_get_temp_gcs_path, _get_temp_as_path])\r\ndef test_cache_dir(ctx):\r\n cache_dir = tempfile.mkdtemp()\r\n contents = b\"meow!\"\r\n alternative_contents = b\"purr!\"\r\n with ctx() as path:\r\n with bf.BlobFile(path, mode=\"wb\") as f:\r\n f.write(contents)\r\n with bf.LocalBlobFile(path, mode=\"rb\", cache_dir=cache_dir) as f:\r\n assert f.read() == contents\r\n content_hash = hashlib.md5(contents).hexdigest()\r\n cache_path = bf.join(cache_dir, content_hash, bf.basename(path))\r\n with open(cache_path, \"rb\") as f:\r\n assert f.read() == contents\r\n # alter the cached file to make sure we are not re-reading the remote file\r\n with open(cache_path, \"wb\") as f:\r\n f.write(alternative_contents)\r\n with bf.LocalBlobFile(path, mode=\"rb\", cache_dir=cache_dir) as f:\r\n assert f.read() == alternative_contents\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\n@pytest.mark.parametrize(\"use_random\", [False, True])\r\ndef test_change_file_size(ctx, use_random):\r\n chunk_size = 2 ** 20\r\n long_contents = b\"\\x00\" * chunk_size * 3\r\n short_contents = b\"\\xFF\" * 
chunk_size * 2\r\n if use_random:\r\n long_contents = os.urandom(len(long_contents))\r\n short_contents = os.urandom(len(short_contents))\r\n with ctx() as path:\r\n # make file shorter\r\n with bf.BlobFile(path, \"wb\") as f:\r\n f.write(long_contents)\r\n with bf.BlobFile(path, \"rb\") as f:\r\n read_contents = f.read(chunk_size)\r\n with bf.BlobFile(path, \"wb\") as f2:\r\n f2.write(short_contents)\r\n # close underlying connection\r\n f.raw._f = None # type: ignore\r\n read_contents += f.read()\r\n assert len(f.read()) == 0\r\n assert (\r\n read_contents\r\n == long_contents[:chunk_size] + short_contents[chunk_size:]\r\n )\r\n\r\n # make file longer\r\n with bf.BlobFile(path, \"wb\") as f:\r\n f.write(short_contents)\r\n with bf.BlobFile(path, \"rb\") as f:\r\n read_contents = f.read(chunk_size)\r\n with bf.BlobFile(path, \"wb\") as f2:\r\n f2.write(long_contents)\r\n # close underlying connection\r\n f.raw._f = None # type: ignore\r\n read_contents += f.read()\r\n assert len(f.read()) == 0\r\n expected = (\r\n short_contents[:chunk_size] + long_contents[chunk_size : chunk_size * 2]\r\n )\r\n # local files behave differently and read the new contents until the\r\n # end of the new file size\r\n if not path.startswith(\"gs://\") and not path.startswith(\"https://\"):\r\n expected = short_contents[:chunk_size] + long_contents[chunk_size:]\r\n assert read_contents == expected\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_overwrite_while_reading(ctx):\r\n chunk_size = 2 ** 20\r\n contents = b\"\\x00\" * chunk_size * 2\r\n alternative_contents = b\"\\xFF\" * chunk_size * 4\r\n with ctx() as path:\r\n with bf.BlobFile(path, \"wb\") as f:\r\n f.write(contents)\r\n with bf.BlobFile(path, \"rb\") as f:\r\n read_contents = f.read(chunk_size)\r\n with bf.BlobFile(path, \"wb\") as f2:\r\n f2.write(alternative_contents)\r\n # close underlying connection\r\n f.raw._f = None # type: ignore\r\n read_contents += f.read(chunk_size)\r\n assert (\r\n read_contents\r\n == contents[:chunk_size]\r\n + alternative_contents[chunk_size : chunk_size * 2]\r\n )\r\n\r\n\r\ndef test_create_local_intermediate_dirs():\r\n contents = b\"meow\"\r\n with _get_temp_local_path() as path:\r\n dirpath = bf.dirname(path)\r\n with chdir(dirpath):\r\n for filepath in [\r\n bf.join(dirpath, \"dirname\", \"file.name\"),\r\n bf.join(\"..\", bf.basename(dirpath), \"file.name\"),\r\n \"./file.name\",\r\n \"file.name\",\r\n ]:\r\n with bf.BlobFile(filepath, \"wb\") as f:\r\n f.write(contents)\r\n\r\n\r\n@pytest.mark.parametrize(\"binary\", [True, False])\r\n@pytest.mark.parametrize(\"blobfile\", [bf.BlobFile, bf.LocalBlobFile])\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_more_read_write(binary, blobfile, ctx):\r\n rng = np.random.RandomState(0)\r\n\r\n with ctx() as path:\r\n if binary:\r\n read_mode = \"rb\"\r\n write_mode = \"wb\"\r\n else:\r\n read_mode = \"r\"\r\n write_mode = \"w\"\r\n\r\n with blobfile(path, write_mode) as w:\r\n pass\r\n\r\n with blobfile(path, read_mode) as r:\r\n assert len(r.read()) == 0\r\n\r\n contents = b\"meow!\"\r\n if not binary:\r\n contents = contents.decode(\"utf8\")\r\n\r\n with blobfile(path, write_mode) as w:\r\n w.write(contents)\r\n\r\n with blobfile(path, read_mode) as r:\r\n assert r.read(1) == contents[:1]\r\n assert r.read() == contents[1:]\r\n assert len(r.read()) == 0\r\n\r\n with blobfile(path, read_mode) as r:\r\n for i in 
range(len(contents)):\r\n assert r.read(1) == contents[i : i + 1]\r\n assert len(r.read()) == 0\r\n assert len(r.read()) == 0\r\n\r\n contents = b\"meow!\\n\\nmew!\\n\"\r\n lines = [b\"meow!\\n\", b\"\\n\", b\"mew!\\n\"]\r\n if not binary:\r\n contents = contents.decode(\"utf8\")\r\n lines = [line.decode(\"utf8\") for line in lines]\r\n\r\n with blobfile(path, write_mode) as w:\r\n w.write(contents)\r\n\r\n with blobfile(path, read_mode) as r:\r\n assert r.readlines() == lines\r\n\r\n with blobfile(path, read_mode) as r:\r\n assert [line for line in r] == lines\r\n\r\n if binary:\r\n for size in [2 * 2 ** 20, 12_345_678]:\r\n contents = rng.randint(0, 256, size=size, dtype=np.uint8).tobytes()\r\n\r\n with blobfile(path, write_mode) as w:\r\n w.write(contents)\r\n\r\n with blobfile(path, read_mode) as r:\r\n size = rng.randint(0, 1_000_000)\r\n buf = b\"\"\r\n while True:\r\n b = r.read(size)\r\n if b == b\"\":\r\n break\r\n buf += b\r\n assert buf == contents\r\n else:\r\n obj = {\"a\": 1}\r\n\r\n with blobfile(path, write_mode) as w:\r\n json.dump(obj, w)\r\n\r\n with blobfile(path, read_mode) as r:\r\n assert json.load(r) == obj\r\n\r\n\r\n@pytest.mark.parametrize(\"blobfile\", [bf.BlobFile, bf.LocalBlobFile])\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_video(blobfile, ctx):\r\n rng = np.random.RandomState(0)\r\n shape = (256, 64, 64, 3)\r\n video_data = rng.randint(0, 256, size=np.prod(shape), dtype=np.uint8).reshape(shape)\r\n\r\n with ctx() as path:\r\n with blobfile(path, mode=\"wb\") as wf:\r\n with imageio.get_writer(\r\n wf,\r\n format=\"ffmpeg\",\r\n quality=None,\r\n codec=\"libx264rgb\",\r\n pixelformat=\"bgr24\",\r\n output_params=[\"-f\", \"mp4\", \"-crf\", \"0\"],\r\n ) as w:\r\n for frame in video_data:\r\n w.append_data(frame)\r\n\r\n with blobfile(path, mode=\"rb\") as rf:\r\n with imageio.get_reader(\r\n rf, format=\"ffmpeg\", input_params=[\"-f\", \"mp4\"]\r\n ) as r:\r\n for idx, frame in enumerate(r):\r\n assert np.array_equal(frame, video_data[idx])\r\n\r\n with blobfile(path, mode=\"rb\") as rf:\r\n container = av.open(rf)\r\n stream = container.streams.video[0]\r\n for idx, frame in enumerate(container.decode(stream)):\r\n assert np.array_equal(frame.to_image(), video_data[idx])\r\n\r\n\r\n# this is pretty slow and docker will often run out of memory\r\n@pytest.mark.slow\r\n@pytest.mark.parametrize(\r\n \"ctx\", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_large_file(ctx):\r\n contents = b\"0\" * 2 ** 32\r\n with ctx() as path:\r\n with bf.BlobFile(path, \"wb\", streaming=True) as f:\r\n f.write(contents)\r\n with bf.BlobFile(path, \"rb\", streaming=True) as f:\r\n assert contents == f.read()\r\n\r\n\r\ndef test_composite_objects():\r\n with _get_temp_gcs_path() as remote_path:\r\n with _get_temp_local_path() as local_path:\r\n contents = b\"0\" * 2 * 2 ** 20\r\n with open(local_path, \"wb\") as f:\r\n f.write(contents)\r\n sp.run(\r\n [\r\n \"gsutil\",\r\n \"-o\",\r\n \"GSUtil:parallel_composite_upload_threshold=1M\",\r\n \"cp\",\r\n local_path,\r\n remote_path,\r\n ],\r\n check=True,\r\n )\r\n\r\n assert hashlib.md5(contents).hexdigest() == bf.md5(remote_path)\r\n assert hashlib.md5(contents).hexdigest() == bf.md5(remote_path)\r\n\r\n with tempfile.TemporaryDirectory() as tmpdir:\r\n with bf.BlobFile(remote_path, \"rb\", cache_dir=tmpdir, streaming=False) as f:\r\n assert f.read() == contents\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"ctx\", 
[_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]\r\n)\r\ndef test_md5(ctx):\r\n contents = b\"meow!\"\r\n meow_hash = hashlib.md5(contents).hexdigest()\r\n\r\n with ctx() as path:\r\n _write_contents(path, contents)\r\n assert bf.md5(path) == meow_hash\r\n with bf.BlobFile(path, \"wb\") as f:\r\n f.write(contents)\r\n assert bf.md5(path) == meow_hash\r\n with bf.BlobFile(path, \"wb\") as f:\r\n f.write(contents)\r\n assert bf.md5(path) == meow_hash\r\n\r\n\r\n@pytest.mark.parametrize(\"ctx\", [_get_temp_as_path])\r\ndef test_azure_maybe_update_md5(ctx):\r\n contents = b\"meow!\"\r\n meow_hash = hashlib.md5(contents).hexdigest()\r\n alternative_contents = b\"purr\"\r\n purr_hash = hashlib.md5(alternative_contents).hexdigest()\r\n\r\n with ctx() as path:\r\n _write_contents(path, contents)\r\n _isfile, metadata = ops._azure_isfile(path)\r\n assert ops._azure_maybe_update_md5(path, metadata[\"ETag\"], meow_hash)\r\n _write_contents(path, alternative_contents)\r\n assert not ops._azure_maybe_update_md5(path, metadata[\"ETag\"], meow_hash)\r\n _isfile, metadata = ops._azure_isfile(path)\r\n assert base64.b64decode(metadata[\"Content-MD5\"]).hex() == purr_hash\r\n\r\n\r\ndef _get_http_pool_id(q):\r\n q.put(id(ops._get_http_pool()))\r\n\r\n\r\ndef test_fork():\r\n q = mp.Queue()\r\n # this reference should keep the old http client alive in the child process\r\n # to ensure that a new one does not recycle the memory address\r\n http1 = ops._get_http_pool()\r\n parent1 = id(http1)\r\n p = mp.Process(target=_get_http_pool_id, args=(q,))\r\n p.start()\r\n p.join()\r\n http2 = ops._get_http_pool()\r\n parent2 = id(http2)\r\n\r\n child = q.get()\r\n assert parent1 == parent2\r\n assert child != parent1\r\n","sub_path":"blobfile/ops_test.py","file_name":"ops_test.py","file_ext":"py","file_size_in_byte":39602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"52556894","text":"import numpy as np\nimport math\nimport random\nimport os\nimport pickle\nimport xml.etree.ElementTree as ET\nfrom sklearn import preprocessing\n# from sklearn.preprocessing import MinMaxScaler\n\nimport csv\nimport matplotlib.pyplot as plt\nimport glob\n\n\nclass DataLoader():\n def __init__(self, args, logger, limit=500):\n self.data_dir = args.data_dir\n self.threshold = args.threshold\n self.alphabet = args.alphabet\n self.scale = args.scale\n self.batch_size = args.batch_size\n self.tsteps = args.tsteps\n self.data_scale = args.data_scale # scale data down by this factor\n self.ascii_steps = args.tsteps/args.tsteps_per_ascii\n self.logger = logger\n self.limit = limit # removes large noisy gaps in the data\n\n data_file = os.path.join(self.data_dir, \"strokes_training_data.cpkl\")\n stroke_dir = self.data_dir + \"/lineStrokes\"\n ascii_dir = self.data_dir + \"/ascii\"\n\n if not os.path.exists(data_file):\n self.logger.write(\"\\tcreating training data cpkl file from raw source\")\n self.preprocess(data_file)\n\n self.load_preprocessed(data_file)\n self.reset_batch_pointer()\n\n def preprocess(self, data_file):\n # create data file from the raw csv trace files (one folder per subject ID)\n self.logger.write(\"\\tparsing dataset...\")\n\n asciis = []\n strokes = []\n for ID in range(21):\n path = r'../Trace_data_processed\ID%s\*.csv' % (ID + 1)\n\n for fname in sorted(glob.glob(path), reverse=True):\n phrases = fname.split('_')[3:-2]\n\n phrases_join_ = ' '.join(phrases)\n\n if ID + 1 == 21:\n phrases_join=phrases_join_\n\n 
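# Each CSV row holds absolute pen coordinates; the loop below stores scaled\n # offsets (deltas) from the previous point, the usual input encoding for\n # handwriting-synthesis RNNs.\n 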
pos_list = []\n pre_x = 0\n pre_y = 0\n with open(fname, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in spamreader:\n\n if row == []:\n continue\n row_split = row\n pos_list.append([self.scale * (float(row_split[0]) - pre_x), self.scale * (float(row_split[1]) - pre_y),0])\n pre_x = float(row_split[0])\n pre_y = float(row_split[1])\n\n else:\n phrases_join = phrases_join_+'_'\n pos_list = []\n pre_x=0\n pre_y=0\n\n with open(fname, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in spamreader:\n if row == []:\n continue\n row_split=row\n pos_list.append([self.scale*(-float(row_split[0])-pre_x), self.scale*(float(row_split[1])-pre_y), 0])\n pre_x=-float(row_split[0])\n pre_y=float(row_split[1])\n\n asciis.append(phrases_join)\n strokes.append(pos_list)\n\n assert (len(strokes) == len(asciis)), \"There should be a 1:1 correspondence between stroke data and ascii labels.\"\n f = open(data_file, \"wb\")\n pickle.dump([strokes, asciis], f, protocol=2)\n f.close()\n\n self.logger.write(\"\\tfinished parsing dataset. saved {} lines\".format(len(strokes)))\n\n\n def load_preprocessed(self, data_file):\n f = open(data_file,\"rb\")\n [self.raw_stroke_data, self.raw_ascii_data] = pickle.load(f, encoding='latin1')\n # [raw_stroke_data, raw_ascii_data] = pickle.load(f, encoding='latin1')\n f.close()\n\n\n # goes thru the list, and only keeps the text entries that have more than tsteps points\n self.stroke_data = []\n self.ascii_data = []\n self.valid_stroke_data = []\n self.valid_ascii_data = []\n\n # every 1 in 20 (5%) will be used for validation data\n cur_data_counter = 0\n zero = 0\n\n original_stroke_data = []\n original_ascii_data = []\n\n for m in range(len(self.raw_stroke_data)):\n data = self.raw_stroke_data[m]\n data=np.array(data)\n assert not np.any(np.isnan(data))\n\n ascii= self.raw_ascii_data[m]\n ascii_len = len(self.raw_ascii_data[m])\n average_tsteps_ = ascii_len * 12\n\n if len(data) < average_tsteps_:\n\n multiple = int(round(average_tsteps_ / len(data)))\n length = multiple * len(data)\n new_data = np.zeros((length, 3))\n\n for i in range(length):\n if i % multiple == 0:\n step = np.divide(data[int(i / multiple)], multiple)\n step=np.reshape(step,(1,3))\n\n for j in range(multiple):\n new_data[(i + j),:] = step\n new_data=np.array(new_data)\n original_stroke_data.append(new_data)\n original_ascii_data.append(ascii)\n else:\n multiple = int(round(len(data) / average_tsteps_))\n length = int(round(len(data) / multiple))\n new_data = np.zeros((length + 1, 3))\n for ii in range(len(data)):\n\n if ii % multiple == 0:\n sum = np.sum(data[ii:ii + multiple],axis=0)\n sum=np.reshape(sum,(1,3))\n\n new_data[int(ii / multiple),:] = sum\n\n new_data=np.array(new_data)\n original_stroke_data.append(new_data)\n original_ascii_data.append(ascii)\n f = open(\"data/new_data\", \"wb\")\n pickle.dump([original_stroke_data,original_ascii_data], f, protocol=2)\n f.close()\n\n original_stroke_data_ = []\n original_ascii_data_ = []\n tsteps = 300\n for i in range(len(original_stroke_data)):\n data = original_stroke_data[i]\n ascii = original_ascii_data[i]\n\n if len(data) < tsteps:\n multiple = int(round(tsteps / len(data)))\n length = multiple * len(data)\n new_data = []\n counter = 0\n for i in range(length):\n\n if i % multiple == 0:\n new_data.append(data[i - counter])\n else:\n new_data.append([0., 0., 0.])\n counter += 1\n\n original_stroke_data_.append(new_data)\n 
original_ascii_data_.append(ascii)\n\n else:\n original_stroke_data_.append(data)\n original_ascii_data_.append(ascii)\n\n original_stroke_data = original_stroke_data_\n original_ascii_data = original_ascii_data_\n\n original_stroke_data_ = []\n original_ascii_data_ = []\n for i in range(len(original_stroke_data)):\n data = original_stroke_data[i]\n ascii = original_ascii_data[i]\n\n if len(data) < tsteps:\n pad = int(tsteps - np.shape(data)[0])\n data_ = np.concatenate((np.zeros((pad, 3)), data), axis=0)\n original_stroke_data_.append(data_)\n original_ascii_data_.append(ascii)\n else:\n original_stroke_data_.append(data)\n original_ascii_data_.append(ascii)\n\n\n\n whole_data=list(zip(original_stroke_data_,original_ascii_data_))\n np.random.shuffle(whole_data)\n np.random.shuffle(whole_data)\n\n original_stroke_data,original_ascii_data=zip(*whole_data)\n\n average_ascii_sum=0\n average_tsteps_sum=0\n average_char_sum=0\n for i in range(len(original_stroke_data)):\n average_ascii_sum+=len(original_stroke_data[i])/len(original_ascii_data[i])\n average_tsteps_sum+=len(original_stroke_data[i])\n average_char_sum+=len(original_ascii_data[i])\n\n\n average_ascii=average_ascii_sum/len(original_stroke_data)\n average_tsteps=average_tsteps_sum/len(original_stroke_data)\n average_char=average_char_sum/len(original_stroke_data)\n\n for i in range(len(original_stroke_data)):\n data=original_stroke_data[i]\n ascii=original_ascii_data[i]\n\n cur_data_counter = cur_data_counter + 1\n # data = np.array(data, dtype=np.float32)\n if cur_data_counter % 20 == 0:\n self.valid_stroke_data.append(data)\n self.valid_ascii_data.append(ascii)\n else:\n self.stroke_data.append(data)\n self.ascii_data.append(ascii)\n\n # minus 1, since we want the ydata to be a shifted version of x data\n assert (len(original_ascii_data) == len(original_stroke_data)), \"There should be a 1:1 correspondence between stroke data and ascii labels.\"\n self.num_batches = int(len(self.stroke_data) / self.batch_size)\n self.logger.write(\"\\tload dataset:\")\n self.logger.write(\"\\t\\t{} average number of characters per phrase\".format(average_char))\n self.logger.write(\"\\t\\t{} average steps per phrase\".format(average_tsteps))\n self.logger.write(\"\\t\\t{} average steps per ascii\".format(average_ascii))\n self.logger.write(\"\\t\\t{} zero individual data points\".format(zero))\n self.logger.write(\"\\t\\t{} train individual data points\".format(len(self.stroke_data)))\n self.logger.write(\"\\t\\t{} valid individual data points\".format(len(self.valid_stroke_data)))\n self.logger.write(\"\\t\\t{} batches\".format(self.num_batches))\n\n\n def validation_data(self):\n # returns validation data\n x_batch = []\n y_batch = []\n ascii_list = []\n for i in range(self.batch_size):\n valid_ix = i%len(self.valid_stroke_data)\n data = self.valid_stroke_data[valid_ix]\n x_batch.append(np.copy(data[-self.tsteps-4:-4]))\n y_batch.append(np.copy(data[-self.tsteps-3:-3]))\n ascii_list.append(self.valid_ascii_data[valid_ix])\n one_hots = [to_one_hot(s, self.ascii_steps, self.alphabet) for s in ascii_list]\n return x_batch, y_batch, ascii_list, one_hots\n\n def next_batch(self):\n # returns a randomized, tsteps-sized portion of the training data\n x_batch = []\n y_batch = []\n ascii_list = []\n for i in range(self.batch_size):\n data = self.stroke_data[self.idx_perm[self.pointer]]\n #idx = random.randint(0, len(data)-self.tsteps-2)\n x_batch.append(np.copy(data[-self.tsteps-4:-4]))\n y_batch.append(np.copy(data[-self.tsteps-3:-3]))\n 
ascii_list.append(self.ascii_data[self.idx_perm[self.pointer]])\n self.tick_batch_pointer()\n one_hots = [to_one_hot(s, self.ascii_steps, self.alphabet) for s in ascii_list]\n return x_batch, y_batch, ascii_list, one_hots\n\n def tick_batch_pointer(self):\n self.pointer += 1\n if (self.pointer >= len(self.stroke_data)):\n self.reset_batch_pointer()\n def reset_batch_pointer(self):\n self.idx_perm = np.random.permutation(len(self.stroke_data))\n self.pointer = 0\n\n# utility function for converting input ascii characters into vectors the network can understand.\n# index position 0 means \"unknown\"\ndef to_one_hot(s, ascii_steps, alphabet):\n # steplimit=3e3; s = s[:3e3] if len(s) > 3e3 else s # clip super-long strings\n alphabet_bi_list=[]\n for i in range(len(alphabet)):\n for j in range(len(alphabet)):\n alphabet_bi=alphabet[i]+alphabet[j]\n alphabet_bi_list.append(alphabet_bi)\n\n s_list=[]\n\n for i in range(len(s)):\n if i > len(s)-2:\n continue\n s_bi=s[i]+s[i+1]\n s_list.append(s_bi)\n\n\n seq = [alphabet_bi_list.index(char) + 1 for char in s_list]\n if len(seq) >= ascii_steps-1:\n # print(ascii_steps)\n seq = seq[-int(ascii_steps-1):]\n ss = [alphabet_bi_list[i - 1] for i in seq]\n ss=''.join(ss)\n\n else:\n seq = [0]*int(ascii_steps-1 - int(len(seq))) + seq\n ss=s\n\n one_hot = np.zeros((int(ascii_steps-1),len(alphabet_bi_list)+1))\n one_hot[np.arange(int(ascii_steps-1)),seq] = 1\n return one_hot\n\n\ndef to_one_hot_sample(s,tsteps, alphabet):\n # steplimit=3e3; s = s[:3e3] if len(s) > 3e3 else s # clip super-long strings\n ascii_steps=int(tsteps/12)\n alphabet_bi_list=[]\n for i in range(len(alphabet)):\n for j in range(len(alphabet)):\n alphabet_bi=alphabet[i]+alphabet[j]\n alphabet_bi_list.append(alphabet_bi)\n\n s_list=[]\n\n for i in range(len(s)):\n if i > len(s)-2:\n continue\n s_bi=s[i]+s[i+1]\n s_list.append(s_bi)\n\n\n seq = [alphabet_bi_list.index(char) + 1 for char in s_list]\n if len(seq) >= ascii_steps-1:\n # print(ascii_steps)\n seq = seq[-int(ascii_steps-1):]\n\n else:\n seq = [0]*int(ascii_steps-1 - int(len(seq))) + seq\n\n\n one_hot = np.zeros((int(ascii_steps-1),len(alphabet_bi_list)+1))\n one_hot[np.arange(int(ascii_steps-1)),seq] = 1\n return one_hot\n\n\n\n\n\ndef to_string_sample(s, tsteps, alphabet):\n # steplimit=3e3; s = s[:3e3] if len(s) > 3e3 else s # clip super-long strings\n ascii_steps = int(tsteps/12)\n seq = [alphabet.find(char) + 1 for char in s]\n if len(seq) >= ascii_steps:\n # print(ascii_steps)\n seq = seq[-int(ascii_steps):]\n ss = [alphabet[i - 1] for i in seq]\n ss=''.join(ss)\n\n else:\n seq = [0]*int(ascii_steps - int(len(seq))) + seq\n ss=s\n\n\n return ss\n\n\n\n\n\n\ndef to_string(s, ascii_steps, alphabet):\n # steplimit=3e3; s = s[:3e3] if len(s) > 3e3 else s # clip super-long strings\n seq = [alphabet.find(char) + 1 for char in s]\n if len(seq) >= ascii_steps:\n # print(ascii_steps)\n seq = seq[-int(ascii_steps):]\n ss = [alphabet[i - 1] for i in seq]\n ss=''.join(ss)\n\n else:\n seq = [0]*int(ascii_steps - int(len(seq))) + seq\n ss=s\n\n\n return ss\n\n\n# abstraction for logging\nclass Logger():\n def __init__(self, args):\n self.logf = '{}train_scribe.txt'.format(args.log_dir) if args.train else '{}sample_scribe.txt'.format(args.log_dir)\n with open(self.logf, 'w') as f: f.write(\"project by shawn shen\")\n\n def write(self, s, print_it=True):\n if print_it:\n print(s)\n with open(self.logf, 'a') as f:\n f.write(s + \"\\n\")","sub_path":"RNN-Based 
TF1/.ipynb_checkpoints/utils-checkpoint.py","file_name":"utils-checkpoint.py","file_ext":"py","file_size_in_byte":14722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"364946088","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'Myblog'\nurlpatterns = [\n path('index/', views.index, name='index'),\n # The article routes are no longer used; the blog routes below replace them\n path('article/<int:pk>', views.article_detail, name='article_detail'),\n path('article/', views.article_list, name='article_list'),\n path('<int:pk>', views.blog_detail, name='blog_detail'),\n path('', views.blog_list, name='blog_list'),\n path('type/<int:pk>', views.blogs_with_type, name='blogs_with_type'),\n path('date/<int:year>/<int:month>', views.blogs_with_date, name='blogs_with_date'),\n\n]","sub_path":"Myblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"215851455","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport math\nimport os\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\nfrom sklearn import metrics\n\nfrom util.functions import Rectangle, has_intersect, intersect, score_page, get_center_point, \\\n l2_distance, get_radian, get_nearest_box\n\nsave_dir = 'test_nms030_tta7_first_5models_soft_prob'\n\ngather_info = True\ncropping = True\nexpand_crop = True\npadding_rate = 0.05\ncrop_target_dir = 'input/pseudo_images'\ncrop_prob = 1.0\n\ntarget_images = [line.rstrip() for line in open('input/test_images.list').readlines()]\ncount = len(target_images)\n\n\ndef check_hiragana(label):\n codepoint = int(label.replace('U+', '0x'), 16)\n if 12352 <= codepoint <= 12447:\n return 1\n return 0\n\ndef is_first_in_second(a, b):\n return a[0] >= b[0] and b[2] >= a[2] \\\n and a[1] >= b[1] and b[3] >= a[3]\n\ndef check_box(boxlist, size, prob_list):\n width, height = size\n\n broken_box_list = [0] * len(boxlist)\n inside_box_list = [0] * len(boxlist)\n has_box_list = [0] * len(boxlist)\n 
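# Per-box flags: broken marks a degenerate rectangle, inside marks a box\n # contained in another box, has_box marks a box that contains another;\n # overlap_rate_list accumulates each box's overlapped-area ratio.\n 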
overlap_rate_list = [0.] * len(boxlist)\n\n for i, current_box in enumerate(boxlist):\n if broken_box_list[i] == 1:\n continue\n\n try:\n current_rect = Rectangle(*current_box)\n except ValueError:\n broken_box_list[i] = 1\n continue\n\n current_rect_overlap = 0.\n\n for j, target_box in enumerate(boxlist):\n try:\n target_rect = Rectangle(*target_box)\n except ValueError:\n broken_box_list[j] = 1\n continue\n\n if i == j:\n continue\n\n if is_first_in_second(current_box, target_box):\n inside_box_list[i] = 1\n has_box_list[j] = 1\n\n if has_intersect(current_rect, target_rect):\n overlap_rate = intersect(current_rect, target_rect).area() / current_rect.area()\n current_rect_overlap += overlap_rate\n\n overlap_rate_list[i] = current_rect_overlap\n\n return broken_box_list, inside_box_list, has_box_list, overlap_rate_list\n\n\ndef gen_info(prob, label, bbox, box_score, broken, overlap_rate, nearest_dict, new_boxlist, size, image_id):\n\n # compute summary statistics over all boxes on the page\n w_list = []\n h_list = []\n area_list = []\n x_point_list = []\n y_point_list = []\n for xmin, ymin, xmax, ymax in new_boxlist:\n w = round(float(xmax - xmin))\n w_list.append(w)\n h = round(float(ymax - ymin))\n h_list.append(h)\n area_list.append(w*h)\n center_point = get_center_point((xmin, ymin, xmax, ymax))\n x_point_list.append(center_point[0])\n y_point_list.append(center_point[1])\n wl = pd.Series(w_list)\n hl = pd.Series(h_list)\n al = pd.Series(area_list)\n xl = pd.Series(x_point_list)\n yl = pd.Series(y_point_list)\n mean_area = al.mean()\n mean_width = wl.mean()\n mean_height = hl.mean()\n mean_x = xl.mean()\n mean_y = yl.mean()\n std_area = al.std()\n std_width = wl.std()\n std_height = hl.std()\n std_x = xl.std()\n std_y = yl.std()\n median_area = al.median()\n median_width = wl.median()\n median_height = hl.median()\n median_x = xl.median()\n median_y = yl.median()\n box_num = len(new_boxlist)\n\n try:\n nearest_box = new_boxlist[nearest_dict[0]['index']]\n nearest_width = round(float(nearest_box[2] - nearest_box[0]))\n nearest_height = round(float(nearest_box[3] - nearest_box[1]))\n except IndexError:\n nearest_width = np.nan\n nearest_height = np.nan\n try:\n nearest2_box = new_boxlist[nearest_dict[1]['index']]\n nearest2_width = round(float(nearest2_box[2] - nearest2_box[0]))\n nearest2_height = round(float(nearest2_box[3] - nearest2_box[1]))\n except IndexError:\n nearest2_width = np.nan\n nearest2_height = np.nan\n try:\n nearest3_box = new_boxlist[nearest_dict[2]['index']]\n nearest3_width = round(float(nearest3_box[2] - nearest3_box[0]))\n nearest3_height = round(float(nearest3_box[3] - nearest3_box[1]))\n except IndexError:\n nearest3_width = np.nan\n nearest3_height = np.nan\n try:\n nearest4_box = new_boxlist[nearest_dict[3]['index']]\n nearest4_width = round(float(nearest4_box[2] - nearest4_box[0]))\n nearest4_height = round(float(nearest4_box[3] - nearest4_box[1]))\n except IndexError:\n nearest4_width = np.nan\n nearest4_height = np.nan\n try:\n nearest5_box = new_boxlist[nearest_dict[4]['index']]\n nearest5_width = round(float(nearest5_box[2] - nearest5_box[0]))\n nearest5_height = round(float(nearest5_box[3] - nearest5_box[1]))\n except IndexError:\n nearest5_width = np.nan\n nearest5_height = np.nan\n\n try:\n nearest_radian = nearest_dict[0]['radian']\n nearest_distance = nearest_dict[0]['distance']\n except IndexError:\n nearest_radian = np.nan\n nearest_distance = np.nan\n try:\n nearest_radian2 = nearest_dict[1]['radian']\n nearest_distance2 = nearest_dict[1]['distance']\n except IndexError:\n nearest_radian2 = np.nan\n nearest_distance2 = np.nan\n
= np.nan\n try:\n nearest_radian3 = nearest_dict[2]['radian']\n nearest_distance3 = nearest_dict[2]['distance']\n except IndexError:\n nearest_radian3 = np.nan\n nearest_distance3 = np.nan\n try:\n nearest_radian4 = nearest_dict[3]['radian']\n nearest_distance4 = nearest_dict[3]['distance']\n except IndexError:\n nearest_radian4 = np.nan\n nearest_distance4 = np.nan\n try:\n nearest_radian5 = nearest_dict[4]['radian']\n nearest_distance5 = nearest_dict[4]['distance']\n except IndexError:\n nearest_radian5 = np.nan\n nearest_distance5 = np.nan\n\n center_point = get_center_point(bbox)\n sub_str = f\"{label} {center_point[0]} {center_point[1]}\"\n width = bbox[2] - bbox[0]\n height = bbox[3] - bbox[1]\n x_center, y_center = center_point\n current_info = {\n 'image_id': image_id,\n 'char': label,\n 'char_score': prob,\n 'is_hiragana': check_hiragana(label),\n 'bbox': bbox,\n 'bbox_score': box_score,\n # 'broken': broken,\n # 'inside': inside,\n # 'has_box': has_box,\n 'overlap_rate': overlap_rate,\n 'page_width': size[0],\n 'page_height': size[1],\n # 'width': width,\n 'width_page_rate': width / size[0],\n 'width_mean_rate': width / mean_width,\n 'width_std_rate': width / std_width if std_width else 0.,\n 'width_median_rate': width / median_width,\n # 'height': height,\n 'height_page_rate': height / size[1],\n 'height_mean_rate': height / mean_height,\n 'height_std_rate': height / std_height if std_height else 0.,\n 'height_median_rate': height / median_height,\n # 'area': width * height,\n 'area_page_rate': width * height / (size[0] * size[1]),\n 'area_mean_rate': width * height / mean_area,\n 'area_std_rate': width * height / std_area if std_area else 0,\n 'area_median_rate': width * height / median_area,\n # 'nearest_width': nearest_width,\n 'nearest_width_page_rate': nearest_width / size[0],\n 'nearest_width_mean_rate': nearest_width / mean_width,\n 'nearest_width_std_rate': nearest_width / std_width if std_width else 0,\n 'nearest_width_median_rate': nearest_width / median_width,\n # 'nearest2_width': nearest2_width,\n 'nearest2_width_page_rate': nearest2_width / size[0],\n 'nearest2_width_mean_rate': nearest2_width / mean_width,\n 'nearest2_width_std_rate': nearest2_width / std_width if std_width else 0,\n 'nearest2_width_median_rate': nearest2_width / median_width,\n # 'nearest3_width': nearest3_width,\n 'nearest3_width_page_rate': nearest3_width / size[0],\n 'nearest3_width_mean_rate': nearest3_width / mean_width,\n 'nearest3_width_std_rate': nearest3_width / std_width if std_width else 0,\n 'nearest3_width_median_rate': nearest3_width / median_width,\n # 'nearest4_width': nearest4_width,\n 'nearest4_width_page_rate': nearest4_width / size[0],\n 'nearest4_width_mean_rate': nearest4_width / mean_width,\n 'nearest4_width_std_rate': nearest4_width / std_width if std_width else 0,\n 'nearest4_width_median_rate': nearest4_width / median_width,\n # 'nearest5_width': nearest5_width,\n 'nearest5_width_page_rate': nearest5_width / size[0],\n 'nearest5_width_mean_rate': nearest5_width / mean_width,\n 'nearest5_width_std_rate': nearest5_width / std_width if std_width else 0,\n 'nearest5_width_median_rate': nearest5_width / median_width,\n # 'nearest_height': nearest_height,\n 'nearest_height_page_rate': nearest_height / size[1],\n 'nearest_height_mean_rate': nearest_height / mean_height,\n 'nearest_height_std_rate': nearest_height / std_height if std_height else 0,\n 'nearest_height_median_rate': nearest_height / median_height,\n # 'nearest2_height': nearest2_height,\n 'nearest2_height_page_rate': 
nearest2_height / size[1],\n 'nearest2_height_mean_rate': nearest2_height / mean_height,\n 'nearest2_height_std_rate': nearest2_height / std_height if std_height else 0,\n 'nearest2_height_median_rate': nearest2_height / median_height,\n # 'nearest3_height': nearest3_height,\n 'nearest3_height_page_rate': nearest3_height / size[1],\n 'nearest3_height_mean_rate': nearest3_height / mean_height,\n 'nearest3_height_std_rate': nearest3_height / std_height if std_height else 0,\n 'nearest3_height_median_rate': nearest3_height / median_height,\n # 'nearest4_height': nearest4_height,\n 'nearest4_height_page_rate': nearest4_height / size[1],\n 'nearest4_height_mean_rate': nearest4_height / mean_height,\n 'nearest4_height_std_rate': nearest4_height / std_height if std_height else 0,\n 'nearest4_height_median_rate': nearest4_height / median_height,\n # 'nearest5_height': nearest5_height,\n 'nearest5_height_page_rate': nearest5_height / size[1],\n 'nearest5_height_mean_rate': nearest5_height / mean_height,\n 'nearest5_height_std_rate': nearest5_height / std_height if std_height else 0,\n 'nearest5_height_median_rate': nearest5_height / median_height,\n # 'nearest_area': nearest_width * nearest_height,\n 'nearest_area_page_rate': nearest_width * nearest_height / (size[0] * size[1]),\n 'nearest_area_mean_rate': nearest_width * nearest_height / mean_area,\n 'nearest_area_std_rate': nearest_width * nearest_height / std_area if std_area else 0,\n 'nearest_area_median_rate': nearest_width * nearest_height / median_area,\n # 'nearest2_area': nearest2_width * nearest2_height,\n 'nearest2_area_page_rate': nearest2_width * nearest2_height / (size[0] * size[1]),\n 'nearest2_area_mean_rate': nearest2_width * nearest2_height / mean_area,\n 'nearest2_area_std_rate': nearest2_width * nearest2_height / std_area if std_area else 0,\n 'nearest2_area_median_rate': nearest2_width * nearest2_height / median_area,\n # 'nearest3_area': nearest3_width * nearest3_height,\n 'nearest3_area_page_rate': nearest3_width * nearest3_height / (size[0] * size[1]),\n 'nearest3_area_mean_rate': nearest3_width * nearest3_height / mean_area,\n 'nearest3_area_std_rate': nearest3_width * nearest3_height / std_area if std_area else 0,\n 'nearest3_area_median_rate': nearest3_width * nearest3_height / median_area,\n # 'nearest4_area': nearest4_width * nearest4_height,\n 'nearest4_area_page_rate': nearest4_width * nearest4_height / (size[0] * size[1]),\n 'nearest4_area_mean_rate': nearest4_width * nearest4_height / mean_area,\n 'nearest4_area_std_rate': nearest4_width * nearest4_height / std_area if std_area else 0,\n 'nearest4_area_median_rate': nearest4_width * nearest4_height / median_area,\n # 'nearest5_area': nearest5_width * nearest5_height,\n 'nearest5_area_page_rate': nearest5_width * nearest5_height / (size[0] * size[1]),\n 'nearest5_area_mean_rate': nearest5_width * nearest5_height / mean_area,\n 'nearest5_area_std_rate': nearest5_width * nearest5_height / std_area if std_area else 0,\n 'nearest5_area_median_rate': nearest5_width * nearest5_height / median_area,\n # 'nearest_distance': nearest_dict[0]['distance'],\n 'nearest_distance_page_width_rate': nearest_distance / size[0],\n 'nearest_distance_page_height_rate': nearest_distance / size[1],\n # 'nearest2_distance': nearest_dict[1]['distance'],\n 'nearest2_distance_page_width_rate': nearest_distance2 / size[0],\n 'nearest2_distance_page_height_rate': nearest_distance2 / size[1],\n # 'nearest3_distance': nearest_dict[2]['distance'],\n 'nearest3_distance_page_width_rate': nearest_distance3 / 
size[0],\n 'nearest3_distance_page_height_rate': nearest_distance3 / size[1],\n # 'nearest4_distance': nearest_dict[3]['distance'],\n 'nearest4_distance_page_width_rate': nearest_distance4 / size[0],\n 'nearest4_distance_page_height_rate': nearest_distance4 / size[1],\n # 'nearest5_distance': nearest_dict[4]['distance'],\n 'nearest5_distance_page_width_rate': nearest_distance5 / size[0],\n 'nearest5_distance_page_height_rate': nearest_distance5 / size[1],\n 'nearest_radian': nearest_radian,\n 'nearest2_radian': nearest_radian2,\n 'nearest3_radian': nearest_radian3,\n 'nearest4_radian': nearest_radian4,\n 'nearest5_radian': nearest_radian5,\n # 'x': x_center,\n # 'y': y_center,\n 'x_mean_rate': x_center / mean_x,\n 'y_mean_yrate': y_center / mean_y,\n 'x_std_rate': x_center / std_x,\n 'y_std_rate': y_center / std_y,\n 'x_median_rate': x_center / median_x,\n 'y_median_rate': y_center / median_y,\n 'x_page_rate': x_center / size[0],\n 'y_page_rate': y_center / size[1],\n 'mean_area': mean_area,\n 'mean_width': mean_width,\n 'mean_height': mean_height,\n 'mean_x': mean_x,\n 'mean_y': mean_y,\n 'std_area': std_area,\n 'std_width': std_width,\n 'std_height': std_height,\n 'std_x': std_x,\n 'std_y': std_y,\n 'median_area': median_area,\n 'median_width': median_width,\n 'median_height': median_height,\n 'median_x': median_x,\n 'median_y': median_y,\n 'box_num': box_num,\n }\n return sub_str, current_info\n\n\ndef gen_csv_lgbm(prob_threshold, model_path, booster=False):\n\n if booster:\n with open(model_path, \"rb\") as fp:\n boosters = pickle.load(fp)\n else:\n with open(model_path, \"rb\") as fp:\n model = pickle.load(fp)\n\n after_score = []\n\n res = open('first_model_submission.csv', 'w')\n res.write('image_id,labels\\n')\n\n write_count = 0\n\n for target_index in range(0, count):\n image_id = target_images[target_index]\n\n target_file = f'test_images/{image_id}.jpg'\n denoised_target_file = f'input/denoised_test/{image_id}.png'\n\n load_file = os.path.join(save_dir, image_id + '.pickle')\n with open(load_file, 'rb') as f:\n r = pickle.load(f)\n\n size = r['size']\n prob_list = r['prob_list']\n pred_labels = r['pred_labels']\n bbox_score = r['bbox_score']\n new_boxlist = r['new_boxlist']\n\n sub_info = []\n sub_list = []\n char_score_list = []\n box_score_list = []\n\n ## check box\n broken_box_list, inside_box_list, has_box_list, overlap_rate_list = check_box(new_boxlist, size, prob_list)\n\n ## check nearest box\n nearest_dict_list = get_nearest_box(new_boxlist)\n\n if cropping:\n orgimg = Image.open(denoised_target_file).convert('RGB')\n\n for i, (prob, label, bbox, box_score, broken, overlap_rate, nearest_dict) in \\\n enumerate(zip(prob_list, pred_labels, new_boxlist, bbox_score, broken_box_list, overlap_rate_list, nearest_dict_list)):\n\n sub_str, current_info = gen_info(prob, label, bbox, box_score, broken, overlap_rate, nearest_dict, new_boxlist, size, image_id)\n\n sub_info.append(current_info)\n sub_list.append(sub_str)\n char_score_list.append(prob)\n box_score_list.append(box_score)\n\n sub_df = pd.DataFrame(sub_info)\n try:\n sub_df = sub_df.drop(['char', 'bbox', 'image_id'], axis=1)\n except KeyError:\n # sub_info is empty\n pass\n\n if len(sub_df) > 0:\n if booster:\n y_pred_list = []\n for bst in boosters:\n y_pred_list.append(bst.predict(sub_df, num_iteration=bst.best_iteration))\n y_pred = np.average(y_pred_list, axis=0)\n else:\n y_pred = model.predict(sub_df, num_iteration=model.best_iteration)\n\n tmp_sub_list = []\n for current_info, sub, prob, char_score, 
box_score in zip(sub_info, sub_list, y_pred, char_score_list, box_score_list):\n (xmin, ymin, xmax, ymax) = current_info['bbox']\n\n if prob >= prob_threshold:\n tmp_sub_list.append(sub)\n\n if char_score >= crop_prob and cropping:\n (xmin, ymin, xmax, ymax) = current_info['bbox']\n label = current_info['char']\n image_id = current_info['image_id']\n\n w = xmax - xmin\n h = ymax - ymin\n if expand_crop:\n padding = round((w+h)/2 * padding_rate)\n xmin = max(xmin - padding, 0)\n ymin = max(ymin - padding, 0)\n xmax = min(xmax + padding, size[0])\n ymax = min(ymax + padding, size[1])\n else:\n pass\n img_crop = orgimg.crop((xmin, ymin, xmax, ymax))\n target_save_dir = os.path.join(crop_target_dir, label)\n os.makedirs(target_save_dir, exist_ok=True)\n target_filename = f\"{image_id}_{xmin}_{ymin}_{xmax}_{ymax}.png\"\n save_path = os.path.join(target_save_dir, target_filename)\n img_crop.save(save_path)\n\n sub_list = tmp_sub_list\n\n else:\n sub_list = []\n\n sub_labels = ' '.join(sub_list)\n res.write(image_id.rstrip() + ',' + sub_labels + '\\n')\n res.flush()\n write_count += 1\n print(\".\", end='')\n\n res.close()\n print('')\n print('write_count:', write_count)\n\ngen_csv_lgbm(0.50, \"models/booster_for_val_nms030_tta7_first_5models_soft_prob.pkl\", booster=True)\n","sub_path":"scripts/gen_pseudo_label.py","file_name":"gen_pseudo_label.py","file_ext":"py","file_size_in_byte":19275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"352372061","text":"import unittest\nfrom youtube2notion.youtube_video import YoutubeVideo\nfrom os.path import isfile\n\n\nclass TestYoutubeVideo(unittest.TestCase):\n\n def setUp(self):\n self.video_id = 'Kc_cvAXCs4Y'\n\n def test_get_output_filename(self):\n self.assertEqual(\n YoutubeVideo.get_output_filename(self.video_id,\n './tmp/%s/' % self.video_id),\n './tmp/Kc_cvAXCs4Y/Kc_cvAXCs4Y.mp4')\n\n def test_to_url(self):\n self.assertEqual(\n YoutubeVideo.to_url(self.video_id), 'https://youtu.be/Kc_cvAXCs4Y')\n\n def test_download(self):\n downloaded_video_filename = YoutubeVideo.download(\n video_id=self.video_id,\n output_dir=YoutubeVideo.get_output_filename(\n self.video_id, './tmp/%s/' % self.video_id))\n\n self.assertTrue(isfile(downloaded_video_filename))\n","sub_path":"tests/youtube2notion/test_youtube_video.py","file_name":"test_youtube_video.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"96098407","text":"class Solution(object):\r\n\tdef search(self, nums, target):\r\n\t\tlo, hi = 0, len(nums)-1\r\n\t\twhile lo <= hi:\r\n\t\t\tmid = lo + (hi - lo)//2\r\n\t\t\tif nums[mid] == target:\r\n\t\t\t\treturn True\r\n\t\t\tif nums[mid] == nums[lo] == nums[hi]:\r\n\t\t\t\tlo += 1\r\n\t\t\t\thi -= 1\r\n\t\t\telif nums[lo] <= nums[mid]:\r\n\t\t\t\tif nums[lo] <= target < nums[mid]:\r\n\t\t\t\t\thi = mid - 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tlo = mid + 1\r\n\t\t\telse:\r\n\t\t\t\tif nums[mid] < target <= nums[hi]:\r\n\t\t\t\t\tlo = mid + 1\r\n\t\t\t\telse:\r\n\t\t\t\t\thi = mid - 1\r\n\t\treturn False\r\n","sub_path":"081.search-in-rotated-sorted-array-ii.py","file_name":"081.search-in-rotated-sorted-array-ii.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"553149545","text":"# -*- encoding: utf-8 -*-\n\nfrom yuancloud.osv import fields, osv\n\n\nclass res_partner(osv.osv):\n \n _name = 'res.partner'\n _inherit = 
'res.partner'\n _columns = {\n 'state_id': fields.many2one(\"res.country.state\", 'Ubication', domain=\"[('country_id','=',country_id),('type','=','normal')]\"),\n }\n \n\nres_partner()\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"yuancloud/extend/base_state_ubication/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"528497125","text":"# -*- coding: utf-8 *-*\nimport re\nfrom unicodedata import normalize\n\n_punct_re = re.compile(r'[\\t !\"#$%&\\'()*\\:\\;\\-/<=>?@\\[\\\\\\]^_`{|},.]+')\n\n\ndef get_slug(input_text, delim=u\"-\"):\n result = []\n for word in _punct_re.split(input_text.lower()):\n word = normalize('NFKD', word).encode('ascii', 'ignore')\n if word:\n result.append(word)\n return unicode(delim.join(result))\n","sub_path":"selene/helpers/string_helper.py","file_name":"string_helper.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"108974411","text":"@staticmethod\ndef string_to_datetime(in_str):\n ret_val = None\n try:\n a = datetime.strptime(in_str, '%Y-%m-%d %H:%M:%S.%f')\n #2015-07-14 08:14:01.375010\n ret_val = a\n except:\n d_items = in_str.split(\" \")[0].split(\"-\")\n t_items = in_str.split(\" \")[1].split(\":\")\n\n try:\n b = datetime(int(d_items[0]), int(d_items[1]), int(d_items[2]), int(t_items[0]), int(t_items[1]), int(t_items[2].split(\".\")[0]), int(t_items[2].split(\".\")[1]))\n ret_val = b\n except:\n c = datetime(int(d_items[0]), int(d_items[1]), int(d_items[2]), int(t_items[0]), int(t_items[1]), int(t_items[2]), 0)\n ret_val = c\n\n return ret_val\n\n","sub_path":"classes/Helpers_/string_to_datetime.py","file_name":"string_to_datetime.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"546774226","text":"#!/usr/bin/python\n\nimport matplotlib.pyplot as plt\nfrom accelrationvector import AccelrationVector\nfrom timekeeper import TimeKeeper\n\nclass Plot(object):\n\t\"\"\"Hold the real plots\"\"\"\n\tdef __init__(self, figsize=(16,12), max_points=20, scaling=4, y_min=-2, y_max=2):\n\t\tsuper(Plot, self).__init__()\n\t\tself.figsize = figsize\n\t\tself.figure = plt.figure(figsize=self.figsize, facecolor='#ffffff')\n\t\tself.axes = {\n\t\t\t'x':self.figure.add_subplot(311),\n\t\t\t'y':self.figure.add_subplot(312),\n\t\t\t'z':self.figure.add_subplot(313)\n\t\t}\n\t\tself.max_points = max_points\n\t\tself.scaling = scaling\n\t\tself.A = AccelrationVector(max_points=self.max_points)\n\t\tself.T = TimeKeeper(max_points=self.max_points, scaling=self.scaling)\n\n\t\tif self.A.max_points != self.T.max_points :\n\t\t\traise ValueError(\"TimeKeeper and AccelrationVector objects should have same max \")\n\n\t\t#draw the first version\n\t\tfor acc, axis in self.axes.iteritems():\n\t\t\taxis.set_title(acc)\n\t\t\taxis.set_ylim([y_min, y_max])\n\t\t\taxis.set_xlim([1.0/self.T.scaling, self.T.max_points/self.T.scaling])\n\n\t\tplt.ion()\n\t\tplt.tight_layout()\n\t\tplt.show()\n\n\tdef update_axes(self):\n\t\tfor direction, axis in self.axes.iteritems():\n\t\t\tchange = self.T.max_points/self.T.scaling - 1.0/self.T.scaling\n\t\t\txmin,xmax = axis.get_xlim()\n\n\t\t\taxis.set_xlim(xmin+change, xmax+change)\n\t\t\t\t\n\tdef update(self, ax, ay, az):\n\t\tself.A.update([ax, ay, az]) \n\t\tself.T.tick()\n\t\ti = 0 
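\n\t\t# NOTE: the loop below re-plots the full series on every update without\n\t\t# clearing the axes, so Line2D artists accumulate over time; past samples\n\t\t# stay visible, but memory use grows with the number of updates.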
\n\t\tfor direction, axis in self.axes.iteritems():\n\t\t\tif self.T.points[-1] >= self.max_points/self.scaling :\n\t\t\t\taxis.set_axis_bgcolor('white')\n\t\t\taxis.plot(self.T.points , self.A[direction], alpha=0.5, linewidth=0.05, color='red')\n\t\tplt.draw()","sub_path":"legacy/2Dplotter/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"253843823","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/lykkelleloader/loadstockhisteod.py\n# Compiled at: 2020-01-24 20:23:33\n# Size of source mod 2**32: 12637 bytes\n\"\"\"\nCreated on Sat Jul 6 11:25:21 2019\n\n@author: debmishra\n\"\"\"\nimport lykkelleconf.connecteod as c\nimport lykkelleconnector.geteodstock as ysh\nimport datetime as dt\nimport lykkelleconf.workday as wd\nimport sys\nimport lykkelleconf.time2epoch2time as et\nimport psycopg2 as pgs, time, os, pandas as pd, math\nhome = os.path.expanduser('~')\n\nclass loadstockhist:\n\n def stockhist(ticker, todate, fromdate, sourcetable, cursor):\n loaddata = []\n fromdate = fromdate.strftime('%Y-%m-%d')\n todate = todate.strftime('%Y-%m-%d')\n if todate == fromdate:\n print('the dates used for history calculation are same.\\n History requires delta of dates. Moving to next ticker')\n status = -999\n header = None\n else:\n histr = ysh(ticker, fromdate, todate)\n status = histr.sts\n header = histr.header\n att = 0\n while status != 200:\n if status != -999:\n if att <= 5:\n print('begins wait of 30 sec. att-', att)\n time.sleep(30)\n histr = ysh(ticker, fromdate, todate)\n status = histr.sts\n header = histr.header\n att = att + 1\n else:\n print('even after 6 reattempts not getting status code 200')\n status = -899\n break\n\n if status == 200:\n val = histr.stock_Response\n if val is not None:\n lt = len(val)\n if lt > 0:\n load = 0\n badload = 0\n ldate = []\n lprice = []\n lvolume = []\n lticker = []\n lsource = []\n for i in range(lt):\n try:\n volume = val[i].get('volume')\n if volume is not None and volume > 0:\n volume = math.floor(volume)\n volume = int(volume)\n else:\n volume = 0\n except AttributeError:\n volume = None\n\n try:\n close = val[i].get('adjusted_close')\n except AttributeError:\n try:\n close = val[i].get('close')\n except AttributeError:\n close = None\n\n try:\n pricedate = val[i].get('date')\n except AttributeError:\n pricedate = None\n\n if pricedate is not None and close is not None:\n if 'benchmark' in sourcetable:\n delq = 'delete from benchmark_history where symbol=%s\\n and price_date between %s and %s'\n copq = 'benchmark_history'\n else:\n delq = 'delete from stock_history where symbol=%s\\n and price_date between %s and %s'\n copq = 'stock_history'\n try:\n ldate.append(pricedate)\n lprice.append(close)\n lvolume.append(volume)\n lticker.append(ticker)\n lsource.append(sourcetable)\n load = load + 1\n except Exception as e:\n try:\n badload = badload + 1\n print(e)\n finally:\n e = None\n del e\n\n else:\n print('Failed load for ', ticker, ' had a close price of ', close, 'for date ', pricedate)\n\n data = {'ticker':lticker, \n 'pricedate':ldate, \n 'price':lprice, \n 'volume':lvolume, \n 'sourcetable':lsource}\n myfile = './tmp/ticker.csv'\n df = pd.DataFrame(data, columns=['ticker', 'pricedate', 'price', 'volume', 'sourcetable'])\n df.to_csv(myfile, index=None, 
header=False)\n cursor.execute(delq, (ticker, fromdate, todate))\n f = open(myfile, 'r')\n cursor.copy_from(f, copq, columns=('symbol', 'price_date', 'price',\n 'volume', 'source_table'), sep=',')\n f.close()\n os.remove(myfile)\n loaddata = [ticker, load, badload]\n return loaddata\n print('No history data found for ticker', ticker, 'and dates ', fromdate, ':', todate)\n loaddata = [ticker, 'fail']\n return loaddata\n else:\n print('No history data found for ticker', ticker, 'and dates ', fromdate, ':', todate)\n loaddata = [status, ticker]\n return loaddata\n else:\n if status == -899:\n print('Error code 200 after 5 attempts for ticker', ticker, 'and dates ', fromdate, ':', todate)\n loaddata = [ticker, 'fail']\n if header is not None:\n ldate = dt.date.today()\n ldate = wd.workday(str(ldate)).sdate()\n myheader = header.get('X-RateLimit-Remaining')\n else:\n myheader = None\n desc = (\n ticker + ':' + str(fromdate) + ':' + str(todate) + ':=', status)\n nrtbl = 'insert into ticker_no_response_list\\n (symbol, load_date,src,\"description\",tablename,errorcode,headeroutput)\\n values (%s,%s,%s,%s,%s,%s,%s) ON CONFLICT DO NOTHING'\n cursor.execute(nrtbl, (ticker, ldate, 'mhistory', desc, sourcetable, status, myheader))\n return loaddata\n print('Ignored as data already present for ticker', ticker, 'and dates ', fromdate, ':', todate)\n loaddata = [ticker, 'ignore']\n return loaddata\n\n def __init__(self, ticker, sourcetable, mode, jday, cursor):\n self.ticker = ticker\n self.sourcetable = sourcetable\n td = dt.date.today()\n iwd = td.isoweekday()\n todate = wd.workday(str(td)).sdate()\n todate = dt.datetime.strptime(todate, '%Y-%m-%d').date()\n modevalue = ['A', 'M']\n if mode in modevalue:\n pass\n else:\n print('Possible entries in mode are A or M. 
System will exit now')\n sys.exit(1)\n if not iwd == 6:\n if iwd == 7 or mode == 'M':\n if 'benchmark' in self.sourcetable:\n maxdate = 'select max(price_date) from benchmark_history\\n where symbol = %s'\n mindate = 'select min(price_date) from benchmark_history\\n where symbol = %s'\n else:\n maxdate = 'select max(price_date) from stock_history\\n where symbol = %s'\n mindate = 'select min(price_date) from stock_history\\n where symbol = %s'\n cursor.execute(maxdate, (ticker,))\n md = cursor.fetchone()\n cursor.execute(mindate, (ticker,))\n mid = cursor.fetchone()\n md = md[0]\n mid = mid[0]\n eparam = 1\n if not md is None:\n if md == mid or mode == 'M':\n wks = 780\n fromdate = todate - dt.timedelta(weeks=wks)\n elif md < todate - dt.timedelta(days=2):\n fromdate = md\n else:\n eparam = 0\n if eparam == 0:\n print('No valid history since the max history date is equal or > to latest date for symbol:', self.ticker)\n jobload = \"update jobrunlist\\n set runstatus = 'ignored' where symbol=%s and\\n runsource='mhistory' and rundate=%s and jobtable=%s \"\n try:\n cursor.execute(jobload, (ticker, jday, sourcetable))\n print(ticker, ' job executed successfully')\n except pgs.Error as e:\n try:\n print(e.pgerror)\n finally:\n e = None\n del e\n\n else:\n shload = loadstockhist.stockhist(self.ticker, todate, fromdate, self.sourcetable, cursor)\n ignr = 0\n if shload[1] == 'fail':\n print('the ticker ', shload[0], \"didn't find a result from eodhd\")\n rdate = dt.datetime.today().date()\n print('date for no response:', rdate)\n print(shload[0], ' was not able to fetch history data from EODHD even after 5 attempts')\n else:\n if shload[1] == 'ignore':\n print('the ticker ', shload[0], ' was ignored from run as data already present')\n rdate = dt.datetime.today().date()\n print('date for no response:', rdate)\n print(shload[0], ' was ignored as data already present')\n ignr = 1\n else:\n if isinstance(shload[0], int) is True and shload[0] != -999:\n rdate = dt.datetime.today().date()\n print('date for no response:', rdate)\n print(self.ticker, ' was not able to fetch history data from EODHD even after 5 attempts')\n else:\n if isinstance(shload[0], int) is True and shload[0] == -999:\n print(self.ticker, \"'s history is too recent. There should be at least>1 days delta\")\n ignr = 1\n else:\n print('For the ticker:', shload[0], ', total of ', shload[1], ' number of successful records were loaded and ', shload[2], ' number of records could not be loaded')\n if ignr == 1:\n jobload = \"update jobrunlist\\n set runstatus = 'ignored' where symbol=%s and\\n runsource='mhistory' and rundate=%s and jobtable=%s \"\n try:\n cursor.execute(jobload, (ticker, jday, sourcetable))\n print(ticker, ' job executed successfully')\n except pgs.Error as e:\n try:\n print(e.pgerror)\n finally:\n e = None\n del e\n\n else:\n jobload = \"update jobrunlist\\n set runstatus = 'complete' where symbol=%s and\\n runsource='mhistory' and rundate=%s and jobtable=%s \"\n try:\n cursor.execute(jobload, (ticker, jday, sourcetable))\n print(ticker, ' job executed successfully')\n except pgs.Error as e:\n try:\n print(e.pgerror)\n finally:\n e = None\n del e\n\n else:\n print('not a weekend. 
scrapping history run')\n sys.exit(1)","sub_path":"pycfiles/lykkelleloader-0.0.11-py3.7/loadstockhisteod.cpython-37.py","file_name":"loadstockhisteod.cpython-37.py","file_ext":"py","file_size_in_byte":12596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"517387983","text":"''' Pre-sanity steps:\n1. Allow Root login and Password authentication in /etc/ssh/sshd_config\n2. restart sshd\n'''\n\nimport paramiko\nimport os\n\nnodes = (\"noden19\", \"noden20\", \"nodec9\", \"noden29\", \"nodei34\", \"nodeg20\")\nmgmt_ips = (\"192.168.7.19\", \"192.168.7.20\", \"192.168.7.9\", \"192.168.7.29\", \"192.168.7.34\", \"192.168.7.60\")\n\ncommands = [\n \"sudo sed -i -e 's/PasswordAuthentication no/PasswordAuthentication yes/' -e 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config\",\n \"sudo systemctl restart sshd\"\n]\n\nfor ip in mgmt_ips:\n os.system(f'ssh-keygen -f \"/root/.ssh/known_hosts\" -R \"{ip}\"')\n\n\ndef execute_cmd_on_remote(i):\n client = paramiko.SSHClient()\n try:\n # k = paramiko.RSAKey.from_private_key_file('~/.ssh/id_rsa')\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=mgmt_ips[i], username='ubuntu')\n except:\n print(\"[!] Cannot connect to the SSH Server\")\n exit()\n \n for command in commands:\n print(\"=\"*50, command, \"=\"*50)\n stdin, stdout, stderr = client.exec_command(command)\n print(stdout.read().decode())\n err = stderr.read().decode()\n if err:\n print(err)\n client.close()\n\ndef ssh_sed(machine='all'):\n if machine == 'all':\n for i in range(len(nodes)):\n execute_cmd_on_remote(i)\n else:\n execute_cmd_on_remote(machine)\n\n\n# By default on all machines if no parameter is specified\nssh_sed(machine='all') \n# ssh_sed(machine=3) \n# ssh_sed(machine=4) \n","sub_path":"scripts/pre-sanity.py","file_name":"pre-sanity.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"186453199","text":"\nfrom glob import glob\nimport urllib2\nimport sys\nfrom urllib2 import HTTPError\nimport pickle\nimport os\n\nfrom time import sleep\n\nfrom bs4 import BeautifulSoup\nfrom unidecode import unidecode\n\nif __name__ == \"__main__\":\n if os.path.isfile(\"cache.pkl\"):\n cache = pickle.load(open(\"cache.pkl\", 'rb'))\n print(\"Read %i from cache\" % len(cache))\n sleep(10)\n else:\n cache = {}\n\n buffer = \"\"\n for ii in glob(\"%s/*\" % sys.argv[1]):\n pickle.dump(cache, open(\"%s.pkl\" % sys.argv[1], 'wb'))\n for line, jj in enumerate(open(ii)):\n print(jj)\n if jj in cache:\n buffer += cache[jj]\n else:\n sleep(15)\n try:\n res = urllib2.urlopen('http://www.poemhunter.com/%s' % jj.strip())\n html = unidecode(res.read())\n except HTTPError:\n print(\"Error!\")\n sleep(60)\n continue\n\n try:\n text = html.split('
    ')[1]\n text = text.split(\"\")[0]\n\n if \"\" in text:\n text = text.rsplit(\"\", 1)[1]\n\n soup = BeautifulSoup(text, 'html.parser')\n cache[jj] = \"%s\\n\\n\\n\" % jj.replace(\"-\", \" \").replace(\"poem/\", \"\")\n\n print(\"%s %i %s\" % (sys.argv[1], line,\n \":\".join(x.strip() for x in\n list(soup.strings)[:5])))\n cache[jj] += \" \".join(soup.strings)\n buffer += \"%s\\n\\n\" % cache[jj]\n except IndexError:\n print(\"Index error\")\n\n o = open(\"%s.txt\" % sys.argv[1], 'w')\n o.write(unidecode(buffer))\n","sub_path":"util/poets/get_poem.py","file_name":"get_poem.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"517053888","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.views.generic.simple import direct_to_template\nfrom mango.contacts.views import *\n\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n (r'^admin/', include(admin.site.urls)),\n (r'^accounts/', include('userena.urls')),\n (r'^messages/', include('userena.contrib.umessages.urls')),\n url(r'^AddContact/', AddContact,name=\"addcontact\"),\n (r'^check/', UpContact),\n url(r'^deletecontact/(?P[\\.\\w]+)/', delete_contact, name=\"deletecontact\"),\n url(r'^accounts/(?P[\\.\\w]+)/AddEvent/', AddEvent, name=\"newevent\" ),\n url(r'^accounts/(?P[\\.\\w]+)/editcontact/', edit_contact, name=\"editcontact\" ),\n url(r'^accounts/(?P[\\.\\w]+)/contact/', view_all,name=\"contact\"),\n url(r'^accounts/contact/(?P[\\.\\w]+)/(?P[\\.\\w]+)/', edit_event,name=\"editevent\"),\n url(r'^accounts/(?P[\\.\\w]+)/(?P[\\.\\w]+)/contact/', addhistory,name=\"history\"),\n url(r'^accounts/(?P[\\.\\w]+)/viewcontact/',view_contact,name=\"viewcontact\"),\n ( r'^face/', include( 'facebook.urls' ) ),\n url(r'^$',\n direct_to_template,\n {'template': 'static/promo.html'},\n name='promo'),\n (r'^i18n/', include('django.conf.urls.i18n')),\n (r'^media/mugshots/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT+'/mugshots'}),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^media/(?P.*)$',\n 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True, }),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"544597794","text":"# coding: utf-8\n\"\"\"Setup script for IVA TPU.\"\"\"\n\nfrom setuptools import find_packages, setup\nfrom distutils.extension import Extension\nfrom Cython.Build import cythonize\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(name='iva-tpu',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n version=\"7.0.3\",\n author=\"Maxim Moroz\",\n author_email=\"m.moroz@iva-tech.ru\",\n description=\"IVA TPU Python API\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"http://git.mmp.iva-tech.ru/tpu_sw/pytpu\",\n install_requires=[\n 'numpy>=1.14',\n 'Cython'\n ],\n ext_modules=cythonize([\n Extension(\"iva_tpu.server.tpu\", ['src/iva_tpu/server/tpu.pyx'], libraries=[\"tpu\"]),\n ]),\n python_requires='>=3.6',\n 
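# NOTE: the Cython extension above links against the native 'tpu' library,\n # so libtpu must be discoverable by the compiler/linker at build time.\n 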
)\n","sub_path":"pypi_install_script/iva-tpu-7.0.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"166336430","text":"from buildbot.buildslave import BuildSlave\nfrom buildbot import locks\n\nimport general\n\n# This properties are common to all slaves, to override them just specify the value in the slave\nslaves_default_properties = {\n\t'accesskey': 'z3n0n1',\n\t'parallel_builds': general.max_parallel_builds,\n\t'admin': 'nicolas.baglivo@gmail.com',\n}\n\nlinux32 = {\n\t'name': 'Linux32',\n\t'env': {},\n}\n\nwinSeven32 = {\n\t'name': 'WinSeven32',\n\t'env': {},\n}\n\nlinux64 = {\n\t'name': 'Linux64',\n\t'env': {},\n}\n\n\ndef configure(config):\n\tslaves = [linux32, winSeven32, linux64]\n\n\t# Configure default common properties\n\tfor slave in slaves:\n\t\tfor k, v in slaves_default_properties.items():\n\t\t\tif k not in slave:\n\t\t\t\tslave[k] = v\n\n\t\t# Add lock to slave !\n\t\tslave['lock'] = locks.MasterLock(slave['name'], maxCount = slave['parallel_builds'])\n\n\t\t# Add slave !\n\t\tconfig['slaves'].append(\n\t\t\tBuildSlave(\n\t\t\t\tslave['name'],\n\t\t\t\tslave['accesskey'],\n\t\t\t\tnotify_on_missing = slave[\"admin\"],\n\t\t\t\tmissing_timeout = 7200,\n\t\t\t)\n\t\t)\n","sub_path":"slaves.py","file_name":"slaves.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"623488464","text":"import turtle\r\n\r\nt1 = turtle.Turtle()\r\nt1.speed(10000)\r\nt1.width(2)\r\nred = \"red\"\r\norr = \"orange\"\r\nyell = \"yellow\"\r\ngree = \"green\"\r\nblu = \"blue\"\r\nind = \"indigo\"\r\nvio = \"violet\"\r\ntest = 0\r\nflip = 0\r\nsmp = 0\r\n\r\ndef form(t,pl,pi,pu,py, smol):\r\n smol**=2\r\n t.color(pl, pi)\r\n t.begin_fill()\r\n t.forward(100)\r\n t.right(90)\r\n t.forward(100)\r\n t.left(smol)\r\n t.forward(50)\r\n t.right(45)\r\n t.forward(25)\r\n \r\n t.right(90)\r\n t.forward(100)\r\n t.right(90)\r\n t.forward(100)\r\n t.right(90)\r\n t.end_fill()\r\n t.right(6)\r\n t.right(2)\r\n\r\n t.color(pu, py)\r\n t.begin_fill()\r\n t.forward(90)\r\n t.right(90)\r\n t.forward(100)\r\n t.right(90)\r\n t.forward(90)\r\n t.right(90)\r\n t.forward(100)\r\n t.right(90)\r\n t.end_fill()\r\n t.right(8)\r\n t.right(4)\r\ndef flippy(flp, tst):\r\n if flp == 0:\r\n tst+=1\r\n return tst\r\n elif flp == 1:\r\n tst-=1\r\n return tst\r\n \r\nfor petal in range(28):\r\n test = flippy(flip, test)\r\n if test == 0:\r\n flip = 0\r\n one = red\r\n two = orr\r\n three = yell\r\n four = gree\r\n elif test == 1:\r\n one = orr\r\n two = yell\r\n three = gree\r\n four = blu\r\n elif test == 2:\r\n one = yell\r\n two = gree\r\n three = blu\r\n four = ind\r\n elif test == 3:\r\n one = gree\r\n two = blu\r\n three = ind\r\n four = vio\r\n elif test == 4:\r\n one = blu\r\n two = ind\r\n three = vio\r\n four = red\r\n elif test == 5:\r\n one = ind\r\n two = vio\r\n three = red\r\n four = orr\r\n elif test == 6:\r\n one = vio\r\n two = red\r\n three = orr\r\n four = yell\r\n flip = 1\r\n \r\n #test +=1\r\n \r\n form(t1,one,two,three,four,test)\r\n","sub_path":"1/yt.py","file_name":"yt.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"264457618","text":"# reference to file with classess and definition\r\nimport math\r\nfrom calculator_classes import Calculator\r\n\r\n#code made in a new version 
of Python - instead of raw_input we have just input\r\n#definition to change elements of the lists into floats\r\ndef intinate(lis):\r\n array_length = len(lis)\r\n for i in range(array_length):\r\n z = float(lis[i])\r\n lis[i] = z\r\n return lis\r\n\r\ndef calculate():\r\n try:\r\n # raw input from user\r\n a = intinate(input('Please enter the first list of numbers on which you would like to compute: ').split(\",\"))\r\n number_2 = input('Would you like to input second list? Y or N?')\r\n if number_2 == \"Y\":\r\n b = intinate(input('Please enter the second list of numbers: ').split(\",\"))\r\n operation = input('What would you like to do? e.g. add, multiply, subtract?')\r\n except:\r\n print(\"Input must be correct - try again\")\r\n return\r\n\r\n if number_2 == \"Y\" and operation == \"add\":\r\n print('{} + {} = '.format(a, b))\r\n print(Calculator.add(Calculator, a, b))\r\n\r\n elif number_2 == \"Y\" and operation == 'subtract':\r\n print('{} - {} = '.format(a, b))\r\n print(Calculator.subtract(Calculator, a,b))\r\n\r\n elif number_2 == \"Y\" and operation == 'multiply':\r\n print('{} * {} = '.format(a,b))\r\n print(Calculator.multiply(Calculator, a,b))\r\n\r\n elif number_2 == \"Y\" and operation == 'divide':\r\n print('{} / {} = '.format(a,b))\r\n print(Calculator.divide(Calculator, a,b))\r\n\r\n elif number_2 == \"Y\" and operation == 'exponent':\r\n print('{} ** {} = '.format(a,b))\r\n print(Calculator.exponent(Calculator, a,b))\r\n\r\n elif number_2 == \"N\" and operation == 'sqrt':\r\n print('{} = '.format(a))\r\n print(Calculator.sqrt(Calculator, a))\r\n\r\n elif number_2 == \"N\" and operation == 'square':\r\n print('{} = '.format(a))\r\n print(Calculator.square(Calculator, a))\r\n\r\n elif number_2 == \"N\" and operation == 'cube':\r\n print('{} = '.format(a))\r\n print(Calculator.cube(Calculator, a))\r\n\r\n elif number_2 == \"N\" and operation == 'sin':\r\n print('{} = '.format(a))\r\n print(Calculator.sin(Calculator, a))\r\n\r\n elif number_2 == \"N\" and operation == 'factorial':\r\n print('{} = '.format(a))\r\n print(Calculator.factorial(Calculator, a))\r\n\r\n else:\r\n print('Please type a valid operator!')\r\n\r\n\r\n# definition to give the user a chance to perform another calculation\r\ndef again():\r\n calc_again = input('Would you like to continue? 
Y/N')\r\n\r\n if calc_again == 'Y':\r\n calculate()\r\n elif calc_again == 'N':\r\n print('See you later.')\r\n else:\r\n print(\"Wrong input - type Y or N\")\r\n again()\r\n\r\n\r\ncalculate()\r\nagain()","sub_path":"calculator_app_CA5.py","file_name":"calculator_app_CA5.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"505653383","text":"# __main__.py\n\nimport argparse\nimport os\nimport sys\n\nimport requests\nimport loqed\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n\n parser = argparse.ArgumentParser(prog=sys.argv[0],\n description='Control a LOQED smart lock',\n epilog='Visit https://webhooks.loqed.com for more information')\n parser.add_argument('--version', action='version', version=loqed.__version__)\n\n sgroup = parser.add_argument_group(title='loqed api')\n sgroup.add_argument('-l', '--lock_id', required=True, help='LOQED lock ID')\n sgroup.add_argument('-k', '--lock_api_key', required=True, help='lock api key')\n sgroup.add_argument('-t', '--api_token', required=True, help='api token')\n sgroup.add_argument('-i', '--local_key_id', required=True, help='local key id')\n sgroup.add_argument('-s', '--state', required=True, choices=['LOCK', 'UNLOCK', 'OPEN'], help='LOCK, UNLOCK, OPEN')\n \n\n return parser.parse_args()\n\ndef main():\n \"\"\"Main function for pyloqed command line tool\"\"\"\n args = parse_arguments()\n\n if args.lock_id is None:\n print(\"Error: lock id not specified.\")\n sys.exit(1)\n \n if args.lock_api_key is None:\n print(\"Error: lock api key not specified.\")\n sys.exit(1)\n\n if args.api_token is None:\n print(\"Error: api token not specified.\")\n sys.exit(1)\n \n if args.local_key_id is None:\n print(\"Error: local key id not specified.\")\n sys.exit(1)\n\n if args.state is None:\n print(\"Error: state not specified.\")\n sys.exit(1)\n \n try:\n if(args.state == 'OPEN'):\n res = loqed.open(args.lock_id, args.lock_api_key, args.api_token, args.local_key_id)\n \n if(args.state == 'LOCK'):\n res = loqed.lock(args.lock_id, args.lock_api_key, args.api_token, args.local_key_id)\n\n if(args.state == 'UNLOCK'):\n res = loqed.unlock(args.lock_id, args.lock_api_key, args.api_token, args.local_key_id)\n except Exception as e:\n print(e)\n sys.exit(8)\n else:\n print(res)\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"loqed/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"645564228","text":"# Isaac Yep\n# Monte Carlo Simulator: Craps\n\nimport random\n\nwin = 0\ntot = 0\nnum_of_trials = int(input(\"How many trials would you like to perform? (positive number) \"))\n\nfor i in range(num_of_trials):\n\troll1 = random.randint(1,6)\n\troll2 = random.randint(1,6)\n\tres = roll1 + roll2\n\tif res == 7 or res == 11:\n\t\twin += 1\n\ttot += 1\n\nprob = win/tot\n\nprint(\"Probability of {} trials: {}\".format(num_of_trials, prob))\n","sub_path":"Main_Arch/in_class_CS199_1/craps.py","file_name":"craps.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"102977595","text":"import numpy as np\nimport sys\nimport math\nfrom collections import Counter\n\n\n\"\"\"\nATTENTION: Use the following dictionaries to get the correct index for each\n amino acid when accessing any type of matrix or array provided as\n parameters. 
Further, use those indices when generating or returning\n any matrices or arrays. Failure to do so will most likely result in\n not passing the tests.\nEXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'\n in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].\n\"\"\"\nALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'\nAA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}\nINT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}\nGAP_INDEX = AA_TO_INT['-']\n\n\nclass MSA:\n\n def __init__(self, sequences):\n \"\"\"\n Initialize the MSA class with the provided list of sequences. Check the\n sequences for correctness. Pre-calculate any statistics you seem fit.\n\n :param sequences: List containing the MSA sequences.\n \"\"\"\n self.validateMSA(sequences)\n self.msa = sequences\n self.weight_matrix = None\n self.num_ind_obs = None\n self.seq_weights = None\n self.calc_w_matrix_and_ind_obs()\n self.calcSeqWeights()\n\n def validateMSA(self, sequences):\n hasMultipleSequences = len(sequences) > 0\n sameLength = len(set(map(len, sequences))) in (0, 1)\n onlyValidChars = all(map(self.hasOnlyValidCharacters, sequences))\n\n if not(hasMultipleSequences and sameLength and onlyValidChars):\n raise TypeError(\"Invalid MSA!\")\n\n def hasOnlyValidCharacters(self, sequence):\n return all(aa in ALPHABET for aa in sequence)\n\n def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,\n redistribute_gaps=False, add_pseudocounts=False):\n \"\"\"\n Return a PSSM for the underlying MSA. Use the appropriate refinements \n according to the parameters. If no bg_matrix is specified, use uniform \n background frequencies.\n Every row in the resulting PSSM corresponds to a non-gap position in \n the primary sequence of the MSA (i.e. 
the first one).\n Every column in the PSSM corresponds to one of the 20 amino acids.\n Values that would be -inf must be replaced by -20 in the final PSSM.\n Before casting to dtype=numpy.int64, round all values to the nearest\n integer (do not just FLOOR all values).\n\n :param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).\n Access the matrix using the indices from AA_TO_INT.\n :param beta: Beta value (float) used to weight the pseudocounts \n against the observed amino acids in the MSA.\n :param use_sequence_weights: Calculate and apply sequence weights.\n :param redistribute_gaps: Redistribute the gaps according to the \n background frequencies.\n :param add_pseudocounts: Calculate and add pseudocounts according \n to the background frequencies.\n\n :return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).\n L = ungapped length of the primary sequence.\n \"\"\"\n # Calculate array of background frequencies\n in_use_bg_freq_arr = self.calculate_bg_freq(bg_matrix)\n in_use_bg_matrix = self.get_bg_matrix(bg_matrix)\n print(\"Background frequencies used\", in_use_bg_freq_arr)\n # Rows -> MSA positions, columns -> 20 amino acids plus one column for gaps\n pssm = np.zeros((len(self.msa[0]), 21))\n self.count_frequencies(pssm, use_sequence_weights)\n print(\"After counting\", pssm)\n pssm = self.redistribute_gaps(pssm, in_use_bg_freq_arr, redistribute_gaps)\n print(\"After redistributing\", pssm)\n \n if add_pseudocounts:\n pssm = self.add_pseudo_counts(pssm, beta, in_use_bg_matrix, in_use_bg_freq_arr)\n print(\"After adding pseudocounts\", pssm)\n\n self.normalize_to_relative_frequencies(pssm)\n print(\"After normalization\", pssm)\n pssm = self.divide_by_background_frequencies(pssm, in_use_bg_freq_arr)\n print(\"After division by bg_freq\", pssm)\n pssm = self.calculate_log_score(pssm)\n print(\"After calculating log score\", pssm)\n pssm = self.remove_gap_rows(pssm)\n print(\"After removing gap rows\", pssm)\n pssm = np.rint(pssm).astype(np.int64)\n print(\"After rounding\", pssm)\n return pssm\n\n def get_bg_matrix(self, bg_matrix):\n if bg_matrix is None:\n return np.full((20,20), 1/400)\n else:\n return bg_matrix\n\n def calculate_bg_freq(self, bg_matrix):\n if bg_matrix is None:\n return np.full(20, 1/20)\n else:\n return np.array(bg_matrix).sum(axis=0)\n\n def redistribute_gaps(self, pssm, bg_freq_arr, redistribute_gaps):\n gap_counts = pssm[:,-1]\n pssm_without_gaps = pssm[:, 0:-1]\n\n if redistribute_gaps:\n for row in range(self.get_size()[1]):\n pssm_without_gaps[row,:] = pssm_without_gaps[row,:] + bg_freq_arr * gap_counts[row]\n return pssm_without_gaps\n\n def add_pseudo_counts(self, pssm, beta, bg_matrix, bg_freq_arr):\n pseudo_matrix = np.zeros(pssm.shape)\n for row in range(pssm.shape[0]):\n for col in range(pssm.shape[1]):\n pseudo_matrix[row, col] = sum(freq / bg_freq_arr[idx] * bg_matrix[idx][col] for idx, freq in enumerate(pssm[row,:]))\n return (pssm * (self.get_number_of_observations() - 1) + pseudo_matrix * beta) / ((self.get_number_of_observations() - 1) + beta)\n\n def count_frequencies(self, pssm, use_sequence_weights):\n rows = self.get_size()[1]\n\n if use_sequence_weights:\n weights = self.seq_weights\n else:\n weights = np.ones(self.get_size()[0])\n\n for pos in range(rows):\n posSeq = ''.join(seq[pos] for seq in self.msa)\n\n for seq_id, aa in enumerate(posSeq):\n col = AA_TO_INT[aa]\n pssm[pos, col] += weights[seq_id]\n\n def normalize_to_relative_frequencies(self, pssm):\n row_sums = pssm.sum(axis=1)\n for row, row_sum in 
enumerate(row_sums):\n pssm[row,:] = pssm[row,:] / row_sum\n\n def divide_by_background_frequencies(self, pssm, bg_freq_arr):\n pssm = pssm / bg_freq_arr\n return pssm\n\n def calculate_log_score(self, pssm):\n pssm = np.log2(pssm) * 2\n np.place(pssm, np.isneginf(pssm), -20)\n return pssm\n\n def remove_gap_rows(self, pssm):\n mask = [True if char != '-' else False for char in self.msa[0]] \n return pssm[mask,:]\n\n def get_size(self):\n \"\"\"\n Return the number of sequences in the MSA and the MSA length, i.e.\n the number of columns in the MSA. This includes gaps.\n\n :return: Tuple of two integers. First element is the number of\n sequences in the MSA, second element is the MSA length.\n \"\"\"\n return (len(self.msa), len(self.msa[0]))\n\n def get_primary_sequence(self):\n \"\"\"\n Return the primary sequence of the MSA. In this exercise, the primary\n sequence is always the first sequence of the MSA. The returned \n sequence must NOT include gap characters.\n\n :return: String containing the ungapped primary sequence.\n \"\"\"\n return self.msa[0].replace('-', '')\n\n def get_sequence_weights(self):\n \"\"\"\n Return the calculated sequence weights for all sequences in the MSA.\n The order of weights in the array must be equal to the order of the\n sequences in the MSA.\n\n :return: Numpy array (dtype=numpy.float64) containing the weights for\n all sequences in the MSA.\n \"\"\"\n return self.seq_weights\n\n def calc_w_matrix_and_ind_obs(self):\n seq_length = len(self.msa[0])\n weight_matrix = np.zeros((seq_length, len(self.msa) + 1))\n ind_observations = 0\n for pos, _ in enumerate(self.msa[0]):\n posSeq = ''.join(seq[pos] for seq in self.msa)\n count = Counter(posSeq)\n r = len(count)\n ind_observations += r\n weight_matrix[pos, -1] = r\n\n for idx, aa in enumerate(posSeq):\n equalOccurences = count[aa]\n weight_matrix[pos, idx] = 1 / (r * equalOccurences)\n self.weight_matrix = weight_matrix\n self.num_ind_obs = ind_observations / seq_length\n \n def calcSeqWeights(self):\n weights = np.zeros(len(self.msa))\n rows, columns = self.weight_matrix.shape\n # Don't need the r column\n for col in range(columns - 1):\n weights[col] = sum(self.weight_matrix[row, col] for row in range(rows) if self.weight_matrix[row, -1] != 1)\n self.seq_weights = weights.astype(np.float64)\n\n def get_number_of_observations(self):\n \"\"\"\n Return the estimated number of independent observations in the MSA.\n\n :return: Estimate of independent observation (dtype=numpy.float64).\n \"\"\"\n return self.num_ind_obs\n\nif __name__ == '__main__':\n valid_msa = [\n \"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----\",\n \"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----\",\n \"----MTTPAVKTGLFVGLNKGHVVTRRE----------LAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKKM---\",\n \"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLK------------VAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----\",\n \"----------MGEIAVGLNKGHQVTKKA----------GTPRPSRRKGFLSQRVKKVRAVVREVAGWAPYERRVMELLKVGKD---KRALKMCKRKLGTHMRGKKKREEMAGVLRKMQAASKGE---------\",\n \"----MAPKQPNTGLFVGLNKGHIVTKKE----------LAPRPSDRKGKTSKRTHFVRNLIREVAGFAPYEKRITELLKVGKD---KRALKVRQEKVGHSQESKEEER--GDVQCSP--------PDEGWWWY\",\n 
\"---------MAPGLVVGLNKGKVLTKRQ----------LPERPSRRKGQLSKRTSFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------\",\n \"-------MGVQYKLAVGLGKGHKVTKNE----------YKPRPSRRKGALSKHTRFVRDLIREVCGFAPFERRAMELLKVSKD---KRALKFIKKRLGTHLRGKRKRDELSNVLVAQRKAAAHKEKTEHK---\",\n \"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"-------MALRYPMAVGLNKGHKVTKNV----------GKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"---------MAPGLVVGLNKGKTLTKRQ----------LPERPSRRKGHLSKRTAFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------\",\n \"-------MAIRYPMAVGLNKGHKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCAFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"-------MAIRYPMAVGLNKGYKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------\",\n \"-------MVVRYPMAVGLNKGHKVTKNV----------SKPKHSRRRGRLTKHAKFARDLIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNTLAAMRKAAAKKE--------\",\n \"-------MAIRYPMAVGLKKGHPVTKNV----------TKPKHSRRGGRLTKHSKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNILAAMRKAAAKKE--------\",\n \"---MAKEAPAKTGLAVGLNKGHKTTARV----------VKPRVSRTKGHLSKRTAFVREVVKEVAGLAPYERRVIELLRNSKD---KRARKLAKKRLGTFGRAKRKVDELQRVIAESRRAH------------\",\n \"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRSRLTNHTKFVRDMIREVCGFAPYERRAMELLKVSKS---KRALKFIKKRVGTHIRAKRKREELSNVLAAMEEAAAKKD--------\",\n \"-----MSGPGIEGLAVGLNKGHAATQLP----------VKQRQNRHKGVASKKTKIVRELVREITGFAPYERRVLEMLRISKD---KRALKFLKRRIGTHRRAKGKREELQNVIIAQRKAHK-----------\",\n \"--------MAKSGIAAGVNKGRKTTAKE----------VAPKISYRKGASSQRTVFVRSIVKEVAGLAPYERRLIELIRNAGE---KRAKKLAKKRLGTHKRALRKVEEMTQVIAESRRH-------------\",\n \"-------MAVRYELAIGLNKGHKTSKIRNVKYTGDKKVKGLRGSRLKNIQTRHTKFMRDLVREVVGHAPYEKRTMELLKVSKD---KRALKFLKRRLGTHIRAKRKREELSNILTQLRKAQTHAK--------\",\n \"-------MAVKTGIAIGLNKGKKVTQMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------\",\n \"-------MTVKTGIAIGLNKGKKVTSMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------\",\n \"---------MAKGQAVGINKGFITTQLE-------KKLQKHSAVQRKGKLGKRVALVRQVIREVTGFAPYEKRIIELIKAGSAKDSKKATKIARKRLGTHRRAKVKKALLEEAVRAQRKK-------------\",\n \"MSSAATKPVKRSGIIKGFNKGHAVAKRT------------VTSTFKKQVVTKRVAAIRDVIREISGFSPYERRVSELLKSGLD---KRALKVAKKRLGSIQAGKKKRDDIANINRKASAK-------------\",\n \"MKNA--------------------YKKVRVRYPVKRPDVKRKQRGPRAETQESRFLAAAVADEISGLSPLEKKAISLLEAKNN---NKAQKLLRKRLGSHKRAVAKVEKLARMLLEK----------------\"\n ]\n msa_val = MSA(valid_msa)\n #invalid_msas = [MSA(msa) for msa in invalid_msa]\n pssm = msa_val.get_pssm()\n #print('Rows ' + str(rows) + ' Columns ' + str(columns))\n 
print(pssm)","sub_path":"codechecker/repos/4/collected_files/pssm/ga86cuy.py","file_name":"ga86cuy.py","file_ext":"py","file_size_in_byte":13710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"653493098","text":"''' Remove last 70 data points in a1a.s1\n'''\nfrom liblinearutil import *\nimport numpy as np\nfrom random import *\nimport copy\nimport subprocess\nimport time\nimport sys\nimport pickle\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FuncFormatter\n\n# ------------------------------------\n\ntrain_file_name = sys.argv[1]\npickle_name_in = sys.argv[2]\npickle_name_out = sys.argv[3]\noutput_Folder = '../../output/'\npickle_path_in = pickle_name_in\npickle_path_out = pickle_name_out\n#test_file_name = train_file_name + '.t'\npure_data_name = (train_file_name.split('/'))[len(train_file_name.split('/'))-1].replace('.s1', '')\ntest_file_name = '../data/' + pure_data_name + '.t'\ntmp_file_path = '/tmp/cwtsai/Dec-Ative/'\n\nliblinear_incdec_train_path = '../../liblinear-incdec-2.01/train'\n# train_size_rate = 0.25\n\n# ------------------------------------\ndef check_zero (x, y):\n for _i in range (len(y)):\n if not x[_i]:\n x[_i][1] = 0\n return (x, y)\n\ndef write_svm_file (x, y, file_name):\n output_file = open(file_name, 'w')\n for _i in range (len(y)):\n output_file.write (str(y[_i]))\n temp_list = sorted(list(x[_i].keys()) )\n for _j in temp_list:\n output_file.write (' ' + str(_j) + ':' + str(x[_i][_j]))\n output_file.write ('\\n')\n# ------------------------------------\n\n\ntrain_y, train_x = svm_read_problem(train_file_name)\ntest_y, test_x = svm_read_problem(test_file_name)\n\ntrain_x, train_y = check_zero(train_x, train_y)\ntest_x, test_y = check_zero(test_x, test_y)\n\n_, order, Eout, Ein = pickle.load(open(pickle_path_in, 'rb'))\n\n# Remove last 70 data points sorted by Eout in a1a.s1\nN = len(train_y)\nremain_dict = range(N)\nremove_list = order[-209:]\n'''\nremove last #:\n- a1a : 70\n- letter : 67\n- pendigit : 209\n'''\nremove_list.sort(reverse=True) # from biggest to smallest, avoid misorder\nfor index in remove_list:\n train_x.pop(index)\n train_y.pop(index)\n\ndata_num_size = int(len(train_y)*0.99)\nwrite_svm_file (train_x, train_y, tmp_file_path + pure_data_name+'-temp-n1.train')\n# ------------------------------------\n\n\ncmd = liblinear_incdec_train_path + ' -s 2 -q ' + tmp_file_path + pure_data_name + '-temp-n1.train'\nsubprocess.call (cmd.split())\nm = load_model ( pure_data_name+'-temp-n1.train.model')\n\np_label_in, p_acc_in, p_val_in = predict(train_y, train_x, m)\nbase_acc_in = p_acc_in[0]\np_label_out, p_acc_out, p_val_out = predict(test_y, test_x, m)\nbase_acc_out = p_acc_out[0]\nE_in_0 = [1.0 for i in range(data_num_size+1)]\nE_out_0 = [1.0 for i in range(data_num_size+1)]\n\ncmd = 'rm ' + pure_data_name + '-temp-n1.train.model'\nsubprocess.call (cmd.split())\n# ------------------------------------\nN = len(train_y)\nremain_dict = range(N)\nremove_order=[]\nE_in_1 = []\nE_out_1 = []\ntrain_x_copy = copy.deepcopy(train_x)\ntrain_y_copy = copy.deepcopy(train_y)\nfor it in range(data_num_size+1):\n acc_rank = []\n write_svm_file (train_x_copy, train_y_copy, tmp_file_path + pure_data_name+'-temp2-n1.train')\n cmd = liblinear_incdec_train_path + ' -s 2 -q ' + tmp_file_path + pure_data_name + '-temp2-n1.train'\n subprocess.call (cmd.split())\n\n for i in range(len(train_y_copy)):\n cmd = 'cp ' \\\n + pure_data_name + 
'-temp2-n1.train.model'+ ' ' \\\n + pure_data_name + '-temp3-n1.train.model'\n subprocess.call (cmd.split())\n\n temp_train_x = copy.deepcopy(train_x_copy)\n temp_train_x.pop(i)\n temp_train_y = copy.deepcopy(train_y_copy)\n temp_train_y.pop(i)\n write_svm_file (temp_train_x, temp_train_y, tmp_file_path + pure_data_name+'-'+'temp3-n1.train')\n cmd = liblinear_incdec_train_path \\\n + ' -s 2 -q -i ' \\\n + pure_data_name+'-temp3-n1.train.model' + ' ' \\\n + tmp_file_path+pure_data_name+'-temp3-n1.train'\n subprocess.call (cmd.split())\n m2 = load_model (pure_data_name+'-'+'temp3-n1.train.model')\n\n p_label_in, p_acc_in, p_val_in = predict(temp_train_y, temp_train_x, m2)\n p_label_out, p_acc_out, p_val_out = predict(test_y, test_x, m2)\n acc_rank.append((p_acc_out[0], p_acc_in[0], i))\n\n cmd = 'rm ' + pure_data_name + '-temp3-n1.train.model'\n subprocess.call (cmd.split())\n\n cmd = 'rm ' + pure_data_name + '-temp2-n1.train.model'\n subprocess.call (cmd.split())\n (max_acc_out, max_acc_in, remove_num) = sorted(acc_rank, key=lambda x:x[0])[-1] #sort by p_acc_out\n remove_order.append(remain_dict.pop(remove_num)) #since the real remove index is in the remain_dict\n E_in_1.append(max_acc_in)\n E_out_1.append(max_acc_out)\n train_x_copy.pop(remove_num)\n train_y_copy.pop(remove_num)\n\nE_in_1 = [x / base_acc_in for x in E_in_1]\nE_out_1 = [x / base_acc_out for x in E_out_1]\n\n\nwith open(pickle_path_out, 'w') as store:\n pickle.dump((pure_data_name, remove_order, E_out_1, E_in_1), store)\n\n# ------------------------------------\nstep = round(0.99 / len(E_out_1), 5)\nterminal = 1.00 - step * len(E_out_1)\nquery_num = [1.00 - step * x for x in range(len(E_out_1))]\n\nplt.subplot2grid((5,5), (0,0), colspan=5 , rowspan=4)\nax = plt.gca()\nax.xaxis.set_major_locator( MultipleLocator(0.01) )\n\nplt.xlabel('% of Data')\nplt.ylabel('Acc rate')\nplt.xlim(1.00, terminal)\n#plt.ylim(0.0 , 0.02)\nplt.grid()\n\n\nplt.title(pure_data_name + '.s1_out_n1_' + ('%.3f' % base_acc_out))\nplt.plot(query_num, E_out_0, 'k', label='total')\n#plt.plot(query_num, E_out_02, 'r', label='noise')\nplt.plot(query_num, E_out_1, 'bo--', label='greedy')\n\nplt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),fancybox=True, shadow=True, ncol=3)\nplt.savefig(output_Folder + pure_data_name + '_Eout_n1_out.png')\n\nplt.cla()\n\n# ----------------------------------------------------------\nstep = round(0.99 / len(E_in_1), 5)\nterminal = 1.00 - step * len(E_in_1)\nquery_num = [1.00 - step * x for x in range(len(E_in_1))]\n\nplt.subplot2grid((5,5), (0,0), colspan=5 , rowspan=4)\nax = plt.gca()\nax.xaxis.set_major_locator( MultipleLocator(0.1) )\n\nplt.xlabel('% of Data')\nplt.ylabel('Acc rate')\nplt.xlim(1.00, terminal)\n# plt.ylim(0.8 , 1.2)\nplt.grid()\n\n\nplt.title(pure_data_name + '.s1_in_n1_' + ('%.3f' % base_acc_in))\nplt.plot(query_num, E_in_0, 'k', label='total')\n#plt.plot(query_num, E_in_02, 'r', label='noise')\nplt.plot(query_num, E_in_1, 'bo--', label='greedy')\n\nplt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),fancybox=True, shadow=True, ncol=3)\nplt.savefig(output_Folder + pure_data_name + '_Eout_n1_in.png')\n\nplt.cla()\n","sub_path":"src/greedy/remove_negative.py","file_name":"remove_negative.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"159742011","text":"import pandas as pd\r\nimport tensorflow as tf\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\nfrom duel_q_learning_agent import 
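remove_negative.py above implements greedy backward elimination: in each outer iteration it retrains once per remaining training point with that point held out (warm-started via liblinear's incremental `-i` flag), then permanently removes the point whose deletion yields the best test accuracy. Here is the loop structure in isolation, using scikit-learn's LogisticRegression as a stand-in for the liblinear command-line trainer; that substitution is for readability, not the original tooling.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def greedy_remove(X_train, y_train, X_test, y_test, n_remove):
    """Greedily drop the training point whose removal most improves test accuracy."""
    X, y = list(X_train), list(y_train)
    removed = []
    for _ in range(n_remove):
        scores = []
        for i in range(len(y)):
            # Leave point i out and refit.
            X_i = X[:i] + X[i + 1:]
            y_i = y[:i] + y[i + 1:]
            clf = LogisticRegression(max_iter=200).fit(X_i, y_i)
            scores.append((clf.score(X_test, y_test), i))
        best_acc, best_i = max(scores)  # pick the most helpful removal
        # best_i indexes the current (shrunken) list; the original script
        # maps it back to an absolute index via remain_dict.
        removed.append((best_i, best_acc))
        X.pop(best_i)
        y.pop(best_i)
    return removed

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(40, 2))
    y = (X @ np.array([1.0, -1.0]) + rng.normal(scale=0.5, size=40) > 0).astype(int)
    print(greedy_remove(X[:30].tolist(), y[:30].tolist(), X[30:], y[30:], n_remove=3))
```

The real script amortizes cost by warm-starting each leave-one-out fit from the full model; this sketch refits from scratch, but either way each removed point costs O(N) trainings.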
Agent\r\nfrom utils import trading_by_trend\r\n\r\n# Hyperparameters\r\ninitial_money = 10000000\r\nwindow_size = 30\r\nskip = 1\r\nbatch_size = 32\r\n\r\n# Selected highly correlated features\r\nrelevant_features = [\"close\", \"high\", \"low\", \"open\", \"upperband\", \"middleband\", \"lowerband\",\r\n \"dema\", \"ema\", \"ht_trendline\", \"kama\", \"ma\", \"mama\", \"fama\",\r\n \"midpoint\", \"midprice\", \"sar\", \"sma\", \"t3\", \"tema\", \"trima\",\r\n \"wma\", \"plus_dm\", \"obv\", \"avgprice\", \"medprice\", \"typprice\", \"wclprice\", \"atr\"]\r\n\r\nfeature_num = len(relevant_features)\r\nstate_size = window_size * feature_num\r\n\r\n\r\ndef main():\r\n model_name = \"duel_q_learning\"\r\n\r\n # import data\r\n df = pd.read_csv('dataset/Samsung.csv')\r\n\r\n # split dataset\r\n temp = df.date.apply(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d %H:%M:%S\")).apply(\r\n lambda x: x.date() > datetime.date(2000, 1, 1))\r\n start_idx = temp[temp == True].argmin()\r\n df = df.iloc[start_idx:, :] # 2000 ~\r\n df.reset_index(inplace=True, drop=True)\r\n\r\n test_start = datetime.datetime.strptime(df['date'].iloc[-1], \"%Y-%m-%d %H:%M:%S\").date() - pd.DateOffset(\r\n years=1) # last 1 year\r\n test_start_idx = df[df.date == test_start.strftime(\"%Y-%m-%d 00:00:00\")].index[0]\r\n\r\n train_df = df.iloc[:test_start_idx, :]\r\n train_df.reset_index(inplace=True, drop=True)\r\n test_df = df.iloc[test_start_idx:, :]\r\n test_df.reset_index(inplace=True, drop=True)\r\n\r\n '''\r\n train model\r\n '''\r\n\r\n train_df = train_df.loc[:, relevant_features]\r\n test_df = test_df.loc[:, relevant_features]\r\n\r\n if 'Unnnamed: 0' in train_df.columns: del train_df['Unnamed: 0']\r\n if 'date' in train_df.columns: del train_df['date']\r\n\r\n # initialize agent\r\n trend = train_df.values.tolist()\r\n agent = Agent(state_size=state_size, window_size=window_size, trend=trend, skip=skip, batch_size=batch_size)\r\n\r\n print(\"Start Training...\")\r\n agent.train(iterations=200, checkpoint=1, initial_money=initial_money)\r\n\r\n # save model\r\n saver = tf.train.Saver()\r\n saver.save(agent.sess, 'rl_model/{name}.ckpt'.format(name=model_name))\r\n agent.sess.close()\r\n\r\n # get test data\r\n\r\n if 'Unnamed: 0' in test_df.columns: del test_df['Unnamed: 0']\r\n if 'date' in test_df.columns: del test_df['date']\r\n\r\n '''\r\n test\r\n '''\r\n\r\n # initialize agent\r\n trend = test_df.values.tolist()\r\n agent = Agent(state_size=state_size, window_size=window_size, trend=trend, skip=skip, batch_size=batch_size)\r\n close = test_df.close\r\n\r\n # load model\r\n saver = tf.train.Saver()\r\n sess = tf.InteractiveSession()\r\n saver.restore(sess, 'rl_model/{name}.ckpt'.format(name=model_name))\r\n agent._set_session(sess)\r\n\r\n # predict\r\n states_buy, states_sell, total_gains, invest = agent.buy(initial_money=initial_money, close=close)\r\n agent.sess.close()\r\n\r\n '''\r\n backtesting\r\n '''\r\n\r\n if (len(states_buy) == 0 | (len(states_sell)) == 0): return\r\n\r\n test_df = df.iloc[test_start_idx:, :]\r\n test_df.reset_index(inplace=True, drop=True)\r\n test_df = test_df[['date', 'close', 'high', 'low', 'open', 'volume']]\r\n\r\n test_df.loc[:, \"pred\"] = 2\r\n test_df.loc[states_buy, \"pred\"] = 1\r\n test_df.loc[states_sell, \"pred\"] = 0\r\n\r\n buy_date_list, buy_price_list, sell_date_list, sell_price_list, return_list, profit, buy_hold_return = trading_by_trend(\r\n test_df.copy())\r\n\r\n print(\"profit :\", profit)\r\n print(\"buy_hold_return :\", buy_hold_return)\r\n\r\n # make 
figure and save it\r\n\r\n buy_idx_list = [test_df[test_df.date == date].index[0] for date in buy_date_list]\r\n sell_idx_list = [test_df[test_df.date == date].index[0] for date in sell_date_list]\r\n\r\n fig = plt.figure(figsize=(15, 5))\r\n plt.plot(close, color='r', lw=2.)\r\n plt.plot(close, '^', markersize=10, color='m', label='buying signal', markevery=buy_idx_list)\r\n plt.plot(close, 'v', markersize=10, color='k', label='selling signal', markevery=sell_idx_list)\r\n plt.title('total gains %f, total investment %f%%' % (profit * initial_money, profit))\r\n plt.legend()\r\n fig = plt.gcf()\r\n\r\n fname = \"figures/backtesting_result_with_\" + model_name + \".png\"\r\n fig.savefig(fname, bbox_inches='tight')\r\n plt.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"rl_model/duel_q_learning_test.py","file_name":"duel_q_learning_test.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"39412104","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count, Q, Subquery, OuterRef, Exists\n\nimport core\nimport dashboard\n\nclass Assistant(models.Model):\n\t\"\"\"This is a wrapper object for a single assistant.\n\tJust need a username at the moment...\"\"\"\n\tuser = models.OneToOneField(User, on_delete = models.CASCADE,\n\t\t\thelp_text = \"The Django Auth user attached to the Assistant.\")\n\tshortname = models.CharField(max_length = 10,\n\t\t\thelp_text = \"Initials or short name for this Assistant\")\n\t@property\n\tdef name(self):\n\t\treturn self.user.get_full_name()\n\tdef __str__(self):\n\t\treturn self.name\n\tdef student_count(self):\n\t\treturn self.student_set.count()\n\nclass Student(models.Model):\n\t\"\"\"This is really a pair of a user and a semester (with a display name),\n\tendowed with the data of the curriculum of that student.\n\tIt also names the assistant of the student, if any.\"\"\"\n\tuser = models.ForeignKey(User, blank = True, null = True,\n\t\t\ton_delete = models.CASCADE,\n\t\t\thelp_text = \"The Django Auth user attached to the student\")\n\tsemester = models.ForeignKey(core.models.Semester,\n\t\t\ton_delete = models.CASCADE,\n\t\t\thelp_text = \"The semester for this student\")\n\tassistant = models.ForeignKey(Assistant, blank = True, null = True,\n\t\t\ton_delete = models.SET_NULL,\n\t\t\thelp_text = \"The assistant for this student, if any\")\n\n\tcurriculum = models.ManyToManyField(core.models.Unit, blank = True,\n\t\t\trelated_name = 'curriculum',\n\t\t\thelp_text = \"The choice of units that this student will work on\")\n\textra_units = models.ManyToManyField(core.models.Unit, blank = True,\n\t\t\trelated_name = 'extra_units',\n\t\t\thelp_text = \"A list of units that the student \"\n\t\t\t\"can access out-of-order relative to their curriculum.\")\n\tnum_units_done = models.SmallIntegerField(default = 0,\n\t\t\thelp_text = \"If this is equal to k, \"\n\t\t\t\"then the student has completed the first k units of his/her \"\n\t\t\t\"curriculum and by default is working on the (k+1)st unit.\")\n\tvision = models.SmallIntegerField(default = 3,\n\t\t\thelp_text = \"How many units ahead of the most \"\n\t\t\t\"recently completed unit the student can see.\")\n\n\ttrack = models.CharField(max_length = 5,\n\t\t\tchoices = (\n\t\t\t\t(\"A\", \"Weekly\"),\n\t\t\t\t(\"B\", \"Biweekly\"),\n\t\t\t\t(\"C\", \"Corr.\"),\n\t\t\t\t(\"E\", 
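In duel_q_learning_test.py above, the agent's state_size is window_size * feature_num, which suggests each state is a flattened sliding window over the last window_size rows of the feature matrix. The Agent class itself is external to the record, so the following make_state helper is only an assumed illustration of that windowing, not code from duel_q_learning_agent.

```python
import numpy as np

def make_state(trend, t, window_size):
    """Flatten the last `window_size` rows ending at index t into one state vector.

    `trend` is a (time, features) array; early timesteps are padded by
    repeating the first row so every state has the same length.
    """
    trend = np.asarray(trend, dtype=float)
    start = t - window_size + 1
    if start < 0:
        pad = np.repeat(trend[:1], -start, axis=0)  # repeat the first row
        window = np.vstack([pad, trend[:t + 1]])
    else:
        window = trend[start:t + 1]
    return window.reshape(-1)

if __name__ == "__main__":
    data = np.arange(12).reshape(6, 2)        # 6 timesteps, 2 features
    s = make_state(data, t=1, window_size=3)
    print(s.shape)                            # (6,) == window_size * feature_num
    print(s)
```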
\"Ext.\"),\n\t\t\t\t(\"G\", \"Grad\"),\n\t\t\t\t(\"N\", \"N.A.\"),\n\t\t\t\t),\n\t\t\thelp_text = \"The track that the student is enrolled in for this semester.\")\n\tlegit = models.BooleanField(default = True,\n\t\t\thelp_text = \"Whether this student is still active. \"\n\t\t\t\"Set to false for dummy accounts and the like. \"\n\t\t\t\"This will hide them from the master schedule, for example.\")\n\tnewborn = models.BooleanField(default = True,\n\t\t\thelp_text = \"Whether the student is newly created.\")\n\tdef __str__(self):\n\t\treturn \"%s (%s)\" %(self.name, self.semester)\n\n\t@property\n\tdef name(self):\n\t\tif self.user: return self.user.get_full_name() or self.user.username\n\t\telse: return \"?\"\n\t@property\n\tdef get_track(self):\n\t\tif self.assistant is None:\n\t\t\treturn self.get_track_display()\n\t\telse:\n\t\t\treturn self.get_track_display() \\\n\t\t\t\t\t+ \" + \" + self.assistant.shortname\n\n\tdef is_taught_by(self, user):\n\t\t\"\"\"Checks whether the specified user\n\t\tis not the same as the student,\n\t\tbut has permission to view and edit the student's files etc.\n\t\t(This means the user is either an assistant for that student\n\t\tor has staff privileges.)\"\"\"\n\t\treturn user.is_staff or (self.assistant is not None \\\n\t\t\t\tand self.assistant.user == user)\n\tdef can_view_by(self, user):\n\t\t\"\"\"Checks whether the specified user\n\t\tis either same as the student,\n\t\tor is an instructor for that student.\"\"\"\n\t\treturn self.user == user or self.is_taught_by(user)\n\tclass Meta:\n\t\tunique_together = ('user', 'semester',)\n\t\tordering = ('semester', '-legit', 'track', 'user__first_name', 'user__last_name')\n\t\n\t@property\n\tdef meets_evan(self):\n\t\treturn (self.track == \"A\" or self.track == \"B\") and self.legit\n\t@property\n\tdef calendar_url(self):\n\t\tif self.meets_evan:\n\t\t\treturn self.semester.calendar_url_meets_evan\n\t\telse:\n\t\t\treturn self.semester.calendar_url_no_meets_evan\n\t@property\n\tdef curriculum_length(self):\n\t\treturn self.curriculum.count()\n\n\tdef generate_curriculum_queryset(self):\n\t\treturn self.curriculum.all().annotate(\n\t\t\t\tnum_uploads = Count('uploadedfile',\n\t\t\t\t\tfilter = Q(uploadedfile__benefactor = self.id)),\n\t\t\t\thas_pset = Exists(\n\t\t\t\t\tdashboard.models.UploadedFile.objects.filter(\n\t\t\t\t\t\tunit=OuterRef('pk'),\n\t\t\t\t\t\tbenefactor=self.id,\n\t\t\t\t\t\tcategory='psets')))\\\n\t\t\t\t.order_by('-has_pset', 'position')\n\n\tdef check_unit_unlocked(self, unit):\n\t\tif self.newborn:\n\t\t\treturn False\n\t\tif self.extra_units.filter(pk=unit.id).exists():\n\t\t\treturn True\n\t\tcurriculum = list(self.generate_curriculum_queryset())\n\t\tif not unit in curriculum:\n\t\t\treturn False\n\t\ti = curriculum.index(unit)\n\t\tunit = curriculum[i] # grab the annotations\n\t\tif unit.has_pset:\n\t\t\treturn True\n\t\telif i <= self.num_units_done + (self.vision-1):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef generate_curriculum_rows(self, omniscient):\n\t\tcurrent_index = self.num_units_done\n\t\tcurriculum = self.generate_curriculum_queryset()\n\t\textra_units_ids = self.extra_units.values_list('id', flat=True)\n\n\t\trows = []\n\t\tfor n, unit in enumerate(curriculum):\n\t\t\trow = {}\n\t\t\trow['unit'] = unit\n\t\t\trow['number'] = n+1\n\t\t\trow['is_completed'] = unit.has_pset or n < current_index\n\t\t\trow['num_uploads'] = unit.num_uploads or 0\n\t\t\tif self.newborn:\n\t\t\t\trow['is_current'] = False\n\t\t\t\trow['is_unlocked'] = 
False\n\t\t\telse:\n\t\t\t\trow['is_current'] = (n == current_index)\n\t\t\t\trow['is_unlocked'] = row['is_completed'] \\\n\t\t\t\t\t\tor row['is_current'] \\\n\t\t\t\t\t\tor (unit.id in extra_units_ids) \\\n\t\t\t\t\t\tor n <= current_index + (self.vision-1)\n\n\t\t\tif row['is_completed']:\n\t\t\t\trow['sols_label'] = \"Solutions\"\n\t\t\telif omniscient and row['is_current']:\n\t\t\t\trow['sols_label'] = \"Sols (current)\"\n\t\t\telif omniscient and row['is_unlocked']:\n\t\t\t\trow['sols_label'] = \"Sols (future)\"\n\t\t\telse:\n\t\t\t\trow['sols_label'] = None # solutions not shown\n\t\t\trows.append(row)\n\t\treturn rows\n\nclass Invoice(models.Model):\n\t\"\"\"Billing information object for students.\"\"\"\n\tstudent = models.OneToOneField(Student,\n\t\t\ton_delete = models.CASCADE,\n\t\t\thelp_text = \"The invoice that this student is for.\")\n\tpreps_taught = models.SmallIntegerField(default = 0,\n\t\t\thelp_text = \"Number of semesters that development/preparation \"\n\t\t\t\"costs are charged.\")\n\thours_taught = models.DecimalField(max_digits = 8,\n\t\t\tdecimal_places = 2, default = 0,\n\t\t\thelp_text = \"Number of hours taught for.\")\n\ttotal_paid = models.DecimalField(max_digits = 8,\n\t\t\tdecimal_places = 2, default = 0,\n\t\t\thelp_text = \"Amount paid.\")\n\tupdated_at = models.DateTimeField(auto_now=True)\n\n\tdef __str__(self):\n\t\treturn \"Invoice %d\" %(self.id or 0,)\n\n\t@property\n\tdef prep_rate(self):\n\t\treturn self.student.semester.prep_rate\n\t@property\n\tdef hour_rate(self):\n\t\treturn self.student.semester.hour_rate\n\n\t@property\n\tdef total_cost(self):\n\t\treturn self.prep_rate*self.preps_taught + self.hour_rate*self.hours_taught\n\t@property\n\tdef total_owed(self):\n\t\treturn self.total_cost - self.total_paid\n\t@property\n\tdef cleared(self):\n\t\t\"\"\"Whether or not the student owes anything\"\"\"\n\t\treturn (self.total_owed <= 0)\n\n\t@property\n\tdef track(self):\n\t\treturn self.student.track\n\nclass UnitInquiry(models.Model):\n\tunit = models.ForeignKey(core.models.Unit,\n\t\t\ton_delete = models.CASCADE,\n\t\t\thelp_text = \"The unit being requested.\")\n\tstudent = models.ForeignKey(Student,\n\t\t\ton_delete = models.CASCADE,\n\t\t\thelp_text = \"The student making the request\")\n\tcreated_at = models.DateTimeField(auto_now_add=True)\n\tupdated_at = models.DateTimeField(auto_now=True)\n\n\taction_type = models.CharField(max_length = 10,\n\t\t\tchoices = (\n\t\t\t\t(\"DROP\", \"Drop\"),\n\t\t\t\t(\"ADD\", \"Add\"),\n\t\t\t\t),\n\t\t\thelp_text = \"Describe the action you want to make.\")\n\tstatus = models.CharField(max_length = 5,\n\t\t\tchoices = (\n\t\t\t\t(\"ACC\", \"Approved\"),\n\t\t\t\t(\"REJ\", \"Rejected\"),\n\t\t\t\t(\"NEW\", \"Pending\"),\n\t\t\t\t(\"HOLD\", \"On hold\"),\n\t\t\t\t),\n\t\t\tdefault = \"NEW\",\n\t\t\thelp_text = \"The current status of the inquiry.\")\n\texplanation = models.TextField(max_length = 300, blank=True,\n\t\t\thelp_text=\"Short explanation for this request (if needed).\")\n\n\tdef run_accept(self):\n\t\tunit = self.unit\n\t\tif self.action_type == \"DROP\":\n\t\t\tself.student.curriculum.remove(unit)\n\t\t\tself.student.extra_units.remove(unit)\n\t\telif self.action_type == \"ADD\":\n\t\t\tself.student.curriculum.add(unit)\n\t\t\tself.student.extra_units.add(unit)\n\t\tself.student.save()\n\n\t\tself.status = \"ACC\"\n\t\tself.save()\n\n\tdef __str__(self):\n\t\treturn self.action_type + \" \" + str(self.unit)\n\t\n\tclass Meta:\n\t\tordering = 
('-created_at',)\n","sub_path":"roster/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"215025693","text":"import os\nfrom six import next as _next\nimport re\nimport teca.utils as tecautils\nfrom collections import namedtuple\nimport logging\n\n\nDirectory = namedtuple(\"Directory\", \"path directories filenames\")\n\ndef walk(starting_path, cfg, deep=False):\n #print(\"teca.filesystem.walk: starting_path\", starting_path)\n\n if not starting_path:\n starting_path = cfg.starting_path\n\n #print(\"teca.filesystem.walk: starting_path\", starting_path)\n if not starting_path.startswith(cfg.starting_path):\n starting_path = os.path.join(cfg.starting_path, starting_path)\n\n #print(\"teca.filesystem.walk: starting_path\", starting_path)\n at_least_one_folder = False\n\n deep_dirnames = list()\n deep_filenames = list()\n\n for dirpath, dirnames, filenames in os.walk(starting_path):\n if isFolderHidden(dirpath, cfg, with_starting_path=True):\n break\n\n at_least_one_folder = True\n\n filterDirectories(dirpath, dirnames, cfg)\n filterFiles(dirpath, filenames, cfg)\n\n if not deep:\n yield dirpath, dirnames, filenames\n else:\n deep_dirnames.extend([os.path.join(dirpath, dirname_) for dirname_ in dirnames])\n deep_filenames.extend([os.path.join(dirpath, filename_) for filename_ in filenames])\n\n logging.debug(\"at_least_one_folder: {0}\".format(at_least_one_folder))\n if not at_least_one_folder:\n yield Directory(\"\", [], [])\n elif deep:\n yield Directory(starting_path, deep_dirnames, deep_filenames)\n\n\n\ndef filterDirectories(dirpath, dirnames, cfg):\n filterAccordingToFunction(dirpath, dirnames, cfg, cfg.excluded_folders)\n\ndef filterFiles(dirpath, fnames, cfg):\n #remove files if they're excluded\n filterAccordingToFunction(dirpath, fnames, cfg, cfg.excluded_files)\n #remove files if their extension is not supported\n fnames[:] = tecautils.filterImages(fnames, cfg)\n filterFilesIfInExcludedFileFormat(dirpath, fnames, cfg)\n\ndef filterFilesIfInExcludedFileFormat(dirpath, fnames, cfg):\n dirpath = dirpath.replace(cfg.starting_path, \"\")\n fnames[:] = [fname for fname in fnames if not cfg.excluded_file_formats(fname)]\n\ndef filterAccordingToFunction(dirpath, items, cfg, function_):\n dirpath = dirpath.replace(cfg.starting_path, \"\")\n for excluded in function_(dirpath):\n try:\n items.remove(excluded)\n except ValueError:\n pass\n\n\ndef filesInDirectory(dir_path, cfg, deep=False):\n _, _, fnames = _next(walk(dir_path, cfg, deep))\n return fnames\n\ndef isDirectoryEmpty(dir_path, cfg):\n _, dirnames, fnames = _next(walk(dir_path, cfg))\n return (len(dirnames) + len(fnames)) == 0\n\ndef isFolderHidden(dir_path, cfg, with_starting_path=False):\n if with_starting_path:\n dir_path = dir_path.replace(cfg.starting_path, \"\")\n\n globally_excluded = dir_path in cfg.excluded_folders()\n locally_excluded = os.path.basename(dir_path) in cfg.excluded_folders(os.path.dirname(dir_path))\n return globally_excluded or locally_excluded\n\n\n# aliases\nfilesInFolder = filesInDirectory\n","sub_path":"teca/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"132014545","text":"import os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",'first_Django_project.settings')\n\nimport django\ndjango.setup()\n\n##FAKE POP SCRIPT\n\nimport 
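The roster/models.py record above encodes its unlock rule twice, once in Student.check_unit_unlocked and once in generate_curriculum_rows: a unit is visible when it already has a submitted problem set, is listed in extra_units, or sits within `vision` positions of the last completed unit. A plain-function restatement of that predicate, handy for checking the two code paths agree (the argument names here are illustrative, not model fields):

```python
def unit_unlocked(index, num_units_done, vision,
                  has_pset=False, is_extra=False, newborn=False):
    """Mirror of Student.check_unit_unlocked for a unit at `index` (0-based)."""
    if newborn:
        return False
    if is_extra or has_pset:
        return True
    # Units up to `vision` ahead of the last completed one are visible,
    # i.e. indices 0 .. num_units_done + vision - 1.
    return index <= num_units_done + (vision - 1)

if __name__ == "__main__":
    # With 2 units done and vision 3, units 0..4 are unlocked, unit 5 is not.
    print([unit_unlocked(i, num_units_done=2, vision=3) for i in range(6)])
```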
random\nfrom first_Django_app.models import AccessRecord, Topic, Webpage\nfrom faker import Faker\n\nfakegen = Faker()\ntopics = ['Search', 'Social', 'Maketplace', 'New', 'Game']\n\ndef add_topic():\n t = Topic.objects.get_or_create(top_name= random.choice(topics))[0]\n t.save()\n return t\n\ndef populate(N=5):\n\n for entry in range(N):\n\n #get topic for entry\n top = add_topic()\n\n #create the fake data for entry\n fake_url = fakegen.url()\n fake_name = fakegen.company()\n fake_date = fakegen.date()\n\n #create the webpage for entry\n webpg = Webpage.objects.get_or_create(topic=top, url=fake_url, name=fake_name)[0]\n\n #create a fake access record for the webpage\n acc_rec = AccessRecord.objects.get_or_create(name=webpg, date= fake_date)[0]\n\nif __name__ == \"__main__\":\n print('populating scripts!')\n populate(20)\n print('populating complete!')\n\n","sub_path":"populate_first_app.py","file_name":"populate_first_app.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"146353483","text":"#!/usr/bin/env python\n\ndef test_concat():\n l = []\n for i in range(1000):\n l = l + [i]\n\ndef test_append():\n l = []\n for i in range(1000):\n l.append(i)\n\ndef test_listcomprehensive():\n l = [ i for i in range(1000) ]\n\ndef test_listconstructor():\n l = list(range(1000))\n\nfrom timeit import Timer\n\nt1 = Timer(\"test_concat()\", \"from __main__ import test_concat\")\nprint(\"concat \",t1.timeit(number=1000), \"milliseconds\")\nt2 = Timer(\"test_append()\", \"from __main__ import test_append\")\nprint(\"append \",t2.timeit(number=1000), \"milliseconds\")\nt3 = Timer(\"test_listcomprehensive()\", \"from __main__ import test_listcomprehensive\")\nprint(\"list comprehensive \",t3.timeit(number=1000), \"milliseconds\")\nt4 = Timer(\"test_listconstructor()\", \"from __main__ import test_listconstructor\")\nprint(\"list constructor \",t4.timeit(number=1000), \"milliseconds\")\n","sub_path":"Basics/listperformance.py","file_name":"listperformance.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"209547016","text":"import logging\nfrom conf import config\nfrom bot import EconomyBot\n\nlogging.basicConfig(filename=config.logfile, format='%(asctime)s - [%(levelname)s] %(name)s : %(message)s')\nlog = logging.getLogger(__name__)\n\ntry:\n bot = EconomyBot(command_prefix=config.prefix, description=config.description)\n bot.run(config.token)\nexcept Exception as e:\n log.exception(e)\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"175612000","text":"import torch\nfrom torch.nn import MSELoss\nfrom torch.utils.data import random_split, DataLoader\nimport pandas as pd\nimport numpy as np\nfrom transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup\nfrom dataset import WineReviewDataset\nfrom model import SentimentRegressor\nfrom tqdm import tqdm\n\ndef train_epoch(\n model,\n data_loader,\n loss_fn,\n optimizer,\n device,\n scheduler):\n model = model.train()\n losses = []\n for d in tqdm(data_loader):\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n loss = 
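The listperformance.py record embedded above times four list-building strategies; concatenation loses because `l = l + [i]` allocates a fresh list and copies every existing element on each iteration, giving quadratic total work, while append, the comprehension, and list(range(...)) are amortized linear. Counting element copies makes the asymptotic gap visible without timeit:

```python
def copies_concat(n):
    """l = l + [i] copies len(l) + 1 references at step i: O(n^2) total."""
    return sum(i + 1 for i in range(n))

def copies_append(n):
    """append moves each element O(1) amortized times: O(n) total."""
    return n

for n in (10, 100, 1000):
    print(n, copies_concat(n), copies_append(n))
```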
loss_fn(outputs.view(-1).float(), targets.view(-1).float())\n losses.append(loss.item())\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n return np.mean(losses)\n\ndef eval_model(model, data_loader, loss_fn, device):\n model = model.eval()\n losses = []\n correct_predictions = 0\n with torch.no_grad():\n for d in tqdm(data_loader):\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n targets = d[\"targets\"].to(device)\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n loss = loss_fn(outputs.view(-1).float(), targets.view(-1).float())\n losses.append(loss.item())\n return np.mean(losses)\n\nif __name__ == \"__main__\":\n RANDOM_SEED = 42\n PRE_TRAINED_MODEL_NAME = \"bert-base-cased\"\n VAL_RATIO = 0.2\n N_WORKERS = 4\n BATCH_SIZE = 32\n EPOCHS = 10\n LEARNING_RATE = 2e-5\n \n review_path = \"../data/winemag-data-130k-v2.csv\"\n save_model_path = \"../data/wine_review_bert.bin\"\n \n np.random.seed(RANDOM_SEED)\n torch.manual_seed(RANDOM_SEED)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \n tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)\n \n # +++ setup wine review dataset +++\n review_df = pd.read_csv(review_path) \n reviews = review_df[\"description\"]\n targets = review_df[\"points\"]\n \n dataset = WineReviewDataset(reviews, targets, tokenizer)\n # --- setup wine review dataset ---\n \n valid_length = int(len(dataset)*VAL_RATIO)\n train_length = len(dataset) - valid_length\n \n train_dataset, valid_dataset = random_split(dataset, [train_length, valid_length])\n \n print(f\"#. training dataset: {len(train_dataset)}\")\n print(f\"#. 
validation dataset: {len(valid_dataset)}\")\n \n train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=N_WORKERS)\n valid_loader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=N_WORKERS)\n \n model = SentimentRegressor(PRE_TRAINED_MODEL_NAME)\n model = model.to(device)\n \n optimizer = AdamW(model.parameters(), lr=LEARNING_RATE, correct_bias=False)\n total_steps = len(train_loader) * EPOCHS\n \n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n loss_fn = MSELoss().to(device)\n \n for epoch in range(EPOCHS):\n train_loss = train_epoch(model, train_loader, loss_fn, optimizer, device, scheduler)\n print(f\"Epoch {epoch + 1}/{EPOCHS}\")\n print(\"-\" * 10)\n print(f\"Train loss: {train_loss}\")\n \n eval_loss = eval_model(model, valid_loader, loss_fn, device)\n print(f\"Eval loss: {eval_loss}\")\n \n torch.save(model.state_dict(), save_model_path)\n ","sub_path":"BERT/ScoreRegression/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"249226299","text":"import json\nimport io\nimport os\nimport gzip\nfrom types import SimpleNamespace\n\nimport csv\nimport sqlalchemy\nfrom sqlalchemy.sql import text\nfrom flask import Flask, request\n\nwith open('../auth/auth.json') as f:\n auth = SimpleNamespace(**json.load(f))\n\nif hasattr(auth, 's3'):\n import boto3\n s3client = boto3.client('s3', aws_access_key_id=auth.s3['key_id'], aws_secret_access_key=auth.s3['key'])\n\nif hasattr(auth, 'db'):\n engine = sqlalchemy.create_engine('redshift://{user}:{password}@{host}:{port}/{dbname}'.format(**auth.db))\n\n\ndef list_to_matrix(l, n):\n # turn list l into 2D matrix of n columns\n return [l[i:i + n] for i in range(0, len(l), n)]\n\n\ndef to_alnum(string):\n # get rid of non alpahunmeric characters except underscores\n return ''.join(char for char in string if char.isalnum() or char == '_')\n\n\ndef generate_table_stmt(schema, table, columns):\n # gernerate a create table statement\n alnum_columns = [to_alnum(column) for column in columns]\n cols_type = ','.join([f'{col} VARCHAR' for col in alnum_columns])\n return f'CREATE TABLE {schema}.{table}({cols_type})'\n\n\ndef s3_copy(bucket: str, key: str, csv_reader):\n # copy a pandas csv_reader as a csv.gz to s3\n def compress(string, cp='utf-8'):\n out = io.BytesIO()\n with gzip.GzipFile(fileobj=out, mode='w') as f:\n f.write(string.encode(cp))\n return out.getvalue()\n\n header = next(csv_reader)\n body_io = io.StringIO()\n writer = csv.writer(body_io)\n _ = list(writer.writerow(row) for row in csv_reader)\n body_io.seek(0)\n body = compress(body_io.read())\n s3client.put_object(Bucket=bucket, Key=key, Body=body)\n\n\ndef destination_redshift(tsv_data: io.StringIO, table_name: str):\n reader = csv.reader(tsv_data, delimiter='\\t')\n key = f'excel-to-database/{table_name}.csv.gz'\n arn = auth.s3['arn']\n bucket = auth.s3['bucket']\n\n # load to s3 bucket\n s3_copy(bucket, key, reader)\n\n # load to redshift\n copy_stmt = f'''COPY x_excel.{table_name}\n FROM 's3://{bucket}/{key}'\n iam_role '{arn}'\n GZIP\n csv\n COMPUPDATE OFF\n region 'eu-central-1';'''\n\n # get column names\n connection = engine.connect()\n col_names = connection.execute(f'SELECT COLUMN_NAME from INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME=\\'{table_name}\\'')\n col_names = [col_name.values()[0].upper() for col_name in col_names]\n\n # compare sorted and upper 
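The train.py record above imports SentimentRegressor from a local module that is not included and trains it as a single-output regressor with MSELoss. Based on how the model is called, a plausible shape is a BERT encoder with dropout plus one linear unit on the pooled output; this sketch is an assumption about that external class, and it presumes a recent transformers version whose forward pass returns a ModelOutput with pooler_output.

```python
import torch.nn as nn
from transformers import BertModel

class SentimentRegressorSketch(nn.Module):
    """Assumed shape of the record's external SentimentRegressor."""

    def __init__(self, pretrained_name="bert-base-cased"):
        super().__init__()
        self.bert = BertModel.from_pretrained(pretrained_name)
        self.drop = nn.Dropout(0.3)
        # One linear unit: a single real-valued score per review.
        self.out = nn.Linear(self.bert.config.hidden_size, 1)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled = outputs.pooler_output          # [batch, hidden_size]
        return self.out(self.drop(pooled))      # [batch, 1], fed to MSELoss
```

As an aside, eval_model in the record initializes correct_predictions but never uses it; for a pure regression loss that counter is dead code.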
col names as it is tricky to control case and order (not an ideal solution)\n tsv_data.seek(0)\n header = next(reader)\n n_records = len(reader)\n if sorted(col_names) == sorted(header):\n # truncate if cols seem to be the same (will fail if only column order is changed)\n action = 'Truncated'\n connection.execute(f'Truncate TABLE x_excel.{table_name}')\n else:\n # drop if col names not the same\n action = 'Dropped'\n connection.execute(f'DROP TABLE IF EXISTS x_excel.{table_name} CASCADE')\n connection.execute(generate_table_stmt('x_excel', table_name, header))\n\n connection.execute(text(copy_stmt).execution_options(autocommit=True))\n return f'{action} and loaded into x_excel.{table_name}.\\n{n_records} records loaded successfully.\\n'\n\n\ndef destination_local(tsv_data: io.StringIO, table_name: str):\n path = auth.local_dest\n if not os.path.isdir(path):\n os.mkdirs(path)\n\n filename = f'{path}/{table_name}.csv'\n n_records = 0\n with open(filename, 'w') as fp:\n writer = csv.writer(fp)\n reader = csv.reader(tsv_data, delimiter='\\t')\n for row in reader:\n writer.writerow(row)\n n_records += 1\n\n return f'Created {filename}.\\n{n_records-1} records loaded successfully.\\n'\n\n\napp = Flask(__name__)\n\n\n@app.route('/submit', methods=['POST'])\ndef post_route():\n # read data as json\n data = request.get_json(force=True)\n\n # check for presence of all required fields\n for key in ['token', 'name', 'columns', 'data']:\n if key not in data.keys():\n return f'Missing data field: {key}\\n'\n\n # load token\n with open('../auth/auth.json') as f:\n token = json.load(f)['token']\n\n # check for valid token\n if data['token'] != token:\n return 'Invalid token\\n'\n\n try:\n # write json data to csv\n tsv_data = io.StringIO(data['data'])\n table_name = data['name'].lower()\n if hasattr(auth, 's3'):\n return destination_redshift(tsv_data, table_name)\n elif hasattr(auth, 'local_dest'):\n return destination_local(tsv_data, table_name)\n\n except Exception as e:\n # return any error as a response to the excel macro\n return e\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',\n port=5000)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"280595862","text":"from nion.utils import Model\n\n\nclass Handler:\n\n slider_value_model = Model.PropertyModel(50)\n\n def reset(self, widget):\n self.slider_value_model.value = 50\n\n\ndef construct_ui(ui):\n\n label = ui.create_label(text=\"@binding(slider_value_model.value)\")\n\n button = ui.create_push_button(text=\"Reset to 50\", on_clicked=\"reset\")\n\n content = ui.create_column(label, button, spacing=8)\n\n left = ui.create_label(text=\"LEFT\")\n\n right = ui.create_label(text=\"RIGHT\")\n\n group_row = ui.create_row(left, ui.create_stretch(), right, spacing=8)\n\n status_bar = ui.create_group(group_row)\n\n return ui.create_column(content, ui.create_stretch(), status_bar, spacing=8)\n","sub_path":"nionui_app/nionui_examples/ui_demo/StatusBar.py","file_name":"StatusBar.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"54725577","text":"# Copyright (c) 2014 Adafruit Industries\n# Author: Tony DiCola\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, 
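One bug worth flagging in the main.py record above: destination_redshift sets `n_records = len(reader)`, but csv.reader objects do not support len(), so that line raises TypeError; row counts must come from iteration, as destination_local in the same record already does. A corrected counting pattern in isolation (not a drop-in patch for the record's S3/Redshift plumbing):

```python
import csv
import io

def count_csv_records(tsv_text):
    """Count data rows (excluding the header) by iterating the reader once."""
    reader = csv.reader(io.StringIO(tsv_text), delimiter="\t")
    next(reader, None)                 # skip the header row if present
    return sum(1 for _ in reader)      # csv.reader has no len(); iterate

print(count_csv_records("a\tb\n1\t2\n3\t4\n"))  # -> 2
```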
including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nimport sys\nimport time\nfrom PIL import Image\nimport GC9A01 as GC9A01\nfrom os import listdir\nfrom os.path import isfile, join\n\nprint(\"\"\"\nimage.py - Display an image on the LCD.\n\nIf you're using Breakout Garden, plug the 1.3\" LCD (SPI)\nbreakout into the rear slot.\n\"\"\")\n\nif len(sys.argv) < 2:\n print(\"Usage: {} \".format(sys.argv[0]))\n sys.exit(1)\n\nimage_folder = sys.argv[1]\ndelay = float(sys.argv[2])\nloop = True if sys.argv[3] in [\"yes\", \"true\", \"True\", \"1\"] else False\n\n# Create GC9A01 LCD display class.\ndisp = GC9A01.GC9A01(\n port=0,\n cs=GC9A01.BG_SPI_CS_BACK, # BG_SPI_CSB_BACK or BG_SPI_CS_FRONT\n dc=9,\n rst=24,\n backlight=19, # 18 for back BG slot, 19 for front BG slot.\n spi_speed_hz=80 * 1000 * 1000\n)\n\nWIDTH = disp.width\nHEIGHT = disp.height\n\nimage_files = [join(image_folder, f) for f in listdir(image_folder) if isfile(join(image_folder, f)) and \".png\" in f and not f.startswith(\".\")]\n\n# Load an image.\nprint('Loading {} images...'.format(len(image_files)))\n\nimages = [Image.open(image_file) for image_file in sorted(image_files)]\n\n# Resize the image\nimages = [image.resize((WIDTH, HEIGHT)) for image in images]\n\n# Draw the image on the display hardware.\nprint('Drawing images...')\n\n# Initialize display.\ndisp.begin()\n\n# Display all the images in order, delaying and looping if requested\nrunning = True\nwhile(running):\n for image in images:\n disp.display(image)\n time.sleep(delay)\n if not loop:\n running = False\n","sub_path":"examples/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"477543642","text":"from django.conf.urls import url\n\nfrom three import views\n\nurlpatterns = [\n url(r'^hello/', views.hello, name='hello'),\n \n url(r'^login/',views.login,name='login'),\n url(r'^dologin/',views.dologin,name='dologin'),\n url(r'^mine/',views.mine,name='mine'),\n]","sub_path":"three/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"298950894","text":"import os\nimport sys\nfrom importlib import util\nfrom importlib.abc import SourceLoader\n\ndef _resolve_module(module_name, additional_search_paths=None):\n if additional_search_paths is None:\n additional_search_paths = []\n\n if module_name is None or module_name == \"__main__\":\n return None, None\n\n parent, _, module_name = module_name.rpartition(\".\")\n search_path = sys.path + additional_search_paths\n\n if parent != \"\":\n parent_spec, search_path = 
_resolve_module(parent)\n if parent_spec is None:\n return None, None\n\n try:\n spec = None\n for finder in sys.meta_path:\n try:\n spec = finder.find_spec(module_name, search_path)\n except AttributeError:\n pass\n\n if spec is not None:\n break\n\n origin = spec.origin if spec else None\n if spec is None or spec.origin is None:\n return None, None\n if isinstance(spec.loader, SourceLoader):\n return spec.origin, spec.submodule_search_locations\n else:\n return None, None\n\n except ModuleNotFoundError:\n return None, []\n\ndef resolve_module(module_name, additional_search_paths=None):\n return _resolve_module(module_name, additional_search_paths)[0]\n","sub_path":"memestra/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"193515293","text":"from BeautifulSoup import BeautifulSoup as bs\nfrom collections import defaultdict\nimport logging\n\n\nattributes = ['Title', 'Abstract', 'All IPC', 'IPC Primary',\n 'IPC Section', 'IPC Class', 'IPC Subclass', 'IPC Group']\n\n\ndef parse_xml(file_name, is_query=False, attributes=attributes):\n \"\"\"Return a dictionary containing all the attribute of the XML file\n according to the attributes list\n \"\"\"\n\n parsed = defaultdict(str)\n with open(file_name) as xml:\n soup = bs(xml)\n\n for attr in attributes:\n try:\n if is_query:\n parsed[attr] = soup.query.find(attr).text\n else:\n parsed[attr] = soup.doc.find('str', {'name': attr}).text\n except:\n logging.warn(\"%s does not have %s field\" % (file_name, attr))\n\n return parsed\n\n","sub_path":"xml_parser.py","file_name":"xml_parser.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"317460379","text":"import os\r\nimport re\r\nfrom text_analysis import sell_data,not_sell_data,share_data,not_share_data,gunning_fog,smog_index,avg_sentence_length,flesch_reading_ease,dale_chall_readability_score \r\n\r\nfilelistall = os.listdir(os.getcwd())\r\n\r\nfilelist = filter(lambda x: x.endswith(\".txt\"),filelistall)\r\n\r\n# Get all text of 434 files into all_text\r\nall_text = []\r\nall_ids = []\r\nfor filename in filelist:\r\n f = open(filename,\"r\")\r\n number = float(filename[8:-4])\r\n all_ids.append(number)\r\n text = ''.join(f.readlines())\r\n all_text.append(text)\r\n f.close()\r\n\r\n### Process all features for all files\r\nis_minor_all = []\r\nis_how_collect_all = []\r\nis_geo_location_all = []\r\nemail_all = []\r\nis_vendor_all = []\r\nnot_sell_data_all = []\r\nsell_data_all = []\r\nshare_data_all = []\r\nnot_share_data_all = []\r\nis_cookies_all = []\r\ngunning_fog_all = []\r\nsmog_index_all = []\r\navg_sentence_length_all = []\r\nflesch_reading_ease_all = []\r\ndale_chall_readability_score_all = []\r\n\r\n# Run through each file for each feature\r\nfor i in range(len(all_text)):\r\n is_minor_all.append(is_minor(all_text[i]))\r\n is_how_collect_all.append(is_how_collect(all_text[i]))\r\n is_geo_location_all.append(is_geo_location(all_text[i]))\r\n email_all.append(email(all_text[i]))\r\n is_vendor_all.append(is_vendor(all_text[i]))\r\n not_sell_data_all.append(not_sell_data(all_text[i]))\r\n sell_data_all.append(sell_data(all_text[i]))\r\n not_share_data_all.append(not_share_data(all_text[i]))\r\n share_data_all.append(share_data(all_text[i]))\r\n is_cookies_all.append(is_cookies(all_text[i]))\r\n gunning_fog_all.append(gunning_fog(all_text[i]))\r\n 
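memestra's utils.py above walks sys.meta_path finders by hand for each component of a dotted module name so it can thread extra search paths through the recursion. When custom search paths are not needed, the standard library exposes the same lookup directly; the following simplification uses importlib.util.find_spec and is an alternative, not memestra's actual code.

```python
import importlib.util

def resolve_source_origin(module_name):
    """Return the source file of a module, or None if it has no Python source."""
    if not module_name or module_name == "__main__":
        return None
    try:
        spec = importlib.util.find_spec(module_name)
    except (ImportError, ValueError):
        # Missing parent package, or a module whose __spec__ is unusable.
        return None
    if spec is None or spec.origin in (None, "built-in", "frozen"):
        return None
    return spec.origin

print(resolve_source_origin("json"))   # e.g. .../lib/python3.x/json/__init__.py
print(resolve_source_origin("sys"))    # None: built-in module, no source file
```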
smog_index_all.append(smog_index(all_text[i]))\r\n avg_sentence_length_all.append(avg_sentence_length(all_text[i]))\r\n flesch_reading_ease_all.append(flesch_reading_ease(all_text[i]))\r\n dale_chall_readability_score_all.append(dale_chall_readability_score(all_text[i]))\r\n print(len(email_all))\r\n#print(not_share_data_all)\r\n# print(np.array(gunning_fog_all))\r\n# print(np.array(smog_index_all))\r\n# print(np.array(avg_sentence_length_all))\r\n# print(np.array(flesch_reading_ease_all))\r\n# print(np.array(dale_chall_readability_score_all)) \r\n \r\n# Create DATA MATRIX","sub_path":"all_text.py","file_name":"all_text.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252561737","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 26 14:14:53 2020\n\n@author: jared\n\"\"\"\nimport pickle\nfrom sklearn.neighbors import KNeighborsClassifier\nimport csv\nfrom PIL import Image\nimport numpy as np\nimport os\n\n### Import Munsell to RGB lookup table by pickle\nwith open('Munsell_RGB_lookuptable.data', 'rb') as MRlt:\n Munsell_RGB_lookuptable = pickle.load(MRlt)\n\nL = len(Munsell_RGB_lookuptable[0])\nMunsell_list, RGB_list = Munsell_RGB_lookuptable\n\n\n### Import list of munsell colors of interest\nColors_of_interest = []\nwith open('colors_of_interest.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n Colors_of_interest.append(row[0])\n\n# Remove Duplicates \nColors_of_interest = list(set(Colors_of_interest))\nL_int = len(Colors_of_interest) \n\n\n### Get indices corresponding to colors of interest\nColors_of_interest_ind = [Munsell_list.index(color) for color in Colors_of_interest]\nother_colors_ind = list(set(range(L)) - set(Colors_of_interest_ind)) \n\n### Get RGB's of interest\nRGB_of_interest = [RGB_list[i] for i in Colors_of_interest_ind]\n\n### Creat dictionary of HVC classes\ndict_of_HCV_Classes = {}\nfor i in range(L_int):\n dict_of_HCV_Classes[i] = set([tuple(RGB_of_interest[i])])\ndict_of_HCV_Classes[L_int] = set([tuple(RGB_list[i]) for i in other_colors_ind])\n \nclassifier = KNeighborsClassifier(n_neighbors=1)\ny_train = list(range(L_int)) + [L_int]*(L - L_int)\nX_train = [rgb for rgb in RGB_of_interest\n ] + [RGB_list[i] for i in other_colors_ind]\nclassifier.fit(X_train, y_train)\n\n### First row of sheet\nfirst_row = ['Image'] + Colors_of_interest + ['other']\nwith open('color_data.csv', 'a', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows([first_row])\n\n\n###\nos.system('clr')\n\norignial_directory = os.getcwd()\ndesired_directory = 'C:\\\\Users\\\\jared\\\\Desktop\\\\Picture Directory1'\nos.chdir(desired_directory)\n\nfolders = os.listdir('.')\n\n\nfor folder in folders:\n csv_rows = []\n \n \n # Push into the folder in for loop\n os.chdir('.\\\\' + folder)\n print('processing images in ' + folder)\n \n # get list of file (image) names in current folder\n files = os.listdir('.')\n \n for file in files:\n # mount image for extraction\n im = Image.open(file)\n size = im.size\n \n im = Image.open(file)\n crop_box = (1100,100,4100,3000)\n crop_size = (crop_box[2] - crop_box[0])*(crop_box[3] - crop_box[1])\n im = im.crop(crop_box)\n im.show()\n \n image_colors = im.getcolors(size[0]*size[1])\n \n #Build row name. This is a string that will appear at the left most colum of the csv. 
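all_text.py above calls is_minor, is_how_collect, is_geo_location, email, is_vendor and is_cookies without ever importing them, so as written the loop would stop at the first NameError; presumably they live in the same text_analysis module as the imported readability metrics. Independent of that fix, a per-file feature loop of this shape is easier to extend as a name-to-function map; here is a generic sketch with trivial stand-in features (the stand-ins are not the text_analysis implementations).

```python
# Collect per-document features into rows using a name -> function map.
def avg_sentence_length(text):
    sentences = [s for s in text.split(".") if s.strip()]
    words = text.split()
    return len(words) / max(len(sentences), 1)

FEATURES = {
    "n_chars": len,
    "n_words": lambda t: len(t.split()),
    "avg_sentence_length": avg_sentence_length,
}

def feature_rows(texts):
    header = ["doc_id"] + list(FEATURES)
    rows = [[i] + [fn(t) for fn in FEATURES.values()]
            for i, t in enumerate(texts)]
    return header, rows

header, rows = feature_rows(["One sentence. Two sentences here.", "Short."])
print(header)
for row in rows:
    print(row)
```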
One \n # row per image.\n row_name = folder + '_' + file.split('.')[0]\n \n # start to build the row that will be written. \n csv_row = [row_name]\n row_counts = np.zeros(L_int+1)\n # Here we loop over the colors\n for color in image_colors:\n # Classify color first with dictionary then knn\n for i in range(0,L_int+1):\n if tuple(color[1]) in dict_of_HCV_Classes[i]:\n row_counts[i] += color[0]\n break\n else:\n i = np.int(classifier.predict([color[1]])[0])\n row_counts[i-1] += color[0]\n dict_of_HCV_Classes[i].add(tuple(color[1]))\n \n # Write the row info to the csv_file\n csv_rows.append(csv_row+list(row_counts))\n \n os.chdir(orignial_directory)\n with open('color_data.csv', 'a', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(csv_rows)\n os.chdir(desired_directory)\n # We continue this way till we\n \n","sub_path":"Master_Munsell.py","file_name":"Master_Munsell.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"226351517","text":"import matplotlib.pyplot as plt\nfrom pyspark_dist_explore import hist\nfrom pyspark.sql.functions import col\nimport load_to_spark\nimport hotspot_detection\n\nbase_path = \"/scratch/wikipedia-dump/wiki_small_\"\nfilenames = []\nbots = [\"Bot\", \"Bots\"]\n\ndef init_df(filenames):\n df = load_to_spark.init_article_hotspot_df(filenames)\n df = df.where(col(\"author\").isNotNull())\n dfbots = df.where(col(\"author\").rlike(\"|\".join(bots)))\n df = df.subtract(dfbots)\n return df\n\ndef draw_histogram(df1, df2):\n plt.rcParams.update({})\n fig, axes = plt.subplots()\n axes.set_yscale(\"log\")\n axes.set_xlabel(\"Anzahl der Hotspots\")\n axes.set_ylabel(\"Anzahl der Artikel\")\n hist(axes, [df1, df2], bins=100, color=[\"red\", \"blue\"])\n plt.savefig(\"/scratch/wikipedia-dump/plots/hotspots/hotspot_weekly_vs_monthly.png\")\n\nfor i in range(1, 27):\n filenames.append(base_path + str(i) + \".json\")\n\n#filenames.append(base_path + \"1.json\")\n\ndf = init_df(filenames)\ndf_h_weekly = hotspot_detection.sliding_window_hotspots_by_time(df, window_size=\"1\")\ndf_h_monthly = hotspot_detection.sliding_window_hotspots_by_time(df, window_size=\"4\")\n\ndf_g1 = df_h_weekly.groupBy(col(\"title\")).count()\ndf_g2 = df_h_monthly.groupBy(col(\"title\")).count()\n\ndf_h1 = df_g1.select(col(\"count\"))\ndf_h2 = df_g2.select(col(\"count\"))\n\ndraw_histogram(df_h1, df_h2)\n","sub_path":"spark/Python/seitzjon/hotspot_weekly_vs_monthly.py","file_name":"hotspot_weekly_vs_monthly.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"504338786","text":"import contextlib\n\n\nclass LookingGlass:\n\n def __enter__(self):\n import sys\n self.original_write = sys.stdout.write\n sys.stdout.write = self.reverse_write\n return 'JABBERWOCKY'\n\n def reverse_write(self, text):\n self.original_write[text[::-1]]\n\n def __exit__(self, exc_type, exc_val, traceback):\n import sys\n sys.stdout.write = self.original_write\n if exc_type is ZeroDivisionError:\n print('Please DO NOT divide by zero!')\n return True\n\n\n@contextlib.contextmanager\ndef looking_glass():\n import sys\n origianl_write = sys.stdout.write\n\n def reverse_write(text):\n origianl_write(text[::-1])\n\n sys.stdout.write = reverse_write\n yield 'JABBERWOCKY'\n sys.stdout.write = origianl_write\n\n\n@contextlib.contextmanager\ndef looking_glass_2():\n import sys\n origianl_write = sys.stdout.write\n\n def 
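Master_Munsell.py above classifies each distinct pixel color with a 1-nearest-neighbor model over the Munsell RGB table and memoizes results in dict_of_HCV_Classes so repeated colors skip the KNN call; note also that its np.int(...) cast was removed in NumPy 1.24, so current NumPy needs plain int(...). The cache-then-classify pattern in isolation (the palette and colors here are made up):

```python
from sklearn.neighbors import KNeighborsClassifier

palette = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]      # classes 0, 1, 2
knn = KNeighborsClassifier(n_neighbors=1).fit(palette, range(len(palette)))

seen = {}                                              # rgb tuple -> class index

def classify(rgb):
    """1-NN palette lookup with memoization for already-seen colors."""
    if rgb not in seen:
        seen[rgb] = int(knn.predict([list(rgb)])[0])   # plain int, not np.int
    return seen[rgb]

for color in [(250, 10, 10), (250, 10, 10), (5, 200, 30)]:
    print(color, "->", classify(color))
```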
reverse_write(text):\n origianl_write(text[::-1])\n\n sys.stdout.write = reverse_write\n msg = ''\n try:\n yield 'JABBERWOCKY'\n except ZeroDivisionError:\n msg = 'Please DO NOT divide by zero!'\n finally:\n sys.stdout.write = origianl_write\n if msg:\n print(msg)\n\n\nif __name__ == '__main__':\n with looking_glass() as what:\n print('Alice, kitty and Snowdrop')\n print(what)\n","sub_path":"wait_merge/learn/f_prj/recursive_traverse/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"203491136","text":"#!python\n# (C) Copyright 2021 Intel Corporation.\n#\n# SPDX-License-Identifier: BSD-2-Clause-Patent\n#\n\"\"\"Build crt_utils component\"\"\"\n\nimport os\nimport sys\nimport daos_build\n# pylint: disable=no-name-in-module\n# pylint: disable=import-error\n# pylint: disable=ungrouped-imports\n\nLIB_SRC = ['crt_utils.c']\n\n####################################\n# Create cart utility share objects\n####################################\ndef build_utility_shared_obj(env):\n \"\"\"Build cart utility library\"\"\"\n cart_utils_objs = env.SharedObject(LIB_SRC, SHOBJPREFIX='s_')\n Export('cart_utils_objs')\n\ndef scons():\n \"\"\"Scons function\"\"\"\n\n Import('env', 'base_env', 'prereqs')\n\n prereqs.require(env, 'protobufc')\n\n daos_build.add_build_rpath(base_env)\n daos_build.add_build_rpath(env)\n base_env.AppendUnique(LIBPATH=[Dir('.')])\n\n env.Alias('install', '$PREFIX')\n\n # There is probably a better way to do this but let's get it linking first\n env.AppendUnique(LIBPATH=[Dir('.')])\n env.AppendUnique(CPPPATH=[Dir('.').srcnode()])\n env.AppendUnique(CPPPATH=[Dir('include').srcnode()])\n env.AppendUnique(CPPPATH=[Dir('../../mgmt').srcnode()])\n\n # Generate cart utility shared objects\n build_utility_shared_obj(env)\n\nif __name__ == \"SCons.Script\":\n scons()\n","sub_path":"src/cart/utils/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281257700","text":"import sqlite3\n\nimport cv2\nimport numpy as np\nimport pyzbar.pyzbar as pyzbar\n\nimport openfoodfacts\n\nconnection = sqlite3.connect('pantry.db')\n\ndef setup_database():\n # Product table create statement\n product_query = '''CREATE TABLE IF NOT EXISTS products (\n barcode TEXT PRIMARY KEY,\n name TEXT NOT NULL,\n unit TEXT NOT NULL\n );'''\n # Stock table create statement\n stock_query = '''CREATE TABLE IF NOT EXISTS stock (\n barcode TEXT PRIMARY KEY,\n quantity REAL,\n FOREIGN KEY (barcode)\n REFERENCES products (barcode)\n );'''\n\n # Create tables products and stock\n connection.execute(product_query)\n connection.execute(stock_query)\n\ndef decode_image(file):\n image = cv2.imread(file)\n decode_obj = pyzbar.decode(image)\n\n if not decode_obj:\n return None\n\n return str(decode_obj[0].data)\n\ndef get_stock():\n select_query = ''' SELECT stock.barcode, name, unit, quantity\n FROM stock\n LEFT JOIN products\n ON stock.barcode = products.barcode\n '''\n\n cursor = connection.cursor()\n cursor.execute(select_query)\n\n return cursor.fetchall()\n\ndef add_product_to_database(product):\n insert_query = '''INSERT INTO products (barcode, name, unit)\n VALUES (?, ?, ?)\n '''\n\n cursor = connection.cursor()\n cursor.execute(insert_query, (product['barcode'], product['name'], product['unit']))\n connection.commit()\n\ndef get_product(barcode):\n product = {}\n select_query 
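The class-based LookingGlass in the record above has a bug: reverse_write indexes the saved write function, `self.original_write[text[::-1]]`, instead of calling it, so any print inside the with block raises TypeError; the generator-based versions below it get this right. The corrected class-based form, trimmed to essentials:

```python
import sys

class LookingGlass:
    """Context manager that reverses everything written to stdout."""

    def __enter__(self):
        self.original_write = sys.stdout.write
        sys.stdout.write = self.reverse_write
        return 'JABBERWOCKY'

    def reverse_write(self, text):
        return self.original_write(text[::-1])   # call it, don't index it

    def __exit__(self, exc_type, exc_val, traceback):
        sys.stdout.write = self.original_write   # always restore stdout
        if exc_type is ZeroDivisionError:
            print('Please DO NOT divide by zero!')
            return True                          # swallow only this error

with LookingGlass() as what:
    print('Alice, kitty and Snowdrop')           # printed reversed
print(what)                                      # printed normally again
```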
= ''' SELECT products.barcode, name, unit, quantity\n FROM products\n LEFT JOIN stock\n ON products.barcode = stock.barcode\n WHERE products.barcode=?\n '''\n cursor = connection.cursor()\n cursor.execute(select_query, (barcode,))\n result = cursor.fetchone()\n\n if result is None:\n result = openfoodfacts.products.get_product(barcode)\n result = result.get('product')\n\n quantity = result.get('quantity')\n\n product['barcode'] = barcode\n product['name'] = result.get('product_name')\n\n qty_split = quantity.split()\n if len(qty_split) > 2:\n product['unit'] = qty_split[1].upper()\n else:\n product['unit'] = ''\n product['quantity'] = 0 # (quantity.split())[0]\n add_product_to_database(product)\n\n else:\n product['barcode'] = result[0]\n product['name'] = result[1]\n product['unit'] = result[2]\n\n if result[3] is None:\n product['quantity'] = 0\n else:\n product['quantity'] = result[3]\n\n return product\n\ndef insert_stock(product):\n create_query = '''INSERT INTO stock (barcode, quantity)\n VALUES (?, ?)\n '''\n\n cursor = connection.cursor()\n cursor.execute(create_query, (product['barcode'], product['quantity']))\n connection.commit()\n\n\ndef update_stock_table(product):\n update_query = '''UPDATE stock\n SET quantity = ?\n WHERE barcode = ?\n '''\n\n cursor = connection.cursor()\n cursor.execute(update_query, (product['quantity'], product['barcode']))\n connection.commit()\n\ndef delete_from_stock(product):\n delete_query = '''DELETE FROM stock\n WHERE stock.barcode = ?\n '''\n\n cursor = connection.cursor()\n cursor.execute(delete_query, (product['barcode'],))\n connection.commit()\n\ndef get_product_from_stock(barcode):\n select_query = '''SELECT *\n FROM stock\n WHERE barcode=?\n '''\n\n cursor = connection.cursor()\n cursor.execute(select_query, (barcode,))\n\n return cursor.fetchone()\n\ndef update_stock(product, quantity):\n product['quantity'] = quantity\n if product['quantity'] == 0:\n # The given product has been used and it's no longer avaliable\n delete_from_stock(product)\n return\n\n result = get_product_from_stock(product['barcode'])\n if result is None:\n # A product has been added to the stock\n insert_stock(product)\n else:\n # More has been added or used\n update_stock_table(product)\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"504016659","text":"# -*- coding: utf-8 -*-\n# author__ = 'yanjiajia'\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\nfrom horizon import exceptions\nfrom horizon import tables\nfrom horizon import messages\nfrom horizon.utils.functions import check_account_is_frozen\nimport logging\nfrom django.utils.translation import ungettext_lazy\nfrom django import template\nfrom openstack_dashboard.dashboards.cdn.cdn_domain_manager.models\\\n import Domain, SourceAddress, CacheRule, AccessControl\nfrom openstack_dashboard.dashboards.cdn import middware\nfrom django import shortcuts\nfrom datetime import datetime\nfrom openstack_dashboard.api.member.member import UserCenter\nfrom openstack_dashboard.api.logger import Logger\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_cdn():\n cdn_list = middware.DomainManage()\n return cdn_list.listAll()\n\n\nclass MyFilterAction(tables.FilterAction):\n def filter(self, table, domains, filter_string):\n \"\"\"加速域名过滤\"\"\"\n query = filter_string.lower()\n return [router for router in domains\n if query 
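The pantry functions.py record above declares FOREIGN KEY constraints on the stock table, but SQLite only enforces them when each connection opts in; without `PRAGMA foreign_keys = ON`, the REFERENCES clause is parsed and silently ignored, so orphan stock rows would be accepted. A minimal demonstration:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys = ON")   # enforcement is off by default
conn.execute("CREATE TABLE products (barcode TEXT PRIMARY KEY, name TEXT)")
conn.execute("""CREATE TABLE stock (
                    barcode TEXT PRIMARY KEY REFERENCES products (barcode),
                    quantity REAL)""")

try:
    # No matching product row: rejected only because the pragma is on.
    conn.execute("INSERT INTO stock VALUES ('000', 1.0)")
except sqlite3.IntegrityError as exc:
    print("rejected:", exc)
```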
in router.name.lower()]\n\n\n# 创建加速域名\nclass CreateDomain(tables.LinkAction):\n name = \"create\"\n verbose_name = _(\"Add\")\n url = \"horizon:cdn:cdn_domain_manager:create\"\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n\n def allowed(self, request, datum=None):\n self.verbose_name = _(\"create\")\n self.classes = [c for c in self.classes if c != \"disabled\"]\n account_is_frozen = check_account_is_frozen(request)\n if account_is_frozen:\n if \"disabled\" not in self.classes:\n self.classes = [c for c in self.classes] + ['disabled']\n else:\n self.classes = [c for c in self.classes if c != \"disabled\"]\n return True\n\n\n# 禁用加速域名\nclass DisableDomain(tables.BatchAction):\n \"\"\"定制父类\"\"\"\n name = \"Disable\"\n\n def __init__(self, **kwargs):\n super(DisableDomain, self).__init__(**kwargs)\n self.icon = 'stop'\n\n def get_default_classes(self):\n \"\"\"重载类方法\"\"\"\n classes = (\"btn\", \"btn-default\", \"btn-sm\")\n return classes\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n _(\"Disable\"),\n _(\"Disable\"),\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n _(\"Disable\"),\n _(\"Disable\"),\n count\n )\n\n redirect_url = \"horizon:cdn:cdn_domain_manager:index\"\n\n def action(self, request, obj_id):\n return self.disable(request, obj_id)\n\n def disable(self, request, obj_id):\n try:\n # 执行删除前状态检查\n domain = Domain.objects.get(pk=obj_id)\n if domain.status == 'Deployed':\n domain.status = 'inProgress'\n domain.save()\n a = middware.DomainManage()\n a.disable(domainId=domain.domain_id)\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Disable Domain Name',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Success')\n else:\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Disable Domain Name',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Error')\n msg = _(\"%s status is %s, can not do this action\") % (domain.domain_name, _(domain.status))\n messages.warning(request, msg)\n except Exception:\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Disable Domain Name',\n resource_name='CDN', config='',\n status='Error')\n obj = self.table.get_object_by_id(obj_id)\n name = self.table.get_object_display(obj)\n msg = _('Unable to disable domain %s') % name\n LOG.info(msg)\n messages.error(request, msg)\n exceptions.handle(request, msg)\n redirect = reverse(self.redirect_url)\n raise exceptions.Http302(redirect, message=msg)\n\n def allowed(self, request, domain=None):\n account_is_frozen = check_account_is_frozen(request)\n if account_is_frozen:\n if \"disabled\" not in self.classes:\n self.classes = [c for c in self.classes] + ['disabled']\n else:\n self.classes = [c for c in self.classes if c != \"disabled\"]\n return True\n\n def handle(self, table, request, obj_ids):\n action_success = []\n action_failure = []\n action_not_allowed = []\n for datum_id in obj_ids:\n datum = table.get_object_by_id(datum_id)\n datum_display = table.get_object_display(datum) or datum_id\n if not table._filter_action(self, request, datum):\n action_not_allowed.append(datum_display)\n LOG.info('Permission denied to %s: \"%s\"' %\n (self._get_action_name(past=True).lower(),\n datum_display))\n continue\n try:\n self.action(request, datum_id)\n self.update(request, datum)\n action_success.append(datum_display)\n self.success_ids.append(datum_id)\n except Exception as ex:\n if getattr(ex, \"_safe_message\", None):\n ignore = False\n 
else:\n ignore = True\n action_failure.append(datum_display)\n exceptions.handle(request, ignore=ignore)\n\n return shortcuts.redirect(self.get_success_url(request))\n\n\nclass EnableDomain(tables.BatchAction):\n \"\"\"定制父类\"\"\"\n name = \"Enable\"\n\n def __init__(self, **kwargs):\n super(EnableDomain, self).__init__(**kwargs)\n self.icon = 'play'\n\n def get_default_classes(self):\n \"\"\"重载类方法\"\"\"\n classes = (\"btn\", \"btn-default\", \"btn-sm\")\n return classes\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n _(\"Enable\"),\n _(\"Enable\"),\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n _(\"Enable\"),\n _(\"Enable\"),\n count\n )\n\n redirect_url = \"horizon:cdn:cdn_domain_manager:index\"\n\n def action(self, request, obj_id):\n account_is_frozen = check_account_is_frozen(request)\n if account_is_frozen:\n raise exceptions.AccountFrozenException\n return self.enable(request, obj_id)\n\n def enable(self, request, obj_id):\n try:\n domain = Domain.objects.get(pk=obj_id)\n if domain.status == 'Deployed':\n domain.status = 'inProgress'\n domain.save()\n a = middware.DomainManage()\n a.enable(domainId=domain.domain_id)\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Enable Domain Name',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Success')\n else:\n Logger(request).create(resource_type='CDN', action_name='Enable Domain Name',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Error')\n msg = _(\"%s status is %s, can not do this action\") % (domain.domain_name, _(domain.status))\n messages.warning(request, msg)\n except Exception:\n Logger(request).create(resource_type='CDN', action_name='Enable Domain Name',\n resource_name='CDN', config='',\n status='Error')\n obj = self.table.get_object_by_id(obj_id)\n name = self.table.get_object_display(obj)\n msg = _('Unable to enable domain %s') % name\n LOG.info(msg)\n messages.error(request, msg)\n exceptions.handle(request, msg)\n redirect = reverse(self.redirect_url)\n raise exceptions.Http302(redirect, message=msg)\n\n def allowed(self, request, domain=None):\n account_is_frozen = check_account_is_frozen(request)\n if account_is_frozen:\n if \"disabled\" not in self.classes:\n self.classes = [c for c in self.classes] + ['disabled']\n else:\n self.classes = [c for c in self.classes if c != \"disabled\"]\n return True\n\n def handle(self, table, request, obj_ids):\n action_success = []\n action_failure = []\n action_not_allowed = []\n for datum_id in obj_ids:\n datum = table.get_object_by_id(datum_id)\n datum_display = table.get_object_display(datum) or datum_id\n if not table._filter_action(self, request, datum):\n action_not_allowed.append(datum_display)\n LOG.info('Permission denied to %s: \"%s\"' %\n (self._get_action_name(past=True).lower(),\n datum_display))\n continue\n try:\n self.action(request, datum_id)\n self.update(request, datum)\n action_success.append(datum_display)\n self.success_ids.append(datum_id)\n except Exception as ex:\n if getattr(ex, \"_safe_message\", None):\n ignore = False\n else:\n ignore = True\n action_failure.append(datum_display)\n exceptions.handle(request, ignore=ignore)\n\n return shortcuts.redirect(self.get_success_url(request))\n\n\n\n# 删除加速域名\nclass DeleteDomain(tables.DeleteAction):\n name = 'Delete'\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n _(\"Delete\"),\n _(\"Delete\"),\n count\n )\n\n @staticmethod\n def action_past(count):\n 
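# ------------------------------------------------------------------
# A small side sketch (not original code): the four-line _safe_message branch
# in handle() reduces to one boolean expression -- suppress the raw exception
# text exactly when the exception carries no safe message to show instead.
def should_ignore(ex):
    return getattr(ex, "_safe_message", None) is None

# i.e.  exceptions.handle(request, ignore=should_ignore(ex))
# ------------------------------------------------------------------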
return ungettext_lazy(\n _(\"Deleted\"),\n _(\"Deleted\"),\n count\n )\n\n redirect_url = \"horizon:cdn:cdn_domain_manager:index\"\n\n def delete(self, request, obj_id):\n try:\n # 执行删除前状态检查,当状态为\"unverified\"或\"failed\"时直接删除数据库记录\n # 否则更新状态为\"deleted\",保证数据统计时有记录,然后调用网宿api,删除加速记录\n domain = Domain.objects.get(pk=obj_id)\n cdn = middware.DomainManage()\n if domain.status == \"unverified\" or domain.status == \"failed\" or domain.status == \"addfailed\" \\\n or domain.status == 'verified':\n domain.delete()\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Delete Domain Name',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Success')\n elif domain.status == 'inProgress':\n msg = _(\"%s status is %s, can not do this action\") % (domain.domain_name, _(domain.status))\n messages.warning(request, msg)\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Delete Domain Name',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Error')\n else:\n domain.status = 'deleted'\n domain.deleted_at = datetime.utcnow()\n domain.save()\n cdn.disable(domainId=domain.domain_id)\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Delete Domain Name',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Success')\n\n except Exception:\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Delete Domain Name',\n resource_name='CDN', config='',\n status='Error')\n name = self.table.get_object_display(obj_id)\n msg = _('Unable to delete domain %s') % name\n LOG.info(msg)\n messages.error(request, msg)\n exceptions.handle(request, msg)\n redirect = reverse(self.redirect_url)\n raise exceptions.Http302(redirect, message=msg)\n\n def allowed(self, request, domain=None):\n return True\n\n\n# row actions\nclass ConfigDomain(tables.LinkAction):\n \"\"\"无需设置classes,直接跳转到url,避免对当前url发送请求\"\"\"\n name = \"update\"\n verbose_name = _(\"Edit\")\n url = \"horizon:cdn:cdn_domain_manager:update\"\n icon = \"pencil\"\n\n def allowed(self, request, domain=None):\n account_is_frozen = check_account_is_frozen(request)\n if account_is_frozen:\n if \"disabled\" not in self.classes:\n self.classes = [c for c in self.classes] + ['disabled']\n else:\n self.classes = [c for c in self.classes if c != \"disabled\"]\n return True\n\n\nclass VerifyDomain(tables.LinkAction):\n name = \"verify\"\n verbose_name = _(\"Verify\")\n url = \"horizon:cdn:cdn_domain_manager:verify\"\n classes = (\"ajax-modal\",)\n icon = \"camera\"\n\n def allowed(self, request, domain=None):\n account_is_frozen = check_account_is_frozen(request)\n if account_is_frozen:\n if \"disabled\" not in self.classes:\n self.classes = [c for c in self.classes] + ['disabled']\n else:\n self.classes = [c for c in self.classes if c != \"disabled\"]\n return True\n\n\nclass UpdateRow(tables.Row):\n ajax = True\n\n def get_data(self, request, id):\n domain_api = middware.DomainManage()\n domain = Domain.objects.get(pk=id)\n cdn = domain_api.find(domainId=domain.domain_id)\n if not cdn.domain:\n return domain\n if domain.status != cdn.domain.status:\n domain.status = cdn.domain.status\n domain.Enable = cdn.domain.enabled\n domain.save()\n return domain\n\n\n# 加速域名数据展示表格\nclass DomainManagerTable(tables.DataTable):\n STATUS_DISPLAY_CHOICES_A = (\n (\"unverified\", _(\"Unverified\")),\n (\"failed\", _(\"Verify Failed\")),\n (\"addfailed\", _(\"Accelerate Failed\")),\n (\"Deployed\", _(\"Deployed\")),\n (\"inProgress\", 
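# ------------------------------------------------------------------
# A minimal sketch (an assumption, not part of the original module): the
# string comparisons against domain.status in delete()/disable()/enable()
# can be centralised so the allowed-status sets live in a single place.
HARD_DELETE_STATUSES = frozenset({"unverified", "failed", "addfailed", "verified"})
BUSY_STATUSES = frozenset({"inProgress"})

def delete_mode(status):
    """Classify a domain status as 'hard', 'busy' or 'soft' for deletion."""
    if status in HARD_DELETE_STATUSES:
        return "hard"   # drop the DB row outright
    if status in BUSY_STATUSES:
        return "busy"   # refuse the action and warn the user
    return "soft"       # mark deleted, then disable via the CDN API

assert delete_mode("verified") == "hard"
assert delete_mode("inProgress") == "busy"
assert delete_mode("Deployed") == "soft"
# ------------------------------------------------------------------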
_(\"inProgress\")),\n )\n\n STATUS_CHOICES = (\n ('unverified', True),\n ('failed', False),\n ('addfailed', False),\n ('Deployed', True),\n ('inProgress', None),\n )\n domain_name = tables.Column(\"domain_name\", verbose_name=_(\"Domain Name\"))\n source_type = tables.Column(\"source_type\", verbose_name=_(\"Origin Domain Type\"))\n domain_cname = tables.Column('domain_cname', verbose_name=_(\"CNAME\"))\n status = tables.Column('status', verbose_name=_(\"Status\"), status=True,\n display_choices=STATUS_DISPLAY_CHOICES_A,\n status_choices=STATUS_CHOICES)\n Enable = tables.Column('Enable', verbose_name=_(\"Enable\"))\n\n def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):\n super(DomainManagerTable, self).__init__(\n request,\n data=data,\n needs_form_wrapper=needs_form_wrapper,\n **kwargs)\n\n def get_object_display(self, obj):\n return obj.domain_name\n\n def render_row_actions(self, datum, pull_right=True, row=True):\n if row:\n template_path = self._meta.row_actions_row_template\n else:\n template_path = self._meta.row_actions_dropdown_template\n\n row_actions_template = template.loader.get_template(template_path)\n bound_actions = self.get_row_actions(datum)\n extra_context = {\"row_actions\": bound_actions,\n \"row_id\": self.get_object_id(datum),\n \"pull_right\": pull_right}\n context = template.RequestContext(self.request, extra_context)\n return row_actions_template.render(context)\n\n class Meta(object):\n name = \"DomainManager\"\n verbose_name = _(\"Domain List\")\n status_columns = [\"status\"]\n row_class = UpdateRow\n table_actions = (CreateDomain, DisableDomain,\n EnableDomain, DeleteDomain, MyFilterAction)\n row_actions = (ConfigDomain, VerifyDomain)\n\n\n# 访问控制表\nclass AddAccessRule(tables.LinkAction):\n name = \"AddAccessControl\"\n verbose_name = _(\"Add\")\n url = \"horizon:cdn:cdn_domain_manager:addaccess\"\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n\n def get_link_url(self, datum=None):\n \"\"\"将url传参的domain_id发���到table的table_actions\"\"\"\n domain_id = self.table.kwargs['domain_id']\n return reverse(self.url, args=(domain_id,))\n\n\nclass ModifyAccessRule(tables.LinkAction):\n name = \"ModifyAccessRule\"\n verbose_name = _(\"Modify\")\n url = \"horizon:cdn:cdn_domain_manager:modifyaccess\"\n classes = (\"ajax-modal\",)\n icon = \"pencil\"\n\n def get_link_url(self, datum=None):\n \"\"\"将url传参的domain_id发送到table的table_actions\"\"\"\n domain_id = self.table.kwargs['domain_id']\n access_id = self.table.get_object_id(datum)\n return reverse(self.url, args=(domain_id, unicode(access_id)))\n\n\nclass DeleteAccessRule(tables.DeleteAction):\n name = 'Delete'\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n _(\"Delete\"),\n _(\"Delete\"),\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n _(\"Deleted\"),\n _(\"Deleted\"),\n count\n )\n\n def delete(self, request, obj_id):\n try:\n access = AccessControl.objects.get(pk=obj_id)\n domain = Domain.objects.get(pk=access.domain_id)\n domain_manager = middware.DomainManage()\n ret = domain_manager.find(domain.domain_id)\n domain_class = ret.getDomain()\n visitControlRules = domain_class.visitControlRules\n if visitControlRules is not None:\n for i in domain_class.visitControlRules:\n if access.pathPattern == i.pathPattern:\n visitControlRules.remove(i)\n domain_class = middware.domainApi.Domain(domainId=domain.domain_id,\n visitControlRules=domain_class.visitControlRules)\n domain_manager.modify(domain_class)\n access.delete()\n\n # 插入操作日志\n 
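# ------------------------------------------------------------------
# A minimal sketch (not original code): DeleteAccessRule.delete() above calls
# list.remove() on visitControlRules while iterating over that same list,
# which can silently skip elements. Rebuilding the list avoids the pitfall.
def drop_rules(rules, path_pattern):
    """Return rules with every entry matching path_pattern removed."""
    return [r for r in rules if r.pathPattern != path_pattern]

# Usage sketch:
#   domain_class.visitControlRules = drop_rules(domain_class.visitControlRules,
#                                               access.pathPattern)
# ------------------------------------------------------------------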
Logger(request).create(resource_type='CDN', action_name='Delete Access',\n resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Success')\n\n except Exception:\n obj = self.table.get_object_by_id(obj_id)\n name = self.table.get_object_display(obj)\n msg = _('Unable to delete domain access rule %s') % name\n LOG.info(msg)\n messages.error(request, msg)\n exceptions.handle(request, msg)\n redirect = reverse(self.redirect_url)\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Delete Access',\n resource_name='CDN', config=_('Unable to delete domain access rule %s') % name,\n status='Error')\n raise exceptions.Http302(redirect, message=msg)\n\n\n\n def allowed(self, request, domain=None):\n return True\n\n\nclass UpdateAccessControlTable(tables.DataTable):\n pathPattern = tables.Column(\"pathPattern\", verbose_name=_(\"Type\"))\n allowNullReffer = tables.Column(\"allowNullReffer\", verbose_name=_(\"Refer\"),\n help_text=_(\"If allow request referer to be empty\"))\n validRefers = tables.Column('validRefers', verbose_name=_(\"White List\"))\n invalidRefers = tables.Column(\"invalidRefers\", verbose_name=_(\"Black List\"))\n forbiddenIps = tables.Column(\"forbiddenIps\", verbose_name=_(\"Forbid IP\"))\n\n def __init__(self, request, data=None, **kwargs):\n super(UpdateAccessControlTable, self).__init__(\n request,\n data=data,\n **kwargs)\n\n def get_object_id(self, datum):\n return datum.id\n\n def get_object_display(self, datum):\n return datum.pathPattern\n\n class Meta(object):\n name = \"access\"\n verbose_name = _(\"Update Access Control\")\n table_actions = (AddAccessRule, DeleteAccessRule)\n row_actions = (ModifyAccessRule,)\n\n\n# 缓存规则表\nclass AddCacheRule(tables.LinkAction):\n name = \" EditCacheRule\"\n verbose_name = _(\"Add\")\n url = \"horizon:cdn:cdn_domain_manager:addcache\"\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n\n def get_link_url(self, datum=None):\n \"\"\"将url传参的domain_id发送到table的table_actions\"\"\"\n domain_id = self.table.kwargs['domain_id']\n return reverse(self.url, args=(domain_id,))\n\n\n\n\nclass ModifyCacheRule(tables.LinkAction):\n name = \"ModifyCacheRule\"\n verbose_name = _(\"Modify\")\n url = \"horizon:cdn:cdn_domain_manager:modifycache\"\n classes = (\"ajax-modal\",)\n icon = \"pencil\"\n\n def get_link_url(self, datum=None):\n \"\"\"将url传参的domain_id发送到table的table_actions\"\"\"\n domain_id = self.table.kwargs['domain_id']\n cache_id = self.table.get_object_id(datum)\n return reverse(self.url, args=(domain_id, unicode(cache_id)))\n\n\nclass DeleteCacheRule(tables.DeleteAction):\n name = 'Delete'\n\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n _(\"Delete\"),\n _(\"Delete\"),\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n _(\"Deleted\"),\n _(\"Deleted\"),\n count\n )\n\n def delete(self, request, obj_id):\n try:\n cache = CacheRule.objects.get(pk=obj_id)\n domain = Domain.objects.get(pk=cache.domain_id)\n domain_manager = middware.DomainManage()\n ret = domain_manager.find(domain.domain_id)\n domain_class = ret.getDomain()\n cacheBehaviors = domain_class.cacheBehaviors\n if cacheBehaviors is not None:\n for i in cacheBehaviors:\n if cache.pathPattern == i.pathPattern:\n cacheBehaviors.remove(i)\n domain_class = middware.domainApi.Domain(domainId=domain.domain_id,\n cacheBehaviors=cacheBehaviors)\n domain_manager.modify(domain_class)\n cache.delete()\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Delete Cache',\n 
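# ------------------------------------------------------------------
# A minimal sketch (an assumption, not part of this module): the paired
# Success/Error Logger(request).create() calls are duplicated in every
# action above; a small context manager factors them out. Logger is the
# class already imported at the top of this file.
from contextlib import contextmanager

@contextmanager
def logged_action(request, action_name, config=''):
    try:
        yield
    except Exception:
        Logger(request).create(resource_type='CDN', action_name=action_name,
                               resource_name='CDN', config='', status='Error')
        raise
    else:
        Logger(request).create(resource_type='CDN', action_name=action_name,
                               resource_name='CDN', config=config, status='Success')

# Usage sketch:
#   with logged_action(request, 'Delete Cache', _('Domain: %s') % domain.domain_name):
#       ...perform the CDN modification...
# ------------------------------------------------------------------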
resource_name='CDN', config=_('Domain: %s') %domain.domain_name,\n status='Success')\n\n except Exception:\n obj = self.table.get_object_by_id(obj_id)\n name = self.table.get_object_display(obj)\n msg = _('Unable to delete domain cache rule %s') % name\n LOG.info(msg)\n messages.error(request, msg)\n exceptions.handle(request, msg)\n redirect = reverse(self.redirect_url)\n\n # 插入操作日志\n Logger(request).create(resource_type='CDN', action_name='Delete Cache',\n resource_name='Domain', config= _('Unable to delete domain cache rule %s') % name,\n status='Error')\n\n raise exceptions.Http302(redirect, message=msg)\n\n def allowed(self, request, domain=None):\n return True\n\n\nclass UpdateCacheRuleTable(tables.DataTable):\n pathPattern = tables.Column(\"pathPattern\", verbose_name=_(\"Type\"))\n ignoreCacheControl = tables.Column('ignoreCacheControl', verbose_name=_(\"Ignore\"),\n help_text=_(\"If set the cache-control in the HTTP header\"))\n cacheTtl = tables.Column('cacheTtl', verbose_name=_(\"Time\"))\n\n def __init__(self, request, data=None, **kwargs):\n super(UpdateCacheRuleTable, self).__init__(\n request,\n data=data,\n **kwargs)\n\n def get_object_id(self, datum):\n return datum.id\n\n def get_object_display(self, datum):\n return datum.pathPattern\n\n class Meta(object):\n name = \"cache\"\n verbose_name = _(\"Update Cache Rule\")\n table_actions = (AddCacheRule, DeleteCacheRule)\n row_actions = (ModifyCacheRule,)\n\n\n","sub_path":"horizon/openstack_dashboard/dashboards/cdn/cdn_domain_manager/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":25894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"254380352","text":"import FWCore.ParameterSet.Config as cms\n\n### genjet cleaning for improved matching in HI environment\n\nfrom RecoHI.HiJetAlgos.HiGenCleaner_cff import *\n\niterativeCone5HiCleanedGenJets = heavyIonCleanedGenJets.clone( src = cms.InputTag('iterativeCone5HiGenJets'))\niterativeCone7HiCleanedGenJets = heavyIonCleanedGenJets.clone( src = cms.InputTag('iterativeCone7HiGenJets'))\nak3HiCleanedGenJets = heavyIonCleanedGenJets.clone( src = cms.InputTag('ak3HiGenJets'))\nak5HiCleanedGenJets = heavyIonCleanedGenJets.clone( src = cms.InputTag('ak5HiGenJets'))\nak7HiCleanedGenJets = heavyIonCleanedGenJets.clone( src = cms.InputTag('ak7HiGenJets'))\n\n### jet analyzer for two radii (0.5, 0.7) and three algorithms:\n### iterative cone with PU, anti-kt with PU, anti-kt with fastjet PU\n\nJetAnalyzerICPU5Calo = cms.EDAnalyzer(\"CaloJetTester\",\n src = cms.InputTag(\"iterativeConePu5CaloJets\"),\n srcGen = cms.InputTag(\"iterativeCone5HiCleanedGenJets\"),\n genEnergyFractionThreshold = cms.double(0.05),\n genPtThreshold = cms.double(1.0),\n RThreshold = cms.double(0.3),\n reverseEnergyFractionThreshold = cms.double(0.5) \n)\n\nJetAnalyzerICPU7Calo = cms.EDAnalyzer(\"CaloJetTester\",\n src = cms.InputTag(\"iterativeConePu7CaloJets\"),\n srcGen = cms.InputTag(\"iterativeCone7HiCleanedGenJets\"), \n genEnergyFractionThreshold = cms.double(0.05),\n genPtThreshold = cms.double(1.0),\n RThreshold = cms.double(0.3),\n reverseEnergyFractionThreshold = cms.double(0.5) \n)\n\nJetAnalyzerAkPU5Calo = cms.EDAnalyzer(\"CaloJetTester\",\n src = cms.InputTag(\"akPu5CaloJets\"),\n srcGen = cms.InputTag(\"ak5HiCleanedGenJets\"), \n genEnergyFractionThreshold = cms.double(0.05),\n genPtThreshold = cms.double(1.0),\n RThreshold = cms.double(0.3),\n reverseEnergyFractionThreshold = cms.double(0.5) \n)\n\nJetAnalyzerAkPU3Calo = 
cms.EDAnalyzer(\"CaloJetTester\",\n src = cms.InputTag(\"akPu3CaloJets\"),\n srcGen = cms.InputTag(\"ak3HiCleanedGenJets\"), \n genEnergyFractionThreshold = cms.double(0.05),\n genPtThreshold = cms.double(1.0),\n RThreshold = cms.double(0.3),\n reverseEnergyFractionThreshold = cms.double(0.5)\n)\n\nJetAnalyzerAkPU5PF = cms.EDAnalyzer(\"CaloJetTester\",\n src = cms.InputTag(\"akPu5PFJets\"),\n srcGen = cms.InputTag(\"ak5HiCleanedGenJets\"),\n genEnergyFractionThreshold = cms.double(0.05),\n genPtThreshold = cms.double(1.0),\n RThreshold = cms.double(0.3),\n reverseEnergyFractionThreshold = cms.double(0.5)\n )\n\nJetAnalyzerAkPU3PF = cms.EDAnalyzer(\"CaloJetTester\",\n src = cms.InputTag(\"akPu3PFJets\"),\n srcGen = cms.InputTag(\"ak3HiCleanedGenJets\"),\n genEnergyFractionThreshold = cms.double(0.05),\n genPtThreshold = cms.double(1.0),\n RThreshold = cms.double(0.3),\n reverseEnergyFractionThreshold = cms.double(0.5)\n )\n\n\nhiJetValidation = cms.Sequence(\n ak3HiCleanedGenJets * ak5HiCleanedGenJets\n * JetAnalyzerAkPU5Calo\n * JetAnalyzerAkPU3PF * JetAnalyzerAkPU5PF \n )\n","sub_path":"2013/cmssw5312p3_relval/Validation/RecoHI/python/JetValidationHeavyIons_cff.py","file_name":"JetValidationHeavyIons_cff.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"310828636","text":"from neo4j import GraphDatabase\nimport time\nimport xlsxwriter\n\nuri = \"bolt://localhost:7687\"\nuser = \"neo4j\"\npsw = \"giuseppe\"\n\ndriver = GraphDatabase.driver(uri, auth=(user, psw))\nsession = driver.session()\n\n\n\ndef tempo(): return int(round(time.time() * 1000))\n\ndef query_1(name):\n query_1 = \"\"\"MATCH (a:USER) WHERE a.name = $name \n RETURN count(a.name) AS conta\"\"\"\n risultati = session.run(query_1, name = name)\n dati = [dato[\"conta\"] for dato in risultati]\n return dati\n\ndef query_2(name, cf):\n query_2 = \"\"\"MATCH (a:USER)-[r:is_RECALL]->(c:CLAIM)\n WHERE a.name = $name OR \n a.cf = $cf \n\n RETURN r AS tipo_relazione, c AS richiamo, a AS utente\n ORDER BY a.name\"\"\"\n risultati = session.run(query_2, name = name, cf = cf)\n dati = [dato[\"utente\"] for dato in risultati]\n\n return dati\n\ndef query_3(name, cf, email):\n query_3= \"\"\" MATCH (a:USER)-[r1:is_RECALL]->(c:CLAIM),\n (b:EVAL)-[r2:is_INVOLVED_EVAL]->(c:CLAIM)<-[r3:is_INVOLVED_LAWYER]-(d:LAWYER)\n WHERE a.name = $name AND\n a.cf = $cf AND\n a.email = $email \n RETURN a AS utente, r1, c AS richiamo, r2, r3, b AS perito, d AS avvocato\n \"\"\"\n\n risultati = session.run(query_3, name = name, cf = cf, email = email)\n dati = [dato[\"utente\"] for dato in risultati]\n\n return dati\n\ndef query_4(name, cf, email, cell):\n query_4 = \"\"\"MATCH (a:USER)-[r1:is_RECALL]->(c:CLAIM),\n (b:EVAL)-[r4:is_INVOLVED_EVAL]->(c:CLAIM)<-[r5:is_INVOLVED_LAWYER]-(d:LAWYER),\n (b:EVAL)-[r6:work_FOR_EVAL]->(e:COMPANY)<-[r7:work_FOR_LAWYER]-(d:LAWYER) \n WHERE a.name = $name AND\n a.cf = $cf AND\n a.email = $email AND \n a.cell = $cell \n\n RETURN a AS utente, b as perito, c AS richiamo, d as avvocato, e AS Compagnia, r1\n \n \"\"\"\n\n\n risultati = session.run(query_4, name = name, cf = cf, email = email,cell = cell )\n dati = [dato[\"utente\"] for dato in risultati]\n\n return dati\n\ndef query_5(name, cf, email, cell, address):\n query_5 = \"\"\"MATCH (a:USER)-[r1:is_RECALL]->(c:CLAIM),\n (b:EVAL)-[r4:is_INVOLVED_EVAL]->(c:CLAIM)<-[r5:is_INVOLVED_LAWYER]-(d:LAWYER),\n (b:EVAL)-[r6:work_FOR_EVAL]->(e:COMPANY)<-[r7:work_FOR_LAWYER]-(d:LAWYER)\n 
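# ------------------------------------------------------------------
# A minimal sketch (an assumption, not part of the original _cff file): the
# CaloJetTester modules above differ only in src/srcGen, so a small factory
# keeps the shared thresholds in one place. Assumes a CMSSW environment where
# FWCore.ParameterSet.Config is importable as cms, as at the top of the file.
def make_jet_tester(src, src_gen):
    return cms.EDAnalyzer("CaloJetTester",
        src = cms.InputTag(src),
        srcGen = cms.InputTag(src_gen),
        genEnergyFractionThreshold = cms.double(0.05),
        genPtThreshold = cms.double(1.0),
        RThreshold = cms.double(0.3),
        reverseEnergyFractionThreshold = cms.double(0.5)
    )

# e.g.  JetAnalyzerAkPU3PF = make_jet_tester("akPu3PFJets", "ak3HiCleanedGenJets")
# ------------------------------------------------------------------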
\n WHERE a.name = $name AND\n a.cf = $cf AND\n a.email = $email AND\n a.cell = $cell AND\n a.address STARTS WITH $address \n \n\n RETURN a AS utente, b AS Perito,c AS richiamo, d AS avvocato, e AS Compagnia, r1, r4, r5, r6, r7\n ORDER BY a.cf\n \"\"\"\n\n risultati = session.run(query_5, name= name, cf = cf, email = email, cell = cell, address = address)\n dati = [dato[\"utente\"] for dato in risultati]\n return dati\n\nworkbook = xlsxwriter.Workbook('RisultatiDati10000Neo4j.xlsx')\nworksheet = workbook.add_worksheet()\nrow = 1\ncol = 0\n\nfor y in range(31):\n a1 = tempo()\n #query_1(\"Costantino Spinola\")\n #query_2(\"Costantino Spinola\", \"EJXDYF51Z17L243B\")\n #query_3(\"Costantino Spinola\", \"EJXDYF51Z17L243B\", \"gpacomio@tim.it\")\n #query_4(\"Costantino Spinola\", \"EJXDYF51Z17L243B\", \"gpacomio@tim.it\", \"+39 8188 04739575\")\n query_5(\"Costantino Spinola\", \"EJXDYF51Z17L243B\", \"gpacomio@tim.it\", \"+39 8188 04739575\", \"Vicolo\")\n a2 = tempo()\n print(\"abbiamo ottenuto il (\", y + 1, \"°) risultato in\", a2 - a1, \"millisecondi\\n\")\n\n worksheet.write(row, col, a2 - a1)\n row += 1\n\nworksheet.write(row, col, a2 - a1)\nworkbook.close()\nsession.close()\ndriver.close()","sub_path":"Prova/4_Centomila_record/query_neo4j_100000.py","file_name":"query_neo4j_100000.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"217646612","text":"#!/usr/bin/env python3\n#Uses alot of cpu - 100% of 3/8 cores\nimport LoginUI, Utilities, Serial, Bioreactor, Updater\nimport c_ui_v2 as C_UI\nfrom Constants import *\nfrom serial.serialutil import SerialException as SerialError\nimport tkinter as tk\n\nclass UI_Manager:\n def __init__(self, parent):\n self.parent, self.connected, self.logged_in = parent, False, False\n self.loaded, self.graph_colour, self.serial = False, None, None\n self.vals, self.user = {'pH': [], 'temperature': [], 'speed': []}, 'GUEST'\n self.current_graph, self.updater_win, self.modify_ui = '', None, None\n\n def open_login(self):\n self.logged_in = False\n self.user = 'GUEST'\n self.close_all()\n self.login=LoginUI.LoginMenu(self.parent,self,SCREENWIDTH,SCREENHEIGHT,\n bg=GREY, highlightthickness=0)\n self.login.grid(row=0, column=0, sticky='nesw')\n \n def check_login(self, event=None):\n username = self.login.user_entry.get()\n password = self.login.pass_entry.get()\n if username == USERNAME and password == PASSWORD:\n self.open_dashboard('ADMIN')\n else:\n msg = 'Your login details were incorrect, please try again'\n tk.messagebox.showwarning('Login unsuccessful', msg)\n self.open_login()\n \n def open_dashboard(self, user='GUEST'):\n self.close_all()\n self.logged_in = True\n self.user = user\n self.dashboard=C_UI.BioreactorUI(self.parent,self,user,SCREENWIDTH,\n SCREENHEIGHT, bg=GREY,\n highlightthickness=0)\n self.dashboard.grid(row=0, column=0, sticky='nesw')\n self.parent.update()\n self.run_dashboard()\n\n def config_updater(self):\n self.updater_win = tk.Toplevel(win)\n self.updater_win.geometry('{}x{}'.format(int(0.5*SCREENWIDTH), int(0.5*SCREENHEIGHT)))\n self.updater_win.title('Alter pH, Temperature and Stirring Speed of bioreactor')\n self.updater_win.columnconfigure(0, weight=1)\n self.updater_win.rowconfigure(0, weight=1)\n self.updater_win.attributes('-topmost',True)\n self.updater_win.attributes('-topmost',False)\n Utilities.centralise(self.updater_win)\n\n def open_updater(self):\n self.config_updater()\n self.modify_ui = 
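# ------------------------------------------------------------------
# A minimal sketch (not part of the original script): time.perf_counter() is
# a monotonic, sub-millisecond timer and is generally preferable to
# int(round(time.time() * 1000)) for micro-benchmarks like the loop above.
# Note also that the extra worksheet.write() after the loop records the last
# measurement a second time.
import time

def time_query_ms(fn, *args):
    """Run fn(*args) once and return the elapsed wall time in milliseconds."""
    t0 = time.perf_counter()
    fn(*args)
    return (time.perf_counter() - t0) * 1000.0

# Usage sketch:
#   for row in range(1, 32):
#       worksheet.write(row, 0, time_query_ms(query_5, name, cf, email, cell, addr))
# ------------------------------------------------------------------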
Updater.UpdaterUI(self.updater_win, reactor,\n width=0.5*SCREENWIDTH,\n height=0.5*SCREENHEIGHT)\n self.modify_ui.grid(row=0, column=0, sticky='nesw')\n self.updater_win.update()\n self.parent.update()\n\n def run_dashboard(self):\n self.dashboard.graph_display.default_screen()\n self.serial_connect()\n if self.connected: self.serial_begin()\n \n def serial_begin(self):\n if self.user == USERNAME:\n self.dashboard.menu_bar.modify_btn.grid(row=1, column=0, sticky='nesw',pady=(0,20),padx=20)\n reactor.serial_port = self.serial\n self.graph_setup()\n self.read_from_serial_port()\n\n def end_connection(self):\n self.connected = False\n if self.dashboard.graph_display.anim.event_source:\n self.dashboard.graph_display.anim.event_source.stop()\n self.close_serial_port()\n self.open_login()\n self.parent.update()\n\n def get_serial_port(self):\n ports = Serial.list_available_ports()\n if len(ports) == 0: raise SerialError\n return ports[1] #after trial and error, the correct port is always 2nd\n #element in list of ports\n \n def serial_connect(self, depth=0):\n try:\n depth += 1\n port = self.get_serial_port()\n self.serial = Serial.SerialPort(port=port, baud_rate=BAUD_RATE)\n self.serial.open_port()\n self.connected = True\n if depth > 1: #if we couldn't connect on the first attempt\n self.serial_begin()\n except (OSError, SerialError):\n Utilities.error_msg('No connection established',\n 'Board is not connected! Cannot read serial data from it.')\n self.parent.update_idletasks()\n if self.logged_in: #wait 1s before checking for connection again\n self.parent.after(RETRY_DELAY, lambda depth=depth:\n self.serial_connect(depth)) \n\n def graph_setup(self):\n self.dashboard.graph_display.setup_graph()\n self.change_graph('Stirring Speed', 'Stirring Speed (RPM)', RED,\n self.dashboard.menu_options.spd_canv, 'speed')\n self.dashboard.graph_display.run_animation()\n \n def read_from_serial_port(self):\n if self.serial is None or not self.connected:\n self.end_connection()\n return\n subsystem, value, elapsed_time = self.serial.read_value()\n #fyi 0 == false\n if len([x for x in (subsystem, value, elapsed_time) if x == -1]) > 0:\n self.end_connection()\n return\n if (len([x for x in (subsystem, value, elapsed_time) if x is None]) == 0\n and subsystem in self.vals.keys()):\n self.vals[subsystem] = Utilities.truncate(self.vals[subsystem])\n self.vals[subsystem].append((elapsed_time, value))\n if self.connected:\n self.update_data()\n self.parent.after(SERIAL_DELAY, self.read_from_serial_port)\n \n def toggle_btns(self, active):\n btns=[self.dashboard.menu_options.ph_canv,\n self.dashboard.menu_options.temp_canv,\n self.dashboard.menu_options.spd_canv]\n for btn in btns:\n if btn == active:\n btn.config(highlightbackground='black', highlightthickness=3)\n else: btn.config(highlightbackground=MENU, highlightthickness=0)\n\n def update_data(self):\n if self.serial is not None and self.serial.running:\n if len(self.vals['pH']) > 0:\n reactor.ph = self.vals['pH'][-1][1]\n if len(self.vals['temperature']) > 0:\n reactor.temperature = self.vals['temperature'][-1][1]\n if len(self.vals['speed']) > 0:\n reactor.speed = self.vals['speed'][-1][1]\n self.dashboard.graph_display.xs = [x[0] for x in self.vals[self.current_graph]]\n self.dashboard.graph_display.ys = [y[1] for y in self.vals[self.current_graph]]\n self.dashboard.menu_options.update_lbls()\n self.dashboard.menu_bar.update_status()\n \n \n def change_graph(self, title, y_axis, colour, btn, graph, event=None):\n self.toggle_btns(btn)\n self.current_graph = 
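# ------------------------------------------------------------------
# A self-contained sketch (an assumption, not the project's code) of the Tk
# polling pattern used by serial_connect()/read_from_serial_port() above:
# widget.after() schedules the next attempt so the event loop never blocks.
import tkinter as tk

def poll(root, try_once, delay_ms=1000):
    """Call try_once() until it returns True, re-scheduling via after()."""
    if not try_once():
        root.after(delay_ms, lambda: poll(root, try_once, delay_ms))

# Usage sketch (attempt_connect is a hypothetical callable returning bool):
#   root = tk.Tk()
#   poll(root, attempt_connect, delay_ms=1000)
#   root.mainloop()
# ------------------------------------------------------------------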
graph\n self.dashboard.graph_display.graph_title = title\n self.update_data()\n self.dashboard.graph_display.y_axis = y_axis\n self.dashboard.graph_display.graph_colour = colour\n self.dashboard.graph_display.update_graph_axes()\n\n def close_serial_port(self): #fix\n pass\n## if self.serial is not None and self.serial.running:\n## self.serial.running = False\n## self.serial.close()\n## self.serial = None\n \n def shutdown(self):\n if tk.messagebox.askyesno('Confirm shutdown', 'Shutdown bioreactor?'):\n self.close_serial_port()\n self.open_login()\n \n def close_all(self):\n self.close_serial_port()\n self.__init__(self.parent)\n for widget in self.parent.winfo_children():\n widget.destroy()\n\ndef close_ui(win, event=None):\n try:\n if tk.messagebox.askokcancel('Quit?','Do you really want to close the UI?'):\n win.quit() #stop mainloop before destroying window\n win.destroy()\n except AttributeError:\n win.quit()\n win.destroy()\n\ndef init_win(): \n win = tk.Tk(className='bioreactor UI')\n win.title('Bioreactor UI')\n win.wm_iconbitmap(bitmap = \"@icon.XBM\")\n win.geometry('{}x{}'.format(SCREENWIDTH, SCREENHEIGHT))\n win.resizable(False, False)\n win.bind('', lambda event, win=win: close_ui(win, event))\n win.protocol(\"WM_DELETE_WINDOW\", lambda win=win: close_ui(win))\n Utilities.centralise(win)\n return win\n\nif __name__ == '__main__':\n reactor = Bioreactor.Bioreactor()\n win = init_win()\n UI = UI_Manager(win)\n UI.open_login()\n win.mainloop()\n","sub_path":"UI/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"89322941","text":"from torch.utils import data\nimport numpy as np\n\n\nclass Dataset(data.Dataset):\n def __init__(self, relations, labels, questions_lookup, answers_lookup):\n 'Initialization'\n self.relations = relations\n self.labels = labels\n self.questions_lookup = questions_lookup\n self.answers_lookup = answers_lookup\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.relations)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n # Select sample\n ID = self.relations[index]\n\n # Load data and get label\n ques = self.questions_lookup[ID[0]]\n ans = self.answers_lookup[ID[1]]\n y = self.labels[ID]\n\n # convert to numpy array\n ques = np.array(ques)\n ans = np.array(ans)\n y = np.array(y).astype(np.float32)\n\n return ques, ans, y\n","sub_path":"utils/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"196142383","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 2 05:31:07 2019\n\n@author: User\n\"\"\"\n\nfrom flask import Flask, jsonify, request, abort\napp = Flask(__name__)\nimport landdmark\n\n@app.route('/todo/tasks', methods=['POST'])\ndef get_base():\n if not request.json or not 'code' in request.json:\n abort(400)\n code = request.json['code']\n val = landdmark.getns(code)\n return jsonify({'Nasal Index':str(val[0]),\n 'Eye Index' : str(val[1])\n }),201\nif __name__ == '__main__':\n app.run(debug=False, port=4996)","sub_path":"public/images/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"483317224","text":"'''\r\nCreated on Apr 8, 2019\r\n\r\n\r\nHere things are normalized by QcBio: biomass C\r\n@author: 
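# ------------------------------------------------------------------
# A minimal usage sketch (an assumption, not part of dataloader.py above):
# wrapping that Dataset in torch.utils.data.DataLoader batches and shuffles
# the (question, answer, label) triples; the lookup dicts here are dummy
# placeholders for illustration only.
from torch.utils.data import DataLoader

relations = [('q1', 'a1'), ('q1', 'a2')]
labels = {('q1', 'a1'): 1, ('q1', 'a2'): 0}
questions_lookup = {'q1': [1, 2, 3]}
answers_lookup = {'a1': [4, 5, 6], 'a2': [7, 8, 9]}

dataset = Dataset(relations, labels, questions_lookup, answers_lookup)
loader = DataLoader(dataset, batch_size=2, shuffle=True)

for ques, ans, y in loader:
    # the default collate_fn stacks the numpy arrays into tensors
    print(ques.shape, ans.shape, y.shape)   # (2, 3), (2, 3), (2,)
# ------------------------------------------------------------------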
keiin\r\n'''\r\n\r\nfrom pylab import *\r\nfrom FigSetting2 import *\r\nfrom Savefig3 import *\r\nfrom Savetxt import *\r\nrcParams['axes.prop_cycle'] = cycler('color',['b', 'r', 'g'])\r\nrcParams.update({'font.size': 15})\r\nrcParams.update({'lines.linewidth': 2.5})\r\nrcParams.update({'figure.autolayout': True})\r\nrcParams['figure.figsize']=5,4\r\nrcParams.update({'axes.linewidth':1.2}) \r\nrcParams.update({'xtick.major.width':1.2}) #\r\nrcParams.update({'ytick.major.width':1.2}) #\r\nrcParams.update({'lines.markeredgewidth': 1}) #default = 1\r\nrcParams.update({'ytick.right': 'True'})\r\nrcParams.update({'ytick.direction': 'in'})\r\nrcParams.update({'xtick.top': 'True'})\r\nrcParams.update({'xtick.direction': 'in'})\r\nrcParams['xtick.major.pad']='8'\r\n\r\n#=====================================\r\n# Defining parameters\r\n#=====================================\r\n\r\nCO2air = 1e6 #(uM) DIC concentration when equilibrium with air\r\n\r\nEno3 = 0.774 #From a800 05 12 07 Phytoplankton model \r\nEnstore = 0.383 #From 317 12 05 with epNH4=0.6 and C5H7O2N1\r\nE = Eno3\r\n\r\n#----------------------\r\n# Storage\r\n#----------------------\r\npCstoreMax = 8 #(d-1)\r\nCstoreMax = 0.65 #(molC molC-1)\r\n\r\n#:::::::::::::::::::::::::::::::::::::::::::::::\r\n# Key parameters; These are really keys\r\n#:::::::::::::::::::::::::::::::::::::::::::::::\r\nPhoMax = 4.5 #(d-1) Maximum photosynthetsis rate\r\nKco2 = 250 #(uM)\r\n\r\nHco2 = 1.65 #(d-1) gas exchange constant \r\nCvco2 = 2.9e3 #(dimensionless) factor for adjusting CO2 consumption\r\n\r\nCstoreloss = 0.6 #(d-1) Cstoreloss due to its conversion to N containing molecules (biomass)\r\n\r\n#============================\r\n# \r\n#============================\r\ndts = 10 #(s) dt in second\r\ndt = dts/86400 #(d) dt in day\r\nt = arange(0,86400/24*24+dts,dts)/86400 #(d) time\r\nth = t*24 #(h) time\r\nU = arange(size(t))\r\nNightTime = 3600*14/86400\r\n#============================\r\n\r\nQcBio = 1\r\n\r\n#===============================\r\n# Preparing for arrays \r\n#===============================\r\ndef upd1(f,df): #upd = update\r\n f[i+1] = f[i] + df*dt \r\n return f\r\n\r\ndef o(): \r\n return zeros((size(t)))\r\n\r\nPho = o()\r\nCO2 = o()\r\nCstore = o()\r\nCstore2 = o()\r\nQc = o()\r\nX = o() #(uM C) proxy of population (assuming constant cell C of QcBio).\r\nMu = o()\r\nMuC = o()\r\npCstore = o()\r\n\r\n#=====================\r\n# Initial conditions\r\n#=====================\r\nCO2[0] = 812.06 #(uM)\r\nCO2 = CO2air*ones(size(o()))\r\nCstore[0] = 0.38 #(molC molC-1)\r\nCstore2[0] = Cstore[0]\r\nX[0] = 1\r\n\r\n#=====================\r\n\r\nfor i in U[:-1]:\r\n if t[i] < NightTime:\r\n Pho[i] = PhoMax*CO2[i]/(CO2[i]+Kco2)\r\n pCstore[i] = pCstoreMax*(CstoreMax - Cstore[i])/CstoreMax\r\n Mu[i] = (Pho[i] - pCstore[i]*(1+E))/(1+(1-Cstore[i])*E)\r\n \r\n if Pho[i] <= 0:\r\n Pho[i] = 0\r\n pCstore[i] = 0\r\n Mu[i] = 0\r\n \r\n if Pho[i] < pCstore[i]*(1+E):\r\n pCstore[i] = Pho[i]/(1+E)\r\n Mu[i] = 0\r\n \r\n Qc[i] = QcBio + Cstore[i]\r\n \r\n if t[i]=NightTime] = nan\r\nCO2[t>=NightTime] = nan\r\n\r\n#C:N ======================\r\n#Average = 5.137\r\nCN = 5.137*ones(size(Qc)) #C:N based on the data average\r\nYcn = CN/Qc #(mol mol-1) C:N of biomass\r\nMuC0 = copy(MuC); MuC0[t>=NightTime] = 0\r\nVn = MuC0/CN #(molN molC-1 d-1) N uptake rate\r\nCNdata = genfromtxt('..\\\\Data\\\\CNdataNO3.csv',delimiter=',').T\r\n\r\nVn[t>=NightTime] = nan\r\nYcn[t>=NightTime] = nan\r\nCN[t>=NightTime ] = nan\r\nPho[t>=NightTime] = 
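# ------------------------------------------------------------------
# A side sketch (an assumption, not the model itself): the upd1() helper
# above is a forward-Euler step, f[i+1] = f[i] + df*dt. In isolation the
# scheme looks like this; it stays stable only while dt is small relative to
# the fastest rate constant in the model.
import numpy as np

def euler(deriv, f0, t):
    """Integrate df/dt = deriv(f, t) with forward Euler on the grid t."""
    f = np.zeros_like(t)
    f[0] = f0
    for i in range(t.size - 1):
        f[i + 1] = f[i] + deriv(f[i], t[i]) * (t[i + 1] - t[i])
    return f

# e.g. exponential decay df/dt = -2*f over one unit of time:
#   euler(lambda f, _t: -2.0 * f, 1.0, np.linspace(0, 1, 101))[-1]  # ~exp(-2)
# ------------------------------------------------------------------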
nan\r\n#OOOOOOOOOOOOOOOOOOOOOOOOOOOO\r\n# Data mpreparation\r\n#OOOOOOOOOOOOOOOOOOOOOOOOOOOO\r\n\r\na = genfromtxt('C:\\\\Users\\\\keiin\\\\Google Drive\\\\Croco collaboration\\\\\\\r\n04 GAP\\\\02 Excel files\\\\Data\\\\DataNO3.csv',delimiter=',').T\r\n\r\nOD = genfromtxt('C:\\\\Users\\\\keiin\\\\Google Drive\\\\Croco collaboration\\\\\\\r\n04 GAP\\\\02 Excel files\\\\Data\\\\ODdataNO3.csv',delimiter=',').T\r\n\r\nQcData = a[24,2] #(fg cell-1)\r\n\r\n#OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO\r\n# Plot\r\n#OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO\r\n\r\nXlabel = '$\\mathit{t}$ (h)'\r\n\r\ndef sup(loc,Title,Ylabel):\r\n subplot(2,2,loc)\r\n title(Title,y=1.02)\r\n xlabel(Xlabel)\r\n ylabel(Ylabel)\r\n\r\ndef sup1(loc,Title,Ylabel):\r\n figure(loc)\r\n # title(Title,y=1.02)\r\n xlabel(Xlabel)\r\n ylabel(Ylabel)\r\n\r\ndef ebar(x,y,e):\r\n errorbar(x,y,(e,e),fmt='o',color='red',\\\r\n elinewidth=1,markeredgecolor='k',ecolor='k',capthick=1,capsize=5)\r\n\r\ndef ebar2(x,y,e,l):\r\n errorbar(x,y,(e,e),fmt='o',color='red',\\\r\n elinewidth=1,markeredgecolor='k',ecolor='k',capthick=1,capsize=5,label=str(l))\r\n\r\ndef Xlim():\r\n xlim(left=-0.3,right=14.3)\r\n xticks(arange(0,14.01,2))\r\n\r\ndef Xlim2():\r\n xlim(left=-0.3,right=24.3)\r\n xticks(arange(0,24.1,4))\r\n\r\ndef Xlim3():\r\n xlim(left=0,right=14)\r\n xticks(arange(0,14.01,2))\r\n\r\ndef sf(name):\r\n Savefig3('02\\\\02 GAP1\\\\04 paper\\\\NO3',name,300)\r\n\r\n\r\n# #figure(1,figsize=(12,9))\r\n# \r\nsup1(1,'$\\mathit{C_{Sto}}$','$\\mathit{C_{Sto}}$ (mol C mol C$^{-1}$)')\r\nplot(th,Cstore)\r\nplot(th,Cstore2,'b--')\r\nebar(a[12],a[13]/QcData,a[14]/QcData)\r\nXlim2()\r\nylim(0.,1.1)\r\nsf('Cstore')\r\n \r\nsup1(2,'$\\mathit{DIC}$','[$\\mathit{DIC}]$ ($\\mu$M C)')\r\nplot(th, CO2)\r\nebar(a[4],a[5],a[6])\r\nylim()\r\nXlim()\r\nylim(-100,1000)\r\nsf('CO2')\r\n \r\nsup1(3,'$\\mathit{XQ_C}$','[$\\mathit{C_{Cell}}$]')\r\nplot(th,X*Qc/(X[0]*(Cstore[0]+1)))\r\nplot(OD[0],OD[1])\r\nXlim2()\r\nylim(0.97,1.5)\r\nsf('OD')\r\n \r\nsup1(4,'$\\mathit{\\mu_{C}}$','$\\mathit{\\mu_{C}}$ (d$^{-1}$)')\r\nplot(th,MuC)\r\nebar(a[0],a[1]*24,a[2]*24)\r\nylim(-0.2,3.0)\r\nXlim()\r\nsf('MuC')\r\n \r\nrcParams.update({'xtick.top': 'False'})\r\nrcParams.update({'ytick.direction': 'in'})\r\nrcParams.update({'xtick.direction': 'out'})\r\nsup1(5,'','Fate of C (d$^{-1}$)')\r\nstackplot(th,Mu*1,pCstore,(pCstore-Mu*Cstore)*E+Mu*E)\r\nXlim3()\r\nylim(0,5)\r\n\r\nNames = [\"Biomass\",\"C store\",\"Biosyn. 
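# ------------------------------------------------------------------
# A side sketch (not part of the analysis): the repeated `x[t >= NightTime]
# = nan` assignments above rely on matplotlib skipping NaN samples, which is
# a compact way to blank out the night-time portion of a curve.
import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0.0, 24.0, 241)
y = np.sin(2 * np.pi * t / 24.0)
y[t >= 14.0] = np.nan          # everything after "lights off" disappears
plt.plot(t, y)
plt.xlabel('t (h)')
plt.show()
# ------------------------------------------------------------------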
cost\"]\r\nColors = ['blue','red','green']\r\nstackplot([],[],[],[],colors=Colors[::-1],labels=Names[::-1])\r\nlegend(loc=4,edgecolor = 'k')\r\nsf('C_fate')\r\n# \r\n# figure(6)\r\n# plot(th,pCstore)\r\n# \r\n# figure(7)\r\n# plot(th,Cstore)\r\n\r\nsup1(8,'','C:N (mol mol$^{-1}$)')\r\nplot(th,CN,label='Model')\r\nebar2(CNdata[0],CNdata[1],CNdata[2],\"Data\")\r\n#plot(CNdata[0],CNdata[1],'o',label='Data')\r\n#plot(th,Ycn,label='Bio')\r\nlegend(loc=4,edgecolor='k')\r\nylim(0,8)\r\nsf('CN')\r\n\r\nsup1(9,'','V$_{N}$ (molN molC$^{-1}$ d$^{-1}$)')\r\nplot(th,Vn*(QcBio + Cstore))\r\nylim(bottom=0)\r\nprint(Vn)\r\nsf('Vn')\r\n\r\nsup1(10,'','e$_{Cfix}$ : e$_{NO3}$ electron ratio')\r\n#plot(th,CN*4/8)\r\nplot(th,MuC0*4/(Vn*8)) \r\nprint(MuC0*4/(Vn*8)) \r\nylim(0,5)\r\nsf('CNelectron')\r\n\r\nsup1(11,'','Cell N (molN molC$^{-1}$)')\r\n#plot(th,CN,label='Model')\r\nplot(th,1/Ycn)#,label='Bio')\r\n#legend(loc=4,edgecolor='k')\r\nylim(0,0.4)\r\nsf('N_Cell')\r\n\r\n#ylim(0,8)\r\n\r\n#Savefig3('02\\\\02 GAP1\\\\02 testing','All',300)\r\n#Savetxt(Mu,'02\\\\02 GAP1\\\\02 testing','DIC')\r\n\r\nshow()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"G003_03_15_keep_going.py","file_name":"G003_03_15_keep_going.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"279656359","text":"#conform to mongodb standards\ndef id_correcter(item):\n if 'uid' in item:\n item['_id'] = item['uid']\n del item['uid']\n else:\n try:\n item['_id'] = item['id']\n del item['id']\n except KeyError as e:\n print(item)\n return item","sub_path":"utils/id_correcter.py","file_name":"id_correcter.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"475774802","text":"import sqlite3 as db\nfrom datetime import datetime\n\n#Here we made a connection to database\nconn = db.connect('spent.db')\n\n#Here we use \"c\" as a cursor for database\nc = conn.cursor()\n\ndef init():\n ''' initialize a new table to store data'''\n c.execute('''CREATE TABLE IF NOT EXISTS expenses(\n price INTEGER,\n event TEXT COLLATE NOCASE,\n message TEXT,\n date TEXT\n )''')\n conn.commit()\n conn.close()\n\n\ndef add(price, event, message = ''):\n # add new item to data base\n now_time = str(datetime.now().strftime('%Y - %m - %d | %H : %M'))\n c.execute('INSERT INTO expenses VALUES (:price, :event, :message, :date)',{'price' : price , 'event' : event , 'message' : message , 'date' : now_time})\n conn.commit()\n conn.close()\n \n\ndef show(event = None):\n #Show all data in data base or with an event \n if event:\n c.execute('SELECT * FROM expenses WHERE event = (:event)',{'event' : event})\n result = c.fetchall()\n c.execute('SELECT sum(price) FROM expenses WHERE event = :event',{'event':event })\n total_price = c.fetchone()[0]\n else:\n c.execute('SELECT * FROM expenses')\n result = c.fetchall()\n c.execute('SELECT sum(price) FROM expenses')\n total_price = c.fetchone()[0]\n\n return total_price, result\n conn.close()\n\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"170277036","text":"import tcod\nimport tcod.event\nimport globs\nfrom pydispatch import Dispatcher\n\nclass MainControlsEmitter(Dispatcher):\n _events_ = [\"interaction\", \"inventory_open\", \"in_game_menu\", \"pickup_item\", \"simple_crafting_menu\"]\n\nclass 
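# ------------------------------------------------------------------
# A minimal sketch (an assumption): the empty stackplot([], [], [], []) call
# above exists only to generate reversed legend entries; matplotlib's Patch
# proxy artists achieve the same without plotting dummy data.
import matplotlib.pyplot as plt
from matplotlib.patches import Patch

names = ["Biomass", "C store", "Biosyn. cost"]
colors = ['blue', 'red', 'green']
handles = [Patch(facecolor=c, label=n) for c, n in zip(colors, names)]
plt.legend(handles=handles[::-1], loc=4, edgecolor='k')
plt.show()
# ------------------------------------------------------------------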
MainControls(object):\n def __init__(self, player, renderer, level):\n #mapobj and renderer is needed because it controls the scrolling and camera\n self.player = player\n self.r = renderer\n self.level = level\n self.emitter = MainControlsEmitter()\n\n def handlekeys(self):\n for event in tcod.event.get():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"KEYDOWN\":\n # print(\"playerx:\",self.player.x,\"playery:\",self.player.y)\n if event.scancode == tcod.event.SCANCODE_DOWN:\n if self.player.y+2 < self.level.mapobj.h:\n if self.level.check_walkable(self.player.x, self.player.y+1):\n self.player.y += 1\n elif event.scancode == tcod.event.SCANCODE_UP:\n if self.player.y > 0:\n if self.level.check_walkable(self.player.x, self.player.y-1):\n self.player.y -= 1\n elif event.scancode == tcod.event.SCANCODE_LEFT:\n if self.player.x > 0:\n if self.level.check_walkable(self.player.x-1, self.player.y):\n self.player.x -= 1\n elif event.scancode == tcod.event.SCANCODE_RIGHT:\n if self.player.x+2 < self.level.mapobj.w:\n if self.level.check_walkable(self.player.x+1, self.player.y):\n self.player.x += 1\n elif event.scancode == tcod.event.SCANCODE_E:\n self.emitter.emit(\"interaction\")\n elif event.scancode == tcod.event.SCANCODE_I:\n self.emitter.emit(\"inventory_open\")\n elif event.scancode == tcod.event.SCANCODE_ESCAPE:\n self.emitter.emit(\"in_game_menu\")\n elif event.scancode == tcod.event.SCANCODE_U:\n globs.gEventHandler.emit(\"use_tool\")\n elif event.scancode == tcod.event.SCANCODE_C:\n self.emitter.emit(\"simple_crafting_menu\")\n elif event.scancode == tcod.event.SCANCODE_G:\n obj = self.level.check_item(self.player.x, self.player.y) or self.level.check_plant(self.player.x, self.player.y)\n if obj:\n self.emitter.emit(\"pickup_item\", obj)\n obj = self.level.check_item(self.player.x, self.player.y) or self.level.check_plant(self.player.x, self.player.y)\n if obj:\n globs.gEventHandler.emit(\"update_help_text\", \"Press 'g' to pick up \" + obj.name)\n \n\n\nclass InteractionControlsEmitter(Dispatcher):\n _events_ = [\"interaction_direction\"]\n\nclass InteractionControls(object):\n def __init__(self, player, level):\n self.level = level\n self.player = player\n self.emitter = InteractionControlsEmitter()\n\n def handlekeys(self):\n globs.gEventHandler.emit(\"update_help_text\", \"Press a direction key to interact\")\n for event in tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"KEYDOWN\":\n globs.gEventHandler.emit(\"update_help_text\",\"\")\n interactible, obj = (None, None)\n if event.scancode == tcod.event.SCANCODE_RIGHT:\n interactible, obj = self.level.check_interactible(self.player.x+1, self.player.y)\n elif event.scancode == tcod.event.SCANCODE_LEFT:\n interactible, obj = self.level.check_interactible(self.player.x-1, self.player.y)\n elif event.scancode == tcod.event.SCANCODE_UP:\n interactible, obj = self.level.check_interactible(self.player.x, self.player.y-1)\n elif event.scancode == tcod.event.SCANCODE_DOWN:\n interactible, obj = self.level.check_interactible(self.player.x, self.player.y+1)\n self.emitter.emit(\"interaction_direction\", obj=obj)\n\nclass ToolControlsEmitter(Dispatcher):\n _events_ = [\"direction\"]\n\nclass ToolControls(object):\n def __init__(self, player, level):\n self.level = level\n self.player = player\n self.emitter = ToolControlsEmitter()\n\n def handlekeys(self):\n globs.gEventHandler.emit(\"update_help_text\", \"Press a direction key to use selected item\")\n for event in 
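# ------------------------------------------------------------------
# A minimal sketch (an assumption, not the project's code): the four
# direction branches in MainControls.handlekeys() above collapse into a
# scancode -> (dx, dy) table with one shared bounds/walkability check.
import tcod.event

MOVES = {
    tcod.event.SCANCODE_UP:    (0, -1),
    tcod.event.SCANCODE_DOWN:  (0,  1),
    tcod.event.SCANCODE_LEFT:  (-1, 0),
    tcod.event.SCANCODE_RIGHT: (1,  0),
}

def try_move(player, level, scancode):
    if scancode in MOVES:
        dx, dy = MOVES[scancode]
        nx, ny = player.x + dx, player.y + dy
        if (0 <= nx < level.mapobj.w and 0 <= ny < level.mapobj.h
                and level.check_walkable(nx, ny)):
            player.x, player.y = nx, ny
# ------------------------------------------------------------------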
tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"KEYDOWN\":\n if event.scancode == tcod.event.SCANCODE_LEFT:\n self.emitter.emit(\"direction\", data=(-1,0))\n elif event.scancode == tcod.event.SCANCODE_RIGHT:\n self.emitter.emit(\"direction\", data=(1,0))\n elif event.scancode == tcod.event.SCANCODE_UP:\n self.emitter.emit(\"direction\", data=(0,-1))\n elif event.scancode == tcod.event.SCANCODE_DOWN:\n self.emitter.emit(\"direction\", data=(0,1))\n \nclass NonToolsControlsEmitter(ToolControlsEmitter):\n pass\n\nclass NonToolsControls(ToolControls):\n def __init__(self, player, level):\n super().__init__(player, level)\n self.emitter = NonToolsControlsEmitter()\n\n\nclass ConversationControlsEmitter(Dispatcher):\n _events_ = [\"confirm\"]\n\nclass ConversationControls(object):\n def __init__(self):\n self.emitter = ConversationControlsEmitter()\n \n def handlekeys(self):\n globs.gEventHandler.emit(\"update_help_text\", \"Press space to advance text\")\n for event in tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"KEYDOWN\":\n if event.scancode == tcod.event.SCANCODE_SPACE:\n print(\"space pressed\")\n self.emitter.emit(\"confirm\")\n \n\n\nclass MenuControlsEmitter(Dispatcher):\n _events_ = [\"move_up\", \"move_down\", \"select\", \"cancel\"]\n\nclass MenuControls(object):\n def __init__(self):\n self.emitter = MenuControlsEmitter()\n\n def handlekeys(self):\n for event in tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"KEYDOWN\":\n if event.scancode == tcod.event.SCANCODE_DOWN:\n self.emitter.emit(\"move_down\")\n elif event.scancode == tcod.event.SCANCODE_UP:\n self.emitter.emit(\"move_up\")\n elif event.scancode == tcod.event.SCANCODE_I or event.scancode == tcod.event.SCANCODE_ESCAPE:\n self.emitter.emit(\"cancel\")\n elif event.scancode == tcod.event.SCANCODE_SPACE:\n self.emitter.emit(\"select\")\n\nclass SimpleCraftingControlsEmitter(MenuControlsEmitter):\n pass\n\nclass SimpleCraftingControls(MenuControls):\n def __init__(self):\n super().__init__()\n self.emitter = SimpleCraftingControlsEmitter()\n\nclass SimpleCraftingSubmenuEmitter(Dispatcher):\n _events_ = [\"move_left\", \"move_right\", \"cancel\", \"select\"]\n\nclass SimpleCraftingSubmenuControls(object):\n def __init__(self):\n self.emitter = SimpleCraftingSubmenuEmitter()\n\n def handlekeys(self):\n for event in tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"KEYDOWN\":\n if event.scancode == tcod.event.SCANCODE_LEFT:\n self.emitter.emit(\"move_left\")\n elif event.scancode == tcod.event.SCANCODE_RIGHT:\n self.emitter.emit(\"move_right\")\n elif event.scancode == tcod.event.SCANCODE_ESCAPE:\n self.emitter.emit(\"cancel\")\n elif event.scancode == tcod.event.SCANCODE_SPACE:\n self.emitter.emit(\"select\")\n\nclass InventoryControlsEmitter(MenuControlsEmitter):\n _events_ = [\"move_down\", \"move_up\", \"cancel\", \"select\", \"drop\"]\n\nclass InventoryControls(MenuControls):\n def __init__(self):\n self.emitter = InventoryControlsEmitter()\n\n def handlekeys(self):\n for event in tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"KEYDOWN\":\n if event.scancode == tcod.event.SCANCODE_DOWN:\n self.emitter.emit(\"move_down\")\n elif event.scancode == tcod.event.SCANCODE_UP:\n self.emitter.emit(\"move_up\")\n elif event.scancode == tcod.event.SCANCODE_I or event.scancode == tcod.event.SCANCODE_ESCAPE:\n 
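# ------------------------------------------------------------------
# A self-contained sketch (not project code) of the pydispatch pattern used
# throughout this module: declare event names in _events_, connect listeners
# with bind(event_name=callback), and fire them with emit().
from pydispatch import Dispatcher

class DemoEmitter(Dispatcher):
    _events_ = ["confirm"]

def on_confirm(*args, **kwargs):
    print("confirmed", kwargs)

emitter = DemoEmitter()
emitter.bind(confirm=on_confirm)
emitter.emit("confirm", source="demo")   # -> confirmed {'source': 'demo'}
# ------------------------------------------------------------------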
self.emitter.emit(\"cancel\")\n elif event.scancode == tcod.event.SCANCODE_SPACE:\n self.emitter.emit(\"select\")\n elif event.scancode == tcod.event.SCANCODE_D:\n self.emitter.emit(\"drop\")\n","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":9226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"358128799","text":"import sys\n\nif sys.version_info[0] == 2: #Python 2.7.x\n from Tkinter import *\n import ttk\n import tkMessageBox as messagebox\n import tkFileDialog as filedialog\n from tkColorChooser import askcolor\n import urllib2\nelse:\n from tkinter import *\n from tkinter import messagebox\n from tkinter import filedialog\n from tkinter.colorchooser import *\n from tkinter import ttk\n import urllib.request as urllib2\nimport random\nimport time\nimport webbrowser\n\n#Konstante\nVRSTICE = 10\nSTOLPCI = 10\nMINE = 10\nPOKAZI = True #False Ne prikazuje števila min, uro v glavnem oknu, True prikazuje.\nNEPOKAZI_ST = False\n\n#Barve\nC1 = 'green'\nC2='green yellow'\nC3='blue'\nC4='red'\n\nclass Gumb:\n def __init__(self, gumb, mina, sosedi, vrstica, stolpec):\n self.gumb = gumb\n self.mina = mina\n self.sosedi = sosedi\n self.vrstica=vrstica\n self.stolpec=stolpec\n\n def __repr__(self):\n return 'Gumb({0}, {1}, {2}, {3}, {4})'.format(self.gumb, self.mina, self.sosedi, self.vrstica, self.stolpec)\n\nclass Minesweeper():\n def __init__(self, master, vrstice,stolpci,mine, nevidne_st, pokazi,c1,c2,c3,c4):\n self.odkje = True\n self.pokazi = pokazi\n self.c1=c1\n self.c2=c2\n self.c3=c3\n self.c4=c4\n self.nevidne_st = nevidne_st\n self.master = master\n self.st_vrstic = vrstice\n self.st_stolpcev = stolpci\n self.mines = mine\n self.sez_praznih = [] #tu notri pospravimo gumbe, ki nimajo min.\n #Nastavi nove vrednosti -Podatke uporabi iz nastavitev\n self.st_vrstic1234 = vrstice\n self.st_stolpcev1234 = stolpci\n self.mines1234=mine\n # Full screen app\n self.state = False\n self.master.bind(\"\", self.toggle_fullscreen)\n self.master.bind(\"\", self.end_fullscreen)\n\n #konfiguracija menija\n self.menu = Menu(self.master)\n master.config(menu=self.menu)\n\n # game menu\n filemenu = Menu(self.menu, tearoff=0)\n filemenu.add_command(label=\"Nova igra\", command=self.nova)\n filemenu.add_command(label=\"Odpri\", command=self.file_open)\n filemenu.add_command(label=\"Shrani\", command=self.file_save)\n filemenu.add_separator()\n filemenu.add_command(label=\"Izhod\", command=self.master.destroy)\n self.menu.add_cascade(label=\"Igra\", menu=filemenu)\n\n # create more pulldown menus\n levelmenu = Menu(self.menu, tearoff=1)\n levelmenu.add_command(label=\"Po meri\", command=self.nastavitve)\n levelmenu.add_command(label=\"Obrni\", command=self.obrni)\n levelmenu.add_separator()\n levelmenu.add_command(label=\"Lahka\", command=self.lahka)\n levelmenu.add_command(label=\"Srednje težka\", command=self.srednja)\n levelmenu.add_command(label=\"Težka\", command=self.tezka)\n levelmenu.add_command(label=\"Zelo težka\", command=self.zelo_tezka)\n self.menu.add_cascade(label=\"Težavnost\", menu=levelmenu)\n\n netmenu = Menu(self.menu, tearoff=0)\n netmenu.add_command(label='Github',command=self.preveri_posodobitve)\n self.menu.add_cascade(label=\"Internet\", menu=netmenu)\n\n # display the menu\n self.master.config(menu=self.menu)\n\n self.nova_igra()\n\n def preveri_posodobitve(self):\n '''Preveri ali obstajajo posodobitve na githubu'''\n url = 'https://github.com/martincesnovar/Minolovec'\n vir = 
urllib2.urlopen(url)\n webbrowser.open_new(url)\n\n def skrij_uro(self):\n self.pokazi = not self.pokazi\n\n def nova(self):\n self.t1=time.time()\n self.konec_igre(False)\n\n def lahka(self):\n '''ustvari 10*10 veliko polje z 10 minami'''\n self.st_vrstic1234 = 10\n self.st_stolpcev1234 = 10\n self.mines1234=10\n self.nevidne_st = False\n self.t1=time.time()\n self.konec_igre(False)\n\n def srednja(self):\n '''ustvari 15*15 veliko polje z 50 minami'''\n self.st_vrstic1234 = 15\n self.st_stolpcev1234 = 15\n self.mines1234=50\n self.nevidne_st = False\n self.t1=time.time()\n self.konec_igre(False)\n \n def tezka(self):\n '''ustvari 24*30 veliko polje z 688 minami'''\n self.st_vrstic1234 = 24\n self.st_stolpcev1234 = 30\n self.mines1234=668\n self.nevidne_st = False\n self.t1=time.time()\n self.konec_igre(False)\n\n def zelo_tezka(self):\n '''ustvari 24*30 veliko polje z 688 minami z izklopljenimi številkami'''\n self.tezka()\n self.nevidne_st=True\n\n def obrni(self):\n '''Obrne število min'''\n polje = self.st_vrstic1234*self.st_stolpcev1234\n self.mines1234 = polje - self.mines1234\n self.t1=time.time()\n self.konec_igre(False)\n\n def nastavitve(self):\n okno = Nastavitve(self)\n\n def zbrisi_polje(self):\n '''Zbriše polje'''\n for vrstica in range(self.st_vrstic):\n for stolpec in range(self.st_stolpcev):\n self.buttons[vrstica][stolpec].gumb.destroy()\n self.buttons=None\n\n def file_save(self):\n '''Shrani gumbe'''\n try:\n name=filedialog.asksaveasfile(mode='w',defaultextension=\".txt\", filetypes=[('Text Files', '*.txt')])\n #self.sez_praznih\n name.write('{0} {1} {2}\\n'.format(self.st_vrstic1234, self.st_stolpcev1234, self.mines1234))\n for el in sorted(self.izbrane_mine):\n text2save=str(el)+ ' '\n name.write(text2save)\n name.write('\\n\\n#Nikoli ne preseži {0}\\n#1 vrstice stolpci mine\\n#2 položaj min'.format(self.st_vrstic1234*self.st_stolpcev1234-1))\n name.close\n except:\n Exception('Napaka')\n\n def file_open(self):\n '''Naloži igro iz datoteke'''\n self.odkje=False\n self.nova_igra()\n self.odkje=True\n\n def odpri1(self,odkje):\n if odkje:\n #Dobi podatke iz nastavitev\n self.mines=self.mines1234\n self.st_vrstic = self.st_vrstic1234\n self.st_stolpcev = self.st_stolpcev1234\n self.izbrane_mine = random.sample([i for i in range(self.st_vrstic * self.st_stolpcev)], self.mines1234)\n else:\n try:\n file_path = filedialog.askopenfilename(filetypes=[('Text Files', '*.txt'),('All', '*.*')])\n with open(file_path) as f:\n self.st_vrstic1234, self.st_stolpcev1234, self.mines1234 = list(map(int, f.readline().split()))\n self.izbrane_mine = list(map(int,f.readline().split()))\n except:\n Exception('Napaka')\n finally:\n self.odkje=True\n self.mines=self.mines1234\n self.st_vrstic = self.st_vrstic1234\n self.st_stolpcev = self.st_stolpcev1234\n self.buttons = [[None for i in range(self.st_stolpcev)] for j in range(self.st_vrstic)]\n self.st_nepoklikanih = self.st_vrstic * self.st_stolpcev\n \n\n def nova_igra(self):\n self.sez_praznih = []\n self.prvic = True\n self.odpri1(self.odkje) #Naloži novo igro iz datoteke (False) ali iz igre (True)\n num_proximity_mines = 0\n frame = Frame(self.master)\n Grid.rowconfigure(self.master, 0, weight=1)\n Grid.columnconfigure(self.master, 0, weight=1)\n \n #Dodamo polje za število min in čas\n \n if self.pokazi: #pokaže/skrije število min in uro.\n var = StringVar()\n var.set('Število min: ' + str(self.mines))\n l = Label(self.master, textvariable=var, anchor=NW, justify=LEFT, wraplength=398)\n l.grid(row=0, column=0, columnspan=10, sticky = N 
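# ------------------------------------------------------------------
# A minimal sketch (an assumption, not game code) of the save-file format
# written by file_save() above: line 1 holds "rows cols mines", line 2 the
# flat board indices of the mines, and '#' lines are comments.
def parse_save(text):
    lines = [l for l in text.splitlines() if l.strip() and not l.startswith('#')]
    rows, cols, mines = map(int, lines[0].split())
    positions = list(map(int, lines[1].split()))
    return rows, cols, mines, positions

print(parse_save("10 10 3\n4 17 99\n\n#comment"))  # (10, 10, 3, [4, 17, 99])
# ------------------------------------------------------------------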
+ S + E +W)\n self.now=-1\n self.label = Label(self.master,text=self.now,anchor=NW, justify=LEFT, wraplength=398)\n self.label.grid(row=0, column=2, columnspan=10, sticky = N + S + E +W)\n self.update_clock()\n \n frame.grid(row=1, column=0, sticky = N + S + E + W) if self.pokazi else frame.grid(row=0, column=0, sticky = N + S + E + W)\n## self.label1 = Label(frame, text=\"Minesweeper\")\n## self.label1.grid(row=0, column=0, columnspan=10)\n st = 0\n for vrstica in range(self.st_vrstic):\n Grid.rowconfigure(frame, vrstica, weight=1)\n for stolpec in range(self.st_stolpcev):\n Grid.columnconfigure(frame, stolpec, weight=1)\n mine = False\n if st in self.izbrane_mine:\n mine = True \n \n gumb = Gumb(Button(frame, bg=self.c1, width=3), mine, num_proximity_mines, vrstica, stolpec) # Objekt\n\n if mine == False: #Prazne mine doda v seznam - da \"premaknem\" mino.\n self.sez_praznih.append(gumb) \n\n self.buttons[vrstica][stolpec] = gumb\n # GLUPI PYTHON NAROBE DELA SPREMENLJIVKE V LAMBDAH\n self.buttons[vrstica][stolpec].gumb.bind('',\n (lambda v, s: lambda e: self.lclick(v, s))(vrstica, stolpec))\n self.buttons[vrstica][stolpec].gumb.bind('',\n (lambda v, s: lambda e: self.rclick(v, s))(vrstica, stolpec))\n self.buttons[vrstica][stolpec].gumb.grid(row=vrstica, column=stolpec, sticky=N + S + E + W)\n st+=1\n\n for v in range(self.st_vrstic):\n for s in range(self.st_stolpcev):\n self.buttons[v][s].sosedi = self.sosednje_mine(v, s)\n\n self.master.attributes(\"-topmost\", True)\n\n def update_clock(self):\n if not self.prvic:\n self.now = -1\n self.now += 1\n \n self.label.configure(text=self.now)\n if self.prvic:\n self.label.after(1000, self.update_clock)\n \n def sosedi(self, vrstica, stolpec):\n sez = [(vrstica - 1, stolpec - 1), (vrstica - 1, stolpec), (vrstica - 1, stolpec + 1),\n (vrstica, stolpec - 1), (vrstica, stolpec + 1),\n (vrstica + 1, stolpec - 1), (vrstica + 1, stolpec), (vrstica + 1, stolpec + 1)]\n return [(v, s) for (v, s) in sez if 0 <= v < self.st_vrstic and 0 <= s < self.st_stolpcev]\n\n def sosednje_mine(self, vrstica, stolpec):\n \"\"\"Stevilo sosednjih polj, ki so mina\"\"\"\n m = 0\n for (v, s) in self.sosedi(vrstica, stolpec):\n if self.buttons[v][s].mina == 1: m += 1\n return m\n\n def lclick(self, vrstica, stolpec, preveri_konec=True):\n prva = self.st_nepoklikanih == self.st_vrstic*self.st_stolpcev #ali je prva poteza?\n if prva: self.t1 = time.time()\n\n sez = self.buttons[vrstica][stolpec]\n if sez.gumb[\"bg\"] == self.c1:\n # polje se ni odkrito, ga odkrijemo\n self.st_nepoklikanih -= 1\n sez.sosedi = self.sosednje_mine(vrstica, stolpec)\n #Stopimo na mino v 1. koraku\n #Na igralnem polju je 1 mina manj.\n if sez.mina == True and prva == True:\n sez.mina= False\n #self.mines-=1\n\n #Dodamo še 1 mino, ker smo jo kliknili v 1. 
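# ------------------------------------------------------------------
# A small side sketch (not game code) of the closure pitfall the "GLUPI
# PYTHON ..." comment above works around: lambdas capture loop variables by
# reference, so without the immediately-invoked factory every handler would
# see the final loop value. The default-argument idiom is an alternative.
fns_late = [lambda: i for i in range(3)]
print([f() for f in fns_late])        # [2, 2, 2] - all share the last i

fns_bound = [(lambda j: (lambda: j))(i) for i in range(3)]
print([f() for f in fns_bound])       # [0, 1, 2] - value frozen per step

fns_default = [lambda i=i: i for i in range(3)]
print([f() for f in fns_default])     # [0, 1, 2] - default-arg idiom
# ------------------------------------------------------------------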
potezi\n izbrana = random.choice(self.sez_praznih)\n izbrana.mina = True\n sez.sosedi = self.sosednje_mine(vrstica, stolpec)\n #self.mines += 1\n \n m = sez.sosedi # stevilo sosednjih min\n if m != 0 or sys.platform ==\"darwin\":\n if self.nevidne_st == False:\n sez.gumb.config(text=str(m))\n else:\n sez.gumb.config(text='')\n \n sez.gumb.config(bg=self.c2)\n elif sez.mina == 1:\n # stopili smo na mino\n for i in range(len(self.buttons)):\n for x in range(len(self.buttons[i])):\n if self.buttons[i][x].mina == 1:\n self.buttons[i][x].gumb.config(bg=self.c4, text='*')\n preveri_konec = False\n self.konec_igre(False)\n else:\n # polje je bilo prazno\n m = sez.sosedi # stevilo sosednjih min\n if m != 0 or sys.platform == \"darwin\":\n if self.nevidne_st == False:\n sez.gumb.config(text=str(m))\n else:\n sez.gumb.config(text='')\n if m == 0 and sys.platform == \"darwin\": #Označi odprte\n sez.gumb.config(text=str(m))\n \n sez.gumb.config(bg=self.c2)\n if m == 0:\n for (v, s) in self.sosedi(vrstica, stolpec):\n self.lclick(v, s, preveri_konec=False)\n\n # ali je konec igre?\n if preveri_konec and self.st_nepoklikanih-self.mines == 0:\n self.konec_igre(True)\n \n def rclick(self, vrstica, stolpec):\n sez = self.buttons[vrstica][stolpec]\n if sez.gumb[\"bg\"] == self.c1:\n sez.gumb.config(bg=self.c3, text=chr(9873) if sys.version_info[0] == 3 and TkVersion >= 8.6 else ':)')\n self.st_nepoklikanih -= 1\n if sez.mina == True:\n self.mines -= 1\n\n elif sez.gumb[\"bg\"] == self.c3:\n if sez.mina == True:\n self.mines += 1\n self.st_nepoklikanih += 1\n sez.gumb.config(bg=self.c1, text=\"\")\n\n if self.st_nepoklikanih == 0 and self.mines == 0:\n self.konec_igre(True)\n\n def konec_igre(self, od_kje):\n self.prvic = False\n self.t2 = time.time()\n if od_kje:\n result = messagebox.askyesno('Winner!', 'Igram znova?\\nČas igranja {0}'.format(int(self.t2-self.t1)))\n else:\n result = messagebox.askyesno('Looser!', 'Igram znova?\\nČas igranja {0}'.format(int(self.t2-self.t1)))\n if result:\n self.zbrisi_polje()\n self.nova_igra()\n else:\n self.master.destroy()\n\n def toggle_fullscreen(self, event=None):\n self.state = not self.state # Just toggling the boolean\n self.master.attributes(\"-fullscreen\", self.state)\n return \"break\"\n\n def end_fullscreen(self, event=None):\n self.state = False\n self.master.attributes(\"-fullscreen\", False)\n return \"break\"\n\nclass Nastavitve():\n def __init__(self, minesweeper):\n self.minesweeper = minesweeper\n self.minesweeper.prvic = False #Štoparica\n self.top = Toplevel()\n self.top.title(\"Nastavi\")\n self.top.attributes(\"-topmost\", True)\n\n frame = Frame(self.top)\n\n frame.pack()\n\n Label(frame, text=\"Število vrstic\").grid(row=0, column=0, sticky=W)\n Label(frame, text=\"Število stolpcev\").grid(row=1, column=0, sticky=W)\n Label(frame, text=\"Število min\").grid(row=2, column=0, sticky=W)\n self.e1 = ttk.Entry(frame)\n self.e2 = ttk.Entry(frame)\n self.e3 = ttk.Entry(frame)\n\n self.e1.grid(row=0, column=1)\n self.e2.grid(row=1, column=1)\n self.e3.grid(row=2, column=1)\n\n self.var = IntVar()\n c = ttk.Checkbutton(self.top, text=\"Skrij številke\", variable=self.var)\n c.pack()\n\n\n c1 = Button(self.top, text = 'Neodkriti', command = self.getColor1, bg = self.minesweeper.c1)\n c1.pack()\n\n c2 = Button(self.top, text = 'Odkriti', command = self.getColor2, bg = self.minesweeper.c2)\n c2.pack()\n\n c3 = Button(self.top, text = 'Zastava', command = self.getColor3, bg = self.minesweeper.c3)\n c3.pack()\n\n c4 = Button(self.top, text = 'Mina', command = 
self.getColor4, bg = self.minesweeper.c4)\n c4.pack()\n \n b = ttk.Button(self.top, text=\"OK\", command=self.callback)\n self.top.bind(\"\", self.callback)\n b.pack()\n\n\n def getColor1(self):\n self.minesweeper.c1 = askcolor()[1]\n def getColor2(self):\n self.minesweeper.c2 = askcolor()[1]\n def getColor3(self):\n self.minesweeper.c3 = askcolor()[1]\n def getColor4(self):\n self.minesweeper.c4 = askcolor()[1]\n\n def callback(self,event=None):\n '''Dobi podatke iz okna če obstajajo, sicer ohrani stare vrednosti'''\n \n self.e1.get1 = self.e1.get() or self.minesweeper.st_vrstic1234\n self.e2.get2 = self.e2.get() or self.minesweeper.st_stolpcev1234\n self.e3.get3 = self.e3.get() or self.minesweeper.mines1234\n \n self.minesweeper.st_vrstic1234 = max(int(self.e1.get1),1)\n self.minesweeper.st_stolpcev1234 = max(int(self.e2.get2),1)\n self.minesweeper.mines1234 = max(min(int(self.e3.get3),self.minesweeper.st_stolpcev1234*self.minesweeper.st_vrstic1234 - 1),0) #Število min ne sme biti večje od velikosti igralnega polja\n self.minesweeper.nevidne_st = self.var.get()\n\n self.top.destroy()\n \n self.minesweeper.zbrisi_polje()\n self.minesweeper.nova_igra()\n\n#Glavni program\nroot = Tk()\nroot.title('Minolovec')\nminesweeper = Minesweeper(root,VRSTICE,STOLPCI,MINE, NEPOKAZI_ST, POKAZI,C1,C2,C3,C4)\nroot.mainloop()\n","sub_path":"Minolovec.pyw","file_name":"Minolovec.pyw","file_ext":"pyw","file_size_in_byte":17352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"458327292","text":"# Copyright 2022 The Kubeflow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test Vertex AI Batch Prediction Job Launcher Client module.\"\"\"\n\nimport os\n\nfrom google_cloud_pipeline_components.container.v1.model.delete_model import launcher\nfrom google_cloud_pipeline_components.container.v1.model.delete_model import remote_runner\n\nimport unittest\nfrom unittest import mock\n\n\nclass LauncherDeleteModelUtilsTests(unittest.TestCase):\n\n def setUp(self):\n super(LauncherDeleteModelUtilsTests, self).setUp()\n self._gcp_resources = os.path.join(\n os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n 'test_file_path/test_file.txt')\n self._input_args = [\n '--type', 'DeleteModel', '--project', '', '--location',\n '', '--payload', 'test_payload', '--gcp_resources',\n self._gcp_resources\n ]\n\n @mock.patch.object(\n remote_runner, 'delete_model', autospec=True)\n def test_launcher_on_delete_model_type(self, mock_delete_model):\n launcher.main(self._input_args)\n mock_delete_model.assert_called_once_with(\n type='DeleteModel',\n project='',\n location='',\n payload='test_payload',\n gcp_resources=self._gcp_resources)\n","sub_path":"components/google-cloud/tests/container/v1/model/delete_model/test_delete_model_launcher.py","file_name":"test_delete_model_launcher.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"531491705","text":"# 
kf.py updated file for robot estimation assignment\n# pset[3]\n# (C) 2017 David Feil-Seifer\n\nimport numpy as np\nimport math\nfrom scipy import stats\nimport scipy.stats\n\n# kf_update: update state estimate [u, sigma] with new control [xdot] and measurement [z]\n# \tparameters:\n#\t\t\tu : 2x1 vector with state estimate (x) at time t-1 and control (xdot) at time t\n#\t\t\tsigma: 2x2 matrix with covariance at time t-1\n#\t\t\tz (int): observed (uncertain) measurement of state (x) at time t\n#\treturns: [u sigma] updated state with estimate at time t\n\ndef kf_update(u, sigma, z):\n\tF = np.matrix([[1.0, 1.0], [0.0, 1.0]])\n\tH = np.matrix([1.0, 0.0])\n\tstd_dev = 3\n\tsv = std_dev*u.item(1)\n\tG = np.matrix([[.3], [.1]])\n\tsigma_x = G*(G.T)*sv**2\n\tsigma_z = np.matrix([np.power(std_dev, 2)])\n\tsigma_f = np.add(np.dot(np.dot(F, sigma), F.T), sigma_x)\n\tkt = np.dot(np.dot(sigma_f, H.T), np.add(np.dot(np.dot(H, sigma_f), H.T), sigma_z).I)\n\tmeasure = np.dot(kt, (np.subtract(np.matrix([z]), np.dot(np.dot(H, F), u))))\n\tu = np.add(np.dot(F, u), measure)\n\tsigma = np.dot(np.subtract(np.identity(2), np.dot(kt, H)), sigma_f)\n\treturn [u, sigma]\n\n# door_update: update estimate of door locations\n# \tparameters:\n#\t\t\tu : 2x1 vector with state estimate (x) at time t-1 and control (xdot) at time t-1\n#\t\t\tsigma: 2x2 matrix with covariance at time t-1\n#\t\t\td (binary): door sensor at time t-1 \n#\t\t\tdoor_dist (array of size 10): probability (0..1) that a door exists at each location (0..9)\n#\treturns: [door_dist] updated door distribution\n\ndef door_update(u, sigma, d, door_dist):\n\treturn door_dist","sub_path":"Kalman Filter/kf.py","file_name":"kf.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"621141831","text":"\"\"\"medictor URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n\n    path(\"\", views.home, name=\"home\"),\n    path('about/', views.about, name='about'),\n    path('sign_up_patient/', views.sign_up_patient, name='sign_up_patient'),\n    path('user_profile_patient/', views.user_profile_patient, name='user_profile_patient'),\n    path('sign_in_patient/', views.sign_in_patient, name='sign_in_patient'),\n    path('diseasepred/', views.diseasepred, name=\"diseasepred\"),\n    path('logout_patient/', views.logout_patient, name=\"logout_patient\"),\n    path('input_symptoms/', views.input_symptoms, name=\"input_symptoms\")\n]\n","sub_path":"medictor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"328050841","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\nimport requests\nfrom bs4 import BeautifulSoup\n\nif __name__ == '__main__':\n    target = 'http://library_web.ahut.edu.cn/MainWeb/datasource/mainshow.asp?d_id=2'\n    req = requests.get(url=target)\n    html = req.text\n    bf = BeautifulSoup(html, 'html.parser')\n    texts = bf.find_all('div')\n    print(texts[0].text)\n","sub_path":"爬虫.py","file_name":"爬虫.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"21784987","text":"\"\"\"Code originally from Jupyter Notebook.\nMoved to separate Python file due to kernel\nrepeatedly dying.\n\n\"\"\"\nimport json\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nfrom collections import defaultdict\nfrom datetime import datetime\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nfrom sklearn.model_selection import (\n    GridSearchCV,\n    StratifiedShuffleSplit,\n    train_test_split,\n)\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.svm import SVC\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorboard.plugins.hparams import api as hp\n\n\nlog_dir = \"logs/hparam_tuning_small_2lyrs/\"\n\n\n# Read in saved data.\ncombined = pd.read_csv(os.path.join(\"formatted_data\", \"combined.csv\"))\n\n# Seed the random generator, so we get the same results each time\nrandom_seed_1 = 3141592\nrandom_seed_2 = 8675309\n\n# Specify which column contains the class label (Success or Not)\nclass_label_col = \"offer_successful\"\n\n# Split the data and class labels, so we can use train_test_split.\ndata_no_label = combined.drop(class_label_col, axis=1)\nclass_labels = combined.loc[:, [class_label_col]]\n\n# We want 80/20, and then we will split the training set into validation.\n(X_train, X_test, y_train, y_test) = train_test_split(\n    data_no_label, class_labels, test_size=0.2, random_state=random_seed_1\n)\n\n# Split into training/validation.\n# To get 60/20/20 split from 80, we use a test size of 0.25 since 60/80 = 0.75.\n(X_train, X_valid, y_train, y_valid) = train_test_split(\n    X_train, y_train, test_size=0.25, random_state=random_seed_2\n)\n\n# Need to change the shape of the y values to be (n, ).\ny_train = y_train.values.ravel()\ny_test = y_test.values.ravel()\ny_valid = y_valid.values.ravel()\n\n# Since we want to be able to create new offers to add to our portfolio, we don't want to associate\n# too much importance to the exact id_offer.
We want to generalize.\n\n# Therefore, we will store the id_offer's for each training set in case we need it later.\nid_offer_train = X_train.loc[:, \"id_offer\"]\nid_offer_test = X_test.loc[:, \"id_offer\"]\nid_offer_valid = X_valid.loc[:, \"id_offer\"]\n\n# Now remove 'id_offer' from each.\nX_train = X_train.drop([\"id_offer\"], axis=1)\nX_test = X_test.drop([\"id_offer\"], axis=1)\nX_valid = X_valid.drop([\"id_offer\"], axis=1)\n\n# Remove any logs from previous runs\nos.system(\"rm -rf ./\" + log_dir)\n\n\ndef f_beta_score(matrix, beta):\n \"\"\"Calculate the f_beta score.\n\n Args:\n -matrix (1d array) = [tn fp fn tp]\n -beta (int) = indicates if we use F1 score, F2 score, etc.\n\n Returns:\n -score (float)\n \"\"\"\n tn, fp, fn, tp = matrix\n\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n\n score = (\n (1 + beta ** 2) * (precision * recall) / (((beta ** 2) * precision) + recall)\n )\n\n return score\n\n\ndef f1_score(matrix):\n \"\"\"Calculate F1 score\"\"\"\n return f_beta_score(matrix, 1)\n\n\ndef f2_score(matrix):\n \"\"\"Calculate F2 score\"\"\"\n return f_beta_score(matrix, 2)\n\n\n# List of hyperparameters\nHP_NUM_UNITS_1 = hp.HParam(\"num_units_1\", hp.Discrete([32, 64, 128]))\nHP_DROPOUT_1 = hp.HParam(\"dropout_1\", hp.RealInterval(0.1, 0.5))\nHP_NUM_LAYERS = hp.HParam(\n \"num_layers\", hp.Discrete([2])\n) # hp.HParam(\"num_layers\", hp.Discrete([1, 2]))\nHP_NUM_UNITS_2 = hp.HParam(\n \"num_units_2\", hp.Discrete([32, 64, 128])\n) # hp.HParam(\"num_units_2\", hp.Discrete([32, 64, 128]))\nHP_DROPOUT_2 = hp.HParam(\n \"dropout_2\", hp.RealInterval(0.1, 0.5)\n) # hp.HParam(\"dropout_2\", hp.RealInterval(0.1, 0.5))\nHP_OPTIMIZER = hp.HParam(\n \"optimizer\", hp.Discrete([\"adam\"])\n) # hp.HParam(\"optimizer\", hp.Discrete([\"adam\", \"sgd\"]))\nHP_OPTIM_LR = hp.HParam(\"lr\", hp.Discrete([1e-4, 1e-3]))\n\nMETRIC_ACCURACY = \"accuracy\"\nMETRIC_FN = \"false_negatives\" # tf.keras.metrics.FalseNegatives()\nMETRIC_FP = \"false_positives\" # tf.keras.metrics.FalsePositives()\nMETRIC_TN = \"true_negatives\" # tf.keras.metrics.TrueNegatives()\nMETRIC_TP = \"true_positives\" # tf.keras.metrics.TruePositives()\nMETRIC_F1 = \"f1_score\"\nMETRIC_F2 = \"f2_score\"\n\nwith tf.summary.create_file_writer(log_dir).as_default():\n hp.hparams_config(\n hparams=[\n HP_NUM_UNITS_1,\n HP_DROPOUT_1,\n HP_NUM_UNITS_2,\n HP_DROPOUT_2,\n HP_OPTIMIZER,\n HP_OPTIM_LR,\n ],\n metrics=[\n hp.Metric(METRIC_ACCURACY, display_name=\"Accuracy\"),\n hp.Metric(METRIC_FN, display_name=\"False Negatives\"),\n hp.Metric(METRIC_FP, display_name=\"False Positives\"),\n hp.Metric(METRIC_TN, display_name=\"True Negatives\"),\n hp.Metric(METRIC_TP, display_name=\"True Positives\"),\n hp.Metric(METRIC_F1, display_name=\"F1 Score\"),\n hp.Metric(METRIC_F2, display_name=\"F2 Score\"),\n ],\n )\n\n\ndef validate_model(hparams):\n \"\"\"Perform hyperparameter tuning on the validation set.\"\"\"\n model = keras.Sequential()\n # First hidden layer.\n model.add(\n keras.layers.Dense(\n hparams[HP_NUM_UNITS_1], activation=\"relu\", input_shape=[X_train.shape[1]]\n )\n )\n model.add(keras.layers.Dropout(hparams[HP_DROPOUT_1]))\n # Possibly add second hidden layer.\n if hparams[HP_NUM_LAYERS] == 2:\n model.add(keras.layers.Dense(hparams[HP_NUM_UNITS_2], activation=\"relu\"))\n model.add(keras.layers.Dropout(hparams[HP_DROPOUT_2]))\n # Output Layer\n model.add(keras.layers.Dense(1, activation=\"sigmoid\"))\n\n # Display model summary\n model.summary()\n\n # Initialize optimizer with learning rate.\n if 
hparams[HP_OPTIMIZER] == \"adam\":\n optim = keras.optimizers.Adam(learning_rate=hparams[HP_OPTIM_LR])\n elif hparams[HP_OPTIMIZER] == \"sgd\":\n optim = keras.optimizers.SGD(learning_rate=hparams[HP_OPTIM_LR])\n\n # Compile the model.\n model.compile(\n optimizer=optim,\n loss=\"binary_crossentropy\",\n metrics=[\n \"accuracy\",\n tf.keras.metrics.FalseNegatives(),\n tf.keras.metrics.FalsePositives(),\n tf.keras.metrics.TrueNegatives(),\n tf.keras.metrics.TruePositives(),\n ],\n )\n\n # Callbacks\n # Early Stopping\n # -monitor validation loss.\n # -when validation loss stops decreasing, stop.\n # -patience is number of epochs with no improvement.\n cb_es = keras.callbacks.EarlyStopping(\n monitor=\"val_loss\", mode=\"min\", patience=20, verbose=2\n )\n # Model Checkpoint\n # -call our model \"best_model.h5\".\n # -monitor validation loss.\n # -when validation loss stops decreasing, stop.\n # -save the best overall model.\n cb_ckpt = keras.callbacks.ModelCheckpoint(\n \"best_model_small_2lyr.h5\",\n monitor=\"val_loss\",\n mode=\"min\",\n save_best_only=True,\n verbose=2,\n )\n\n # Fit\n model.fit(\n X_train,\n y_train,\n validation_data=(X_valid, y_valid),\n callbacks=[cb_es, cb_ckpt],\n epochs=200,\n verbose=2,\n )\n\n _, test_acc, test_fn, test_fp, test_tn, test_tp = model.evaluate(\n X_test, y_test, verbose=2\n )\n\n return test_acc, test_fn, test_fp, test_tn, test_tp\n\n\n# For each run, log an hparams summary with hyperparameters and metrics.\ndef run(run_dir, hparams):\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams) # record the values used in this trial.\n accuracy, fn, fp, tn, tp = validate_model(hparams)\n tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)\n tf.summary.scalar(METRIC_FN, fn, step=1)\n tf.summary.scalar(METRIC_FP, fp, step=1)\n tf.summary.scalar(METRIC_TN, tn, step=1)\n tf.summary.scalar(METRIC_TP, tp, step=1)\n tf.summary.scalar(METRIC_F1, f1_score(np.array([tn, fp, fn, tp])), step=1)\n tf.summary.scalar(METRIC_F2, f2_score(np.array([tn, fp, fn, tp])), step=1)\n\n\n# Grid search over parameters and log values.\nsession_num = 0\n\nfor num_units_1 in HP_NUM_UNITS_1.domain.values:\n for dropout_1 in np.arange(\n HP_DROPOUT_1.domain.min_value, HP_DROPOUT_1.domain.max_value + 0.1, 0.2\n ):\n for num_layers in HP_NUM_LAYERS.domain.values:\n for num_units_2 in HP_NUM_UNITS_2.domain.values:\n for dropout_2 in np.arange(\n HP_DROPOUT_2.domain.min_value,\n HP_DROPOUT_2.domain.max_value + 0.1,\n 0.2,\n ):\n for optimizer in HP_OPTIMIZER.domain.values:\n for optim_lr in HP_OPTIM_LR.domain.values:\n hparams = {\n HP_NUM_UNITS_1: num_units_1,\n HP_DROPOUT_1: dropout_1,\n HP_NUM_LAYERS: num_layers,\n HP_NUM_UNITS_2: num_units_2,\n HP_DROPOUT_2: dropout_2,\n HP_OPTIMIZER: optimizer,\n HP_OPTIM_LR: optim_lr,\n }\n run_name = \"run-%d\" % session_num\n print(f\"--- Starting trial: {run_name}\")\n print({h.name: hparams[h] for h in hparams})\n run(log_dir + run_name, hparams)\n session_num += 1\n","sub_path":"Capstone-Starbucks/tensorflow_nn.py","file_name":"tensorflow_nn.py","file_ext":"py","file_size_in_byte":9433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"282573031","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef func(X,y):\r\n pos=[]\r\n neg=[]\r\n for i in range(len(y)):\r\n if y[i]==1:\r\n pos=np.append(pos,i)\r\n else:\r\n neg=np.append(neg,i)\r\n pos=[int(i) for i in pos]\r\n neg=[int(i) for i in neg]\r\n fig,ax=plt.subplots()\r\n a, 
=ax.plot(X[pos,0],X[pos,1],'+',color='black')\r\n b, =ax.plot(X[neg,0],X[neg,1],'o',color='yellow',mec='black')\r\n return fig,ax,a,b;\r\n","sub_path":"week 6/plotData.py","file_name":"plotData.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281979957","text":"# this project is licensed under the WTFPLv2, see COPYING.txt for details\n\nfrom PyQt5.QtCore import QModelIndex, QRegExp\nfrom PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem, QPushButton, QMenu\nfrom PyQt5.QtWidgets import QWidget, QActionGroup, QGridLayout, QLineEdit, QComboBox\n\nimport os\n\nfrom ..three import str\nfrom .. import consts\nfrom .helpers import WidgetMixin\nfrom ..helpers import file_search, buffers\nfrom ..reutils import csToQtEnum\nfrom ..qt import Signal, Slot\nfrom .locationlist import LocationList\n\n\n__all__ = ('SearchWidget',)\n\n\nclass SearchOptionsButton(QPushButton):\n\tdef __init__(self, **kwargs):\n\t\tsuper(SearchOptionsButton, self).__init__(**kwargs)\n\n\t\tself.setText(self.tr('Options'))\n\n\t\tmenu = QMenu()\n\t\tself.actionCi = menu.addAction(self.tr('Case insensitive'))\n\n\t\tmenu.addSeparator()\n\t\tself.actionFormat = QActionGroup(self)\n\t\tself.actionPlain = menu.addAction(self.tr('Plain text'))\n\t\tself.actionPlain.setEnabled(False)\n\t\tself.actionRe = menu.addAction(self.tr('Regular expression'))\n\t\tself.actionGlob = menu.addAction(self.tr('Glob pattern'))\n\t\tself.actionGlob.setEnabled(False)\n\t\tself.actionFormat.addAction(self.actionPlain)\n\t\tself.actionFormat.addAction(self.actionRe)\n\t\tself.actionFormat.addAction(self.actionGlob)\n\n\t\tself.actionRoot = menu.addAction(self.tr('Search in best root dir'))\n\n\t\tfor act in [self.actionCi, self.actionRe, self.actionPlain, self.actionGlob, self.actionRoot]:\n\t\t\tact.setCheckable(True)\n\t\tself.actionRe.setChecked(True)\n\n\t\tself.setMenu(menu)\n\n\tdef shouldFindRoot(self):\n\t\treturn self.actionRoot.isChecked()\n\n\tdef caseSensitive(self):\n\t\treturn not self.actionCi.isChecked()\n\n\tdef reFormat(self):\n\t\tif self.actionPlain.isChecked():\n\t\t\treturn QRegExp.FixedString\n\t\telif self.actionRe.isChecked():\n\t\t\treturn QRegExp.RegExp\n\t\telif self.actionGlob.isChecked():\n\t\t\treturn QRegExp.WildcardUnix\n\n\nclass SearchWidget(QWidget, WidgetMixin):\n\tdef __init__(self, **kwargs):\n\t\tsuper(SearchWidget, self).__init__(**kwargs)\n\n\t\tlayout = QGridLayout()\n\t\tself.setLayout(layout)\n\n\t\tself.exprEdit = QLineEdit()\n\t\tself.exprEdit.returnPressed.connect(self.returnPressed)\n\t\tself.setFocusProxy(self.exprEdit)\n\n\t\tself.optionsButton = SearchOptionsButton()\n\n\t\tself.pluginChoice = QComboBox()\n\t\tplugins = sorted(file_search.enabledPlugins(), key=lambda p: p.name())\n\t\tfor plugin in plugins:\n\t\t\tself.pluginChoice.addItem(plugin.name(), plugin.id)\n\n\t\tself.results = LocationList()\n\t\tself.results.setColumns(['path', 'line', 'snippet'])\n\n\t\tself.searcher = None\n\n\t\tlayout.addWidget(self.exprEdit, 0, 0)\n\t\tlayout.addWidget(self.optionsButton, 0, 1)\n\t\tlayout.addWidget(self.pluginChoice, 0, 2)\n\t\tlayout.addWidget(self.results, 1, 0, 1, -1)\n\n\t\tself.addCategory('file_search_widget')\n\n\tdef setPlugin(self, id):\n\t\tindex = self.pluginChoice.findData(id)\n\t\tif index >= 0:\n\t\t\tself.pluginChoice.setCurrentIndex(index)\n\n\tdef setText(self, text):\n\t\tself.exprEdit.setText(text)\n\n\tdef selectedPlugin(self):\n\t\treturn 
self.pluginChoice.itemData(self.pluginChoice.currentIndex())\n\n\tdef regexp(self):\n\t\tre = QRegExp(self.exprEdit.text())\n\t\tre.setCaseSensitivity(csToQtEnum(self.optionsButton.caseSensitive()))\n\t\tre.setPatternSyntax(self.optionsButton.reFormat())\n\t\treturn re\n\n\tdef shouldFindRoot(self):\n\t\treturn self.optionsButton.shouldFindRoot()\n\n\tdef makeArgs(self, plugin):\n\t\ted = buffers.currentBuffer()\n\n\t\tif self.shouldFindRoot():\n\t\t\tpath = plugin.searchRootPath(ed.path)\n\t\telse:\n\t\t\tpath = os.path.dirname(ed.path)\n\t\tpattern = self.exprEdit.text()\n\t\tci = self.optionsButton.caseSensitive()\n\t\treturn (path, pattern, ci)\n\n\t@Slot()\n\tdef doSearch(self):\n\t\tself.results.clear()\n\t\tplugin_type = file_search.getPlugin(self.selectedPlugin())\n\t\tself.searcher = plugin_type()\n\t\tfile_search.setupLocationList(self.searcher, self.results)\n\t\targs = self.makeArgs(self.searcher)\n\t\tself.searcher.search(*args)\n\n\treturnPressed = Signal()\n","sub_path":"eye/widgets/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"495837412","text":"# -*- encoding: utf-8 -*-\n\nfrom PyQt4 import QtCore, QtGui\n\nfrom gui.graphicItems.commandWidgets.genericCommand import GenericCommand\n\n\nclass ControllerGeneric(QtGui.QGraphicsView):\n\n parameterChanged = QtCore.pyqtSignal(int, float)\n\n def __init__(self, commands, channels, parent=None):\n QtGui.QGraphicsView.__init__(self, parent)\n\n\n self.verticalScrollMode = True\n\n # the scroll bars will be manually shown or not from self.arrangeItems\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n\n self.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\n\n self.commands = commands\n self.channels = channels\n\n self.setBackgroundBrush(QtGui.QBrush(QtCore.Qt.lightGray))\n self.setStyleSheet(\"\"\"\n .ControllerGeneric {\n border-style: none;\n }\n \"\"\")\n\n self.scene = QtGui.QGraphicsScene()\n\n self.cablePen = QtGui.QPen()\n self.cablePen.setColor(QtGui.QColor(0, 0, 0))\n self.cablePen.setWidth(2)\n self.cablePen.setCosmetic(True)\n\n self.items = list()\n for command in self.commands:\n commandItem = GenericCommand(command)\n self.scene.addItem(commandItem)\n self.items.append(commandItem)\n\n self.setScene(self.scene)\n\n self.arrangeItems()\n\n # self.sendPendingCommandsButton = QtGui.QPushButton()\n # self.cancelPendingCommandsButton = QtGui.QPushButton()\n #\n # self.sendPendingCommandsButtonProxy = self.scene.addWidget(self.sendPendingCommandsButton)\n # self.sendPendingCommandsButton.setText(\"send now\")\n # self.sendPendingCommandsButton.clicked.connect(self.sendPendingCommands)\n # self.sendPendingCommandsButton.setGeometry(10, 10, 80, 30)\n #\n # self.cancelPendingCommandsButtonProxy = self.scene.addWidget(self.cancelPendingCommandsButton)\n # self.cancelPendingCommandsButton.setText(\"cancel\")\n # self.cancelPendingCommandsButton.clicked.connect(self.cancelPendingCommands)\n # self.cancelPendingCommandsButton.setGeometry(100, 10, 80, 30)\n\n # def sendPendingCommands(self):\n # self.commands.movePendingCommandsToSendList()\n #\n # def cancelPendingCommands(self):\n # self.commands.cancelPendingCommands()\n\n def arrangeItems(self):\n \"\"\"\n All commands will be arranged in a grid, that either fits horizontally or vertically\n depending on the value of self.verticalScrollMode.\n\n If 
verticalScrollMode is True, a horizontal scrollbar will appear and\n only so many rows are drawn, that they fit in the available vertical space.\n\n If verticalScrollMode is False, a vertical scrollbar will appear and\n only so many columns are drawn, that they fit in the available horizontal space.\n \"\"\"\n\n # adjust here for the size of possible scrollbars\n maxHeight = self.height() - 20\n maxWidth = self.width() - 20\n\n\n # position the commandItems in a grid\n column = 0\n row = 0\n if self.verticalScrollMode is True:\n # arrange for a vertical scroll bar\n for item in self.items:\n rightCornerPosition = column * item.width + item.width\n if rightCornerPosition > maxWidth:\n column = 0\n row += 1\n positionX = column * item.width\n positionY = row * item.height # + 100\n item.setPos(positionX, positionY)\n column += 1\n else:\n # arrange for a horizontal scroll bar\n for item in self.items:\n lowerCornerPosition = row * item.height + item.height\n if lowerCornerPosition > maxHeight:\n row = 0\n column += 1\n positionX = column * item.width\n positionY = row * item.height\n item.setPos(positionX, positionY)\n row += 1\n\n # calculate the new width and height\n if len(self.items) > 0:\n totalWidth = column * self.items[-1].width + self.items[-1].width + 20\n totalHeight = row * self.items[-1].height + self.items[-1].height + 20\n else:\n totalWidth = 0\n totalHeight = 0\n\n # show scrollbars if needed\n if self.verticalScrollMode is True:\n if totalHeight > self.height():\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n else:\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n else:\n if totalWidth > self.width():\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n else:\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n\n #\n self.scene.setSceneRect(0, 0, totalWidth, totalHeight)\n self.scene.update()\n self.resize(self.width(), self.height())\n\n def updateSymbols(self):\n self.scene.update()\n\n def resizeEvent(self, QResizeEvent):\n super(ControllerGeneric, self).resizeEvent(QResizeEvent)\n self.arrangeItems()\n","sub_path":"gui/controllerGeneric.py","file_name":"controllerGeneric.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"469034982","text":"\"\"\"Train script.\"\"\"\n\nfrom __future__ import division, print_function\n\nimport functools\nimport os\nimport time\n\nimport numpy as np\nimport simplejson as json\nimport smile as sm\nimport tensorflow as tf\nfrom smile import flags, logging\n\nimport base_hparams\nimport reader\nfrom data_utils import Vocabulary\nfrom models import DiscoveryModel\nfrom reader import vectorize_smile\n\nflags.DEFINE_string(\"dataset_spec\", \"{}\", \"Data csv path for training.\")\nflags.DEFINE_string(\"train_dir\", \"\",\n \"Directory path used to store the checkpoints and summary.\")\nflags.DEFINE_string(\"data_hparams\", \"{}\", \"Data hparams JSON string.\")\nflags.DEFINE_string(\"hparams\", \"{}\", \"Model hparams JSON string.\")\nflags.DEFINE_integer(\"epochs\", 10, \"Total training epochs.\")\nflags.DEFINE_integer(\"steps_per_checkpoint\", 200,\n \"Steps to perform test and save checkpoints.\")\n\nFLAGS = flags.FLAGS\n\n\ndef make_train_data(dataset_spec, vocab, data_hparams, epochs):\n \"\"\"Make training and validation dataset.\"\"\"\n # Make SMILE vectorization function.\n vec_func = functools.partial(\n vectorize_smile, vocab=vocab, data_hparams=data_hparams)\n\n # Prepare 
both train and val datasets.\n    dataset_cls = getattr(reader, data_hparams.reader)\n    datasets = dataset_cls(data_hparams, dataset_spec)()\n    train_data, val_data, test_data = (datasets[\"train\"], datasets[\"val\"],\n                                       datasets[\"test\"])\n\n    buckets = data_hparams.buckets\n    val_batch_size = data_hparams.val_batch_size\n    seq_len_fn = lambda data: data[\"decoder_lens\"]\n    train_bucket_fn = tf.contrib.data.bucket_by_sequence_length(\n        seq_len_fn, buckets, [data_hparams.batch_size] * (1 + len(buckets)))\n    # For both val and test data, we do not perform bucket and batch padding.\n    # The only process was padding.\n    val_bucket_fn = tf.contrib.data.bucket_by_sequence_length(\n        seq_len_fn, [], [val_batch_size]\n    )\n\n    # Train inputs are one shot iterator, repeating `epochs` times.\n    train_inputs = train_data.map(\n        vec_func, num_parallel_calls=16).apply(train_bucket_fn).repeat(\n            epochs).make_one_shot_iterator().get_next()\n    # Validation inputs are re-initializable iterator.\n    val_inputs = val_data.map(vec_func).apply(\n        val_bucket_fn).make_initializable_iterator()\n    test_inputs = test_data.map(vec_func).apply(\n        val_bucket_fn).make_initializable_iterator()\n    return train_inputs, val_inputs, test_inputs\n\n\ndef train(hparams, data_hparams):\n    vocab = Vocabulary.get_default_vocab(not data_hparams.skip_at_symbol)\n    # Create global step variable first.\n\n    train_data, val_data, test_data = make_train_data(\n        json.loads(FLAGS.dataset_spec), vocab, data_hparams, FLAGS.epochs)\n    model = DiscoveryModel(data_hparams, hparams, vocab)\n    train_outputs, _, _ = model.build_train_graph(train_data)\n    seq_loss_op, train_op = model.build_train_loss(train_data, train_outputs)\n    with tf.control_dependencies([\n            val_data.initializer, test_data.initializer]):\n        _, val_ctr_smile_op, val_sampled_smiles_op = model.build_val_net(val_data.get_next())\n        model.build_test_net(val_ctr_smile_op, val_sampled_smiles_op, test_data.get_next())\n\n    train_summary_ops = tf.summary.merge(tf.get_collection(\"train_summaries\"))\n    val_summary_ops = tf.summary.merge(tf.get_collection(\"val_summaries\"))\n    test_summary_ops = tf.summary.merge(tf.get_collection(\"test_summaries\"))\n\n    stale_global_step_op = tf.train.get_or_create_global_step()\n    with tf.train.MonitoredTrainingSession(\n            checkpoint_dir=FLAGS.train_dir or None,\n            save_checkpoint_steps=FLAGS.steps_per_checkpoint or None,\n            log_step_count_steps=FLAGS.steps_per_checkpoint or None) as sess:\n        if FLAGS.train_dir:\n            summary_writer = tf.summary.FileWriterCache.get(FLAGS.train_dir)\n        else:\n            summary_writer = None\n        # step = 0\n        while not sess.should_stop():\n            # while step < 10:\n            # step += 1\n            stale_global_step, seq_loss, _, train_summary = sess.run(\n                [stale_global_step_op, seq_loss_op, train_op, train_summary_ops])\n            if summary_writer is not None:\n                summary_writer.add_summary(train_summary, stale_global_step)\n            # Run validation and test.\n            # Trigger test events.\n            if stale_global_step % FLAGS.steps_per_checkpoint == 0:\n                # if True:\n                try:\n                    sess.run([val_data.initializer, test_data.initializer])\n                    _, _ = sess.run([val_summary_ops, test_summary_ops])\n                    # The monitored training session will pick up the summary\n                    # and automatically add them.\n                except tf.errors.OutOfRangeError:\n                    # This specific handler must come before the generic one,\n                    # otherwise it can never be reached.\n                    logging.info(\"Test finished. Continue training.\")\n                    continue\n                except Exception as ex:\n                    logging.error(str(ex))\n                    raise\n        logging.info(\"Coordinator request to stop.\")\n\n\ndef main(_):\n    \"\"\"Main train script.\"\"\"\n    data_hparams = base_hparams.build_base_data_hparams()\n    data_hparams.override_from_dict(json.loads(FLAGS.data_hparams))\n    hparams = base_hparams.build_base_hparams()\n    hparams.override_from_dict(json.loads(FLAGS.hparams))\n\n    train(hparams, data_hparams)\n\n\nif __name__ == \"__main__\":\n    sm.app.run()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"315363369","text":"\n######################################################################\n## Imports\n######################################################################\n\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.backend_bases import key_press_handler\n\nimport sys\nif sys.version_info[0] < 3:\n    from Tkinter import *\nelse:\n    from tkinter import *\n\n######################################################################\n## PlotWindow\n######################################################################\n\nclass PlotWindow(Frame):\n    \"\"\"\n\n    \"\"\"\n\n    def __init__(self, parent, figure, plotType=None):\n        \"\"\"\n        Constructor\n\n        @param parent:\n        @param figure:\n        @param plotType:\n        @return\n        \"\"\"\n        Frame.__init__(self, parent)\n\n        # a tk.DrawingArea\n        self.plotType = plotType\n        self.canvas = FigureCanvasTkAgg(figure, master=self)\n        self.canvas.show()\n        self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)\n        self.toolbar = NavigationToolbar2TkAgg(self.canvas, self)\n        self.toolbar.update()\n        self.canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)\n        self.canvas.mpl_connect('key_press_event', self.on_key_event)\n\n    def getPlotType(self):\n        \"\"\"\n\n        @return\n        \"\"\"\n        return self.plotType\n\n    def updateFigure(self, figure):\n        \"\"\"\n\n        @param figure:\n        @return\n        \"\"\"\n\n        self.canvas._tkcanvas.pack_forget()\n\n        self.canvas = FigureCanvasTkAgg(figure, master=self)\n        self.canvas.show()\n        self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)\n\n        self.toolbar = NavigationToolbar2TkAgg(self.canvas, self)\n        self.toolbar.update()\n        self.canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)\n\n        self.canvas.mpl_connect('key_press_event', self.on_key_event)\n\n    def on_key_event(self, event):\n        \"\"\"\n\n        @param event:\n        @return\n        \"\"\"\n        print('you pressed %s' % event.key)\n        key_press_handler(event, self.canvas, self.toolbar)\n","sub_path":"PlotWindow.py","file_name":"PlotWindow.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"643827633","text":"import pygame\nimport os\nimport sys\nimport random\n\nfrom Board import new_Board\n\nfrom Frontend import fill\nfrom Frontend import Draw_pieces\nfrom Frontend import draw_board\nfrom Frontend import draw_big_pieces\n\nfrom Check_game import Check_horizontally, Check_vertically, Check_diagonals, Check_Big_Board, empty_cells_big_board, Check_empty_cells\nfrom Check_game import get_possible_moves\nfrom Check_game import Validate_box\nfrom Check_game import valid_locations, set_locations\nfrom Check_game import check_game\n\nfrom minimax import Minimax\n\n\n\npygame.font.init()\n\nWidth, Height = 810, 810\nSquare = Width//3\nSmall_Square = Square//3\nmargin = Width//30\n\nWin = 
pygame.display.set_mode((Width, Height))\npygame.display.set_caption(\"Ultimate Tic Tac Toe\")\nclock = pygame.time.Clock()\n\nCross_small = pygame.transform.scale(pygame.image.load(os.path.join(\"Assets\", \"cross.png\")), (Small_Square, Small_Square))\nCross = pygame.transform.scale(pygame.image.load(os.path.join(\"Assets\", \"cross.png\")), (Square, Square))\n\nCircle_small = pygame.transform.scale(pygame.image.load(os.path.join(\"Assets\", \"circle.png\")), (Small_Square, Small_Square))\nCircle = pygame.transform.scale(pygame.image.load(os.path.join(\"Assets\", \"circle.png\")), (Square, Square))\n\nBg = (0,0,0)\nLines_color = (211,211,211)\nLines_color_2 = (250, 0, 0)\n\nGame_Board = new_Board()\n\n\ndef update_window(Win, Lines_color, Lines_color_2, Width, Square, Small_Square, margin, Small_Cross, Small_Circle, Cross, Circle,board,big_board, player):\n Win.fill(Bg)\n Draw_pieces(Win,Small_Cross, Small_Circle,Cross, Circle, Small_Square, Square, board)\n draw_board(Win, Lines_color, Lines_color_2,Width, Square, Small_Square, margin)\n draw_big_pieces(Win, big_board, Square, Circle, Cross)\n pygame.display.update()\n\n\ndef main():\n run = True\n turn = random.choice([-1,1])\n AI = 1\n Human = -1\n #Game_Board.test()\n\n FPS = 120\n green = (0,178,0,0)\n game_over = False\n good = False\n\n\n box = None\n\n main_board = Game_Board.create_board()\n small_boards = Game_Board.every_small_boards()\n\n while run:\n\n clock.tick(FPS)\n fill(Circle_small,green)\n fill(Circle, green)\n update_window(Win, Lines_color, Lines_color_2, Width, Square, Small_Square, margin, Cross_small, Circle_small, Cross, Circle, small_boards, main_board, turn)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n if event.type == pygame.KEYDOWN and game_over:\n if event.key == pygame.K_SPACE and game_over:\n Game_Board.reset(small_boards, main_board, game_over)\n game_over = False\n\n if event.type == pygame.MOUSEBUTTONDOWN and turn == Human and not game_over:\n if pygame.mouse.get_pressed()[0]:\n pos = pygame.mouse.get_pos()\n\n\n if set_locations(small_boards, main_board, pos[0]//(Small_Square), pos[1]//(Small_Square), turn, box):\n #print(\"pos\",pos[0]//(Small_Square), pos[1]//(Small_Square))\n check_game(small_boards, main_board,turn)\n new_box = get_possible_moves(small_boards,pos[0]//(Small_Square), pos[1]//(Small_Square))\n #print(\"box after get_possible_moves\", new_box)\n box = Validate_box(small_boards, main_board, new_box,pos[0]//(Small_Square), pos[1]//(Small_Square))\n #print(\"Box validated\", box)\n\n #print(\"small_boards\", small_boards)\n if Check_Big_Board(main_board, turn):\n game_over = True\n\n turn = AI\n if turn == AI:\n #print(\"in AI\")\n Depth = 3\n new_Board,value, pos = Minimax(small_boards, main_board,Depth, box,turn, True)\n #print(\"Board, value, pos\", new_Board,value, pos)\n small_boards = new_Board\n check_game(small_boards,main_board,turn)\n\n new_box = get_possible_moves(small_boards,pos[0], pos[1])\n box = Validate_box(small_boards, main_board, new_box,pos[0], pos[1])\n\n\n if Check_Big_Board(main_board, turn):\n game_over = True\n\n turn = Human\n\n\n\nmain()\n","sub_path":"Human Vs Minimax (Alpha Beta Pruning)/UTTT.py","file_name":"UTTT.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"29663728","text":"# Copyright 2016, 2017 Matteo Franchin\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except 
in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport gobject\nimport gtk\nimport gtk.gdk\n\nfrom . import layout\nfrom . import icons\nfrom .thumbnailers import build_empty_thumbnail\nfrom .orchestrator import Orchestrator, THUMBNAIL_DONE\nfrom .backcaller import BackCaller\nfrom .file_utils import FileList\nfrom .config import logger, INT2, COLOR\n\n\nclass Location(object):\n def __init__(self, full_path, y_pos=0):\n self.path = os.path.realpath(full_path)\n self.y = y_pos\n\n\nclass ImageBrowser(gtk.DrawingArea, BackCaller):\n __gsignals__ = \\\n {'set-scroll-adjustment': (gobject.SIGNAL_RUN_LAST,\n gobject.TYPE_NONE,\n (gtk.Adjustment, gtk.Adjustment))}\n\n def __init__(self, start_dir, hadjustment=None, vadjustment=None,\n config=None):\n BackCaller.__init__(self,\n directory_changed=None,\n image_clicked=None)\n gtk.DrawingArea.__init__(self)\n\n assert config is not None\n self._config = config\n config.override('thumb.final_size', self._thumb_final_size_getter)\n\n # Directory this object is browsing.\n self.location = Location(start_dir)\n self.file_list = None\n self.album = None\n self.previous_locations = []\n self.next_locations = []\n\n # Adjustment objects to control the scrolling.\n self._hadjustment = hadjustment\n self._vadjustment = vadjustment\n\n # Signal handlers for the value-changed signal of the two adjustment\n # objects used for the horizontal and vertical scrollbars\n self._hadj_valchanged_handler = None\n self._vadj_valchanged_handler = None\n\n self.orchestrator = Orchestrator()\n self.orchestrator.set_callback('thumbnail_available',\n self.on_thumbnail_available)\n\n self.last_tooltip_shown = None\n self.props.has_tooltip = True\n\n # Allow the object to receive scroll events and other events.\n mask = (gtk.gdk.POINTER_MOTION_MASK |\n gtk.gdk.BUTTON_PRESS_MASK |\n gtk.gdk.BUTTON_RELEASE_MASK)\n self.add_events(mask)\n\n self.connect('expose_event', self.on_expose_event)\n self.connect('set-scroll-adjustment', ImageBrowser.scroll_adjustment)\n self.connect('configure-event', self.on_size_change)\n self.connect('button-press-event', self.on_button_press_event)\n self.connect('query-tooltip', self.on_query_tooltip)\n\n def _thumb_final_size_getter(self, parent=None, attr_name=None):\n cfg = self._config\n min_x, min_y = cfg.get('thumb.min_abs_size', (32, 32), INT2)\n scr_x, scr_y = cfg.get('screen.size', (1366, 768), INT2)\n scale_exp = cfg.get('thumb.scale_exp', 0.0, float)\n scale_base = cfg.get('thumb.scale_base', 1.25, float)\n scale_base_adjusted = max(1.05, min(2.0, scale_base))\n scale = scale_base_adjusted**scale_exp\n thumb_x, thumb_y = cfg.get('thumb.size')\n return (int(max(min_x, min(scr_x // 2, thumb_x*scale))),\n int(max(min_y, min(scr_y // 2, thumb_y*scale))))\n\n def _get_hadjustment(self):\n return self._hadjustment\n\n def _get_vadjustment(self):\n return self._vadjustment\n\n def _set_hadjustment(self, adjustment):\n self._hadjustment = adjustment\n\n def _set_vadjustment(self, adjustment):\n self._vadjustment = adjustment\n\n hadjustment = property(_get_hadjustment, _set_hadjustment)\n vadjustment = property(_get_vadjustment, 
_set_vadjustment)\n\n    def _lay_out_album(self, width=None):\n        if width is None:\n            width, _ = self.window.get_size()\n\n        # Create the file list based on the current configuration.\n        cfg = self._config\n        self.file_list = file_list = \\\n            FileList(\n                self.location.path,\n                show_hidden_files=cfg.get('browser.show_hidden_files', True),\n                reversed_sort=cfg.get('browser.reversed_sort', False),\n                sort_type=cfg.get('browser.sort_type', FileList.SORT_BY_MOD_DATE)\n            )\n\n        self.album = layout.ImageAlbum(file_list,\n                                       max_width=width,\n                                       max_size=cfg.get('thumb.final_size'),\n                                       border=cfg.get('thumb.border', (5, 5)))\n\n    def scroll_adjustment(self, hadjustment, vadjustment):\n        self._hadjustment = hadjustment\n        self._vadjustment = vadjustment\n        if isinstance(hadjustment, gtk.Adjustment):\n            self._hadj_valchanged_handler = \\\n                hadjustment.connect(\"value-changed\", self._adjustments_changed)\n        if isinstance(vadjustment, gtk.Adjustment):\n            self._vadj_valchanged_handler = \\\n                vadjustment.connect(\"value-changed\", self._adjustments_changed)\n        self._update_scrollbars()\n\n    def _set_y_location(self):\n        value = self._vadjustment.get_value()\n        upper = self._vadjustment.get_upper()\n        self.location.y = (value / float(upper) if upper != 0 else 0)\n\n    def _get_y_location(self):\n        return int(self.location.y * self._vadjustment.get_upper())\n\n    def _adjustments_changed(self, adjustment):\n        va = self._vadjustment\n        self._set_y_location()\n        self.queue_draw()\n\n    def _update_scrollbars(self):\n        '''(internal) Update the ranges and positions of the scrollbars.'''\n\n        ha = self._hadjustment\n        va = self._vadjustment\n\n        # For now we do not need a horizontal bar.\n        ha.lower = 0.0\n        ha.upper = 1.0\n        ha.value = 0.0\n        ha.page_size = 1.0\n        ha.page_increment = 0.0\n        ha.step_increment = 0.05\n\n        if self.album is None:\n            va.lower = 0.0\n            va.upper = 1.0\n            va.value = 0.0\n            va.page_size = 1.0\n            va.page_increment = 0.0\n            va.step_increment = 0.05\n            return\n\n        if self.window is None:\n            window_height = 100\n            va.value = 0.0\n        else:\n            window_size = self.window.get_size()\n            window_height = window_size[1]\n\n        # When resizing the window we want to make sure we view roughly the\n        # same images we saw before the resize.\n        old_album_height = va.upper\n        new_album_height = self.album.get_height() + 5\n        relative_pos = va.value / float(old_album_height)\n        new_value = relative_pos * new_album_height\n\n        # Set the new view, making sure it is within the interval.\n        va.lower = 0.0\n        va.upper = new_album_height\n        va.page_size = window_height\n        va.page_increment = 0.9*window_height\n        va.step_increment = 0.3*window_height\n        va.value = max(0, min(new_value, new_album_height - window_height))\n\n    def get_thumbnail_pixbuf(self, thumbnail):\n        '''Get the pixbuf (possibly from the cache) for the given Thumbnail\n        object.\n        '''\n\n        file_item = thumbnail.get_file_item()\n        if not thumbnail.damaged:\n            tn = self.orchestrator.request_thumbnail(file_item.full_path,\n                                                     thumbnail.size)\n            if tn.state is THUMBNAIL_DONE:\n                return \\\n                    gtk.gdk.pixbuf_new_from_array(tn.data,\n                                                  gtk.gdk.COLORSPACE_RGB, 8)\n            text = 'Loading...\\n' + os.path.basename(file_item.name)\n            icon_color = self._config.get_color_triple('thumb.color.loading',\n                                                       '#ff0000')\n        else:\n            text = os.path.basename(file_item.name) + '\\n(Damaged)'\n            icon_color = self._config.get_color_triple('thumb.color.damaged',\n                                                       '#a00000')\n\n        return icons.generate_text_icon(text, thumbnail.size, cache=True,\n                                        color=icon_color,\n                                        out_format=icons.FORMAT_PIXBUF)\n\n    def 
on_thumbnail_available(self, *args):\n '''Called by the orchestrator when thumbnails become available.'''\n\n # As this function is called by a separate thread, we take the lock.\n with gtk.gdk.lock:\n self.queue_draw()\n\n def on_expose_event(self, draw_area, event):\n '''Function responsible for the rendering of the widget.'''\n\n dy = self._get_y_location()\n ea = event.area\n\n # Compute viewport coordinates.\n vp_x0, vp_y0 = (ea.x, ea.y + dy)\n vp_width, vp_height = (ea.width, ea.height)\n vp_x1 = vp_x0 + vp_width\n vp_y1 = vp_y0 + vp_height\n\n for tn in self.album.get_thumbnails(vp_x0, vp_y0, vp_width, vp_height):\n x, y = tn.pos\n pixbuf = self.get_thumbnail_pixbuf(tn)\n x0 = max(x, vp_x0)\n y0 = max(y, vp_y0)\n x1 = min(x + pixbuf.get_width(), vp_x1)\n y1 = min(y + pixbuf.get_height(), vp_y1)\n sx = x1 - x0\n sy = y1 - y0\n if sx <= 0 or sy <= 0:\n logger.debug('Error: sx={}, sy={}'.format(sx, sy))\n continue\n buf_area = pixbuf.subpixbuf(x0 - x, y0 - y, sx, sy)\n rowstride = buf_area.get_rowstride()\n pixels = buf_area.get_pixels()\n self.window.draw_rgb_image(self.style.black_gc,\n x0, y0 - dy, sx, sy,\n 'normal', pixels, rowstride,\n x0, y0 - dy)\n return True\n\n def on_size_change(self, myself, event):\n '''Called when the size of the object changes.'''\n self._lay_out_album(event.width)\n self._update_scrollbars()\n self.orchestrator.clear_queue()\n\n def on_button_press_event(self, eventbox, event):\n x, y = event.get_coords()\n y += self._get_y_location()\n thumbnail = self.album.find_thumbnail_at_pos((x, y))\n if thumbnail is None:\n return\n file_item = thumbnail.get_file_item()\n self.call('image_clicked', self.file_list, file_item)\n if file_item.is_dir:\n self.previous_locations.append(self.location)\n self.next_locations = []\n self._change_directory(Location(file_item.full_path))\n\n def on_query_tooltip(self, widget, x, y, keyboard_tip, tooltip):\n '''Called before rendering the tooltip.'''\n\n y += self._get_y_location()\n thumbnail = self.album.find_thumbnail_at_pos((x, y))\n if thumbnail is None:\n return False\n\n file_item = thumbnail.get_file_item()\n if (self.last_tooltip_shown is not None and\n self.last_tooltip_shown != file_item.full_path):\n self.last_tooltip_shown = None\n return False\n\n self.last_tooltip_shown = file_item.full_path\n tooltip.set_text(file_item.name)\n return True\n\n def has_next_directory(self):\n '''Whether the list of next directories contains any elements.'''\n\n return len(self.next_locations) > 0\n\n def has_previous_directory(self):\n '''Whether the list of previous directories contains any elements.'''\n\n return len(self.previous_locations) > 0\n\n def go_to_next_directory(self):\n '''Go to the next directory, undoing the effect of\n go_to_previous_directory().'''\n\n if len(self.next_locations) > 0:\n self.previous_locations.append(self.location)\n self._change_directory(self.next_locations.pop())\n\n def go_to_previous_directory(self):\n '''Return back to the previous directory.'''\n\n if len(self.previous_locations) > 0:\n self.next_locations.append(self.location)\n self._change_directory(self.previous_locations.pop())\n\n def go_to_parent_directory(self):\n '''Add the current location to the previous list and go to the parent\n directory.\n '''\n\n parent_directory = os.path.join(self.location.path, os.path.pardir)\n self.next_locations = []\n self.previous_locations.append(self.location)\n self._change_directory(Location(parent_directory))\n\n def go_to_directory(self, location):\n '''Go to a new directory.'''\n\n 
self.next_locations = []\n self.previous_locations.append(self.location)\n self._change_directory(Location(location))\n\n def _change_directory(self, location):\n '''Internal function. Change directory without updating the previous\n and next lists.\n '''\n\n # Abort current rendering jobs. The user won't necessarily come back to\n # the parent directory and it may be too time consuming to generate all\n # the thumbnails in there.\n self.orchestrator.clear_queue()\n\n if not isinstance(location, Location):\n location = Location(location)\n\n self.location = location\n self._vadjustment.value = self._get_y_location()\n self._lay_out_album()\n self._update_scrollbars()\n self.call('directory_changed', location.path)\n self.queue_draw()\n\n def update_album(self):\n '''Redraw the browser view.'''\n self._lay_out_album()\n self._update_scrollbars()\n self.queue_draw()\n\n def zoom(self, new_exp, relative=True):\n '''Change the scale exponent used to scale thumbnails.'''\n cfg = self._config\n prev_scale_exp = cfg.get('thumb.scale_exp', 0.0, float)\n scale_exp = (prev_scale_exp if relative else 0.0) + new_exp\n prev_size = cfg.get('thumb.final_size')\n cfg.set('thumb.scale_exp', scale_exp)\n new_size = cfg.get('thumb.final_size')\n if new_size[0] == prev_size[0] or new_size[1] == prev_size[1]:\n # Scale had no effect: maybe scale is too small or too big.\n # Restore previous scale exponent and quit.\n cfg.set('thumb.scale_exp', prev_scale_exp)\n return\n self.update_album()\n\nImageBrowser.set_set_scroll_adjustments_signal('set-scroll-adjustment')\n","sub_path":"src/image_browser.py","file_name":"image_browser.py","file_ext":"py","file_size_in_byte":14736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"589146049","text":"# import tkSimpleDialog\nfrom tkinter import *\nfrom tkinter import simpledialog as dialog\nfrom tkinter import messagebox as mbox\n\ndef dosomething(stuff):\n print(stuff)\n\nclass NewGroceryItemDialog(dialog.Dialog):\n\n def body(self, master):\n\n Label(master, text=\"Grocery item:\").grid(row=0, sticky=W)\n Label(master, text=\"Quantity:\").grid(row=1, sticky=W)\n\n self.e1 = Entry(master)\n self.e2 = Entry(master)\n\n self.e1.grid(row=0, column=1)\n self.e2.grid(row=1, column=1)\n self.resizable(width=False, height=False)\n def validate(self):\n try:\n second = int(self.e2.get())\n self.result = self.e1.get(), second\n if second < 0:\n raise ValueError()\n if not self.e1.get():\n raise TypeError\n return 1\n except ValueError:\n mbox.showerror(message=\"Please specify a positive number!\")\n return 0\n except TypeError:\n mbox.showerror(message=\"Please specify a grocery item!\")\n return 0\n\n def apply(self):\n return self.result\n","sub_path":"new_grocery_item_dialog.py","file_name":"new_grocery_item_dialog.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"124253510","text":"#!/usr/bin/python3.6\n\n## import packages\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\" \n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nimport tensorflow as tf\nconfig = tf.compat.v1.ConfigProto(allow_soft_placement=True)\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.99\nsess = tf.compat.v1.Session(config=config)\n\nimport sys\nsys.path.append(\"../Orca/preprocessing\")\nfrom aspectawarepreprocessor import AspectAwarePreprocessor\nfrom simplepreprocessor import 
SimplePreprocessor\nfrom meanpreprocessor import MeanPreprocessor\nfrom patchpreprocessor import PatchPreprocessor\nfrom imagetoarraypreprocessor import ImageToArrayPreprocessor\nsys.path.append(\"../Orca/callbacks\")\nfrom trainingmonitor import TrainingMonitor\nsys.path.append(\"../Orca/io\")\nfrom hdf5datasetgenerator import HDF5DatasetGenerator\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD, RMSprop, Adam\nfrom keras.applications import VGG16, ResNet50\nfrom keras.models import Model # important!\nfrom keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout, \\\n BatchNormalization, Activation\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import plot_model\n\nfrom imutils import paths\n#from config import train_resnet50_config as cfg\nimport argparse\nimport json\nimport numpy as np\n\n\n## cache variables\nNUM_CLASSES = 2\nTRAIN_HDF5 = \"./data/train.hdf5\"\nTRAINVAL_HDF5 = \"./data/trainval.hdf5\"\nVAL_HDF5 = \"./data/val.hdf5\"\nTEST_HDF5 = \"./data/test.hdf5\"\nDATASET_MEAN = \"./output/dogs_vs_cats_mean.json\"\n\n#MODEL_PATH = \"./output/round3_resnet50.model\"\nOUTPUT_PATH = \"./output/test6_aspect_meansub_imgtoarr_version2aug\"\n\nBATCH_SIZE = 128\nEPOCHS = 5\n\n\n## initiate image preprocessors\nsp = SimplePreprocessor(224, 224)\npp = PatchPreprocessor(224, 224)\niap = ImageToArrayPreprocessor()\naap = AspectAwarePreprocessor(224, 224)\n\n# load in RGB mean values of training set\ntrainmeans = json.loads(open(\"./output/dogs_vs_cats_mean.json\").read())\nmp = MeanPreprocessor(trainmeans[\"R\"], trainmeans[\"G\"], trainmeans[\"B\"])\n\n# initiate HDF5DataGenerator for trainset, trainvalset\n\n# version 1\n#aug = ImageDataGenerator(\n# rotation_range=20,\n# zoom_range=0.15,\n# width_shift_range=0.2,\n# height_shift_range=0.2,\n# shear_range=0.15,\n# horizontal_flip=True,\n# fill_mode=\"nearest\",\n# )\n#\n\n# version 2\naug = ImageDataGenerator(\n rotation_range=20,\n zoom_range=0.05,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=0.05,\n horizontal_flip=True,\n fill_mode=\"nearest\",\n )\n\ntrainGen = HDF5DatasetGenerator(\n TRAIN_HDF5, \n BATCH_SIZE,\n # extract Patch => want to learn discrimitive patterns\n # substract mean, convert to keras array\n #preprocessors=[pp, mp, iap],\n #preprocessors=[pp, iap],\n #preprocessors=[sp, iap],\n #preprocessors=[sp, mp, iap],\n preprocessors=[aap, mp, iap],\n aug=aug,\n classes=2,\n )\n\n# initiate data augmentor for trainval set\ntrainvalGen = HDF5DatasetGenerator(\n TRAINVAL_HDF5, \n BATCH_SIZE,\n # RESIZE the org image => validate/test on the whole image\n # substract mean, convert to keras array\n #preprocessors=[pp, mp, iap],\n #preprocessors=[pp, iap],\n #preprocessors=[sp, iap],\n preprocessors=[aap, mp, iap],\n classes=2,\n )\n\n\n## perform model surgery, load ResNet50 network without head layers, explicitly define input_tensor\nx = Input(shape=(224, 224, 3))\nbackbone = ResNet50(weights=\"imagenet\", include_top=False, input_tensor=x) \n# FREEZE the backbone & train the new head layers for 10-30 epochs\nfor layer in backbone.layers:\n layer.trainable = False\n\nhead = backbone.output\nhead = BatchNormalization(axis=-1)(head)\nhead = GlobalAveragePooling2D()(head)\nhead = Dense(NUM_CLASSES, activation=\"softmax\")(head)\nmodel = Model(inputs=backbone.input, outputs=head)\n\nprint(\"[INFO] plot model architecture...\")\narch_path = os.path.join(OUTPUT_PATH, \"resnet50_new_head.png\")\nplot_model(model, to_file=arch_path, 
show_shapes=True)\n\n\n## initalize an optimizer & compile model & train NN\nprint(\"[INFO] compiling model for new head layers warm-up...\")\n#opt = RMSprop(lr=0.001)\nopt = Adam(lr=0.001)\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\n# build callbacks\nfigpath = os.path.sep.join([OUTPUT_PATH,\n \"{}_learning_curve.png\".format(os.getpid())])\ntm = TrainingMonitor(figpath)\n\ncptpath = os.path.sep.join([OUTPUT_PATH,\n \"model-resnet50_new_head-{epoch:03d}-{val_loss:0.4f}-\" + str(os.getpid()) + \".hdf5\"])\nckpt = ModelCheckpoint(cptpath, monitor=\"val_loss\", mode=\"min\", \\\n save_best_only=True, verbose=1)\n\n# train \nmodel.fit_generator(\n trainGen.generator(),\n steps_per_epoch=trainGen.numImages // BATCH_SIZE + 1,\n validation_data=trainvalGen.generator(),\n validation_steps=trainvalGen.numImages // BATCH_SIZE + 1, \n epochs=EPOCHS,\n max_queue_size=BATCH_SIZE,\n callbacks=[tm, ckpt],\n verbose=1)\n\n\n# close HDFDatasetGenerator\ntrainGen.close()\ntrainvalGen.close()\n\nprint(\"[INFO] Done!\")\n\n\n\n\n\"\"\"just in case need to unfreeze deeper layers!\"\"\"\n\n## UNfreeze the CONV layers NEAR the new head & train to localize the weights\n#print(\"[INFO] fine tune the model in %d stages\" % len(args[\"fine_tune_layers\"]))\n#for i, num in enumerate(args[\"fine_tune_layers\"]):\n# # unfree layers\n# index = int(num)\n# for layer in model.layers[index:]:\n# layer.trainable = True\n#\n# print(\"\\n[INFO] stage %d/%d, unfreez layers from %d & recompile model...\" %\n# (i + 1, len(args[\"fine_tune_layers\"]), index))\n# sgd = SGD(lr=0.001)\n# model.compile(loss=\"categorical_crossentropy\", optimizer=sgd, metrics=[\"accuracy\"])\n#\n# print(\"[INFO] fine tuning the model...\")\n# model.fit_generator(aug.flow(trainX, trainY, batch_size=32), \\\n# validation_data=(testX, testY), \\\n# steps_per_epoch=len(trainX) // 32, \\\n# epochs=50, verbose=1)\n# \n# print(\"[INFO] evaluating...\")\n# predictions = model.predict(testX, batch_size=32)\n# print(classification_report(predictions.argmax(axis=1), testY.argmax(axis=1), \\\n# target_names=classNames))\n# \n# # save current model\n# outputName = \"fine_tune_stage_%d_from_layer_%d.hdf5\" % (i + 1, index)\n# filepath = os.path.join(args[\"output_dir\"], outputName)\n# model.save(filepath)\n# pass\n#\n#\n#\n","sub_path":"train_resnet50.py","file_name":"train_resnet50.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"298499375","text":"#-*- coding: utf-8 -*-\nimport sys\nimport os\nimport pprint\n\nimport internetarchive\n\ndef _search_collection(collection_name):\n \"\"\" Searches the internet archive for the specified collection.\n if no items are found for the collection it returns None otherwise\n the Search object is returned.\n \"\"\"\n collection = internetarchive.search_items('collection:{}'.format(collection_name))\n if collection.num_found == 0:\n return None\n else:\n return collection\n\ndef _get_item_data(item):\n data = internetarchive.get_item(item.get('identifier'))\n original_size = 0\n total_size = 0\n for f in data.files:\n total_size += int(f.get('size',0))\n if f['source'] == 'original':\n if f['name'].endswith('_files.xml'):\n xml_file = data.get_file(data.identifier + '_files.xml')\n xml_file.download()\n size = os.path.getsize(data.identifier + '_files.xml')\n os.remove(data.identifier + '_files.xml')\n original_size += size\n total_size += size\n else:\n original_size += 
int(f.get('size',0))\n\n return total_size,original_size\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('Missing parameter: collection name')\n sys.exit(-1)\n\n collection = sys.argv[1]\n print('Getting data for the collection {}'.format(collection))\n\n collection_data = _search_collection(collection)\n if not collection_data:\n print('No collection {} found'.format(collection))\n sys.exit(-1)\n else:\n num_items = collection_data.num_found\n print('Found {} items in the collection {}'.format(num_items, collection))\n proccessed = 1\n # Note the internetarchive library does an HTTP request for each item so this\n # could take some time.\n collection_original_size = 0\n collection_total_size = 0\n for item in collection_data:\n print('[{}/{}] Processing item {}'.format(proccessed, num_items, item['identifier']))\n proccessed += 1\n t,o = _get_item_data(item)\n collection_original_size += o\n collection_total_size += t\n print('Total collection file size: {}'.format(collection_total_size))\n print('Total collection original file size: {}'.format(collection_original_size))\n","sub_path":"dockerized-gists/f77f094032110a7b51e7/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604449034","text":"import numpy as np\nimport theano.tensor as T\nimport theano\nfloatX = theano.config.floatX\n\nX = T.matrix('X') # X (1~10)\nw = T.vector('w') # theta\ny = T.vector('y') # y (2.7, 4.1, 2.8 ...)\n\ncost_func = T.mean( (y - T.dot(X, T.reshape(w, (-1,1))) )**2 )\n\ndydx = T.grad(cost_func, w)\n\ncalc_output = theano.function([X, y, w], [cost_func, dydx])\n\nX_data = np.array([[1, 1], [1, 2], [1, 3], [1, 4], [1, 5]\n\t, [1, 6], [1, 7], [1, 8], [1, 9], [1, 10]], dtype=floatX)\n\ny_data = np.array([2.7, 4.1, 2.8, 4.5, 5.2, 6.1, 4.7, 8.0, 7.0, 6.2], dtype=floatX)\n\nlearning_rate = 0.01 # constant variable\ntheta = [0, 1] # initial theta\n\n\nfor i in range(100):\n\t[cost, tGrad] = calc_output(X_data, y_data, theta)\n\ttheta = theta - learning_rate * tGrad\n\tprint( "%dtimes, cost : %f" %(i+1, cost) )\n\tprint(theta)","sub_path":"linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"613999079","text":"# -*- coding: utf-8 -*-\n# @Time  : 19-4-22 4:06 PM\n# @Author : gjj\n# @contact : adau22@163.com ============================\n# my github:https://github.com/TerYang/ ===\n# copy from network                     ===\n# good good study,day day up!!          
===\n# ======================================================\nimport os, gzip, torch,argparse\nimport torch.nn as nn\nimport numpy as np\n# import scipy.misc\n# import imageio\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms\nimport torch\nINPUT_SIZE = 48\n\n# def parse_args():\n# \"\"\"parsing and configuration\"\"\"\n# desc = \"Pytorch implementation of GAN collections\"\n# parser = argparse.ArgumentParser(description=desc)\n#\n# # parser.add_argument('--gan_type', type=str, default='None',#'ACGAN',#'BEGAN',#'GAN',#'LSGAN',#default='GAN',\n# # choices=['GAN', 'CGAN', 'infoGAN', 'ACGAN', 'EBGAN', 'BEGAN', 'WGAN', 'WGAN_GP', 'DRAGAN', 'LSGAN'],\n# # help='The type of GAN')\n# # parser.add_argument('--dataset', type=str, default='mnist', choices=['mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'svhn', 'stl10', 'lsun-bed'],\n# # help='The name of dataset')\n#\n# parser.add_argument('--split', type=str, default='', help='The split flag for svhn and stl10')\n# parser.add_argument('--epoch', type=int, default=5, help='The number of epochs to run')\n# parser.add_argument('--batch_size', type=int, default=64, help='The size of batch')\n# parser.add_argument('--input_size', type=int, default=INPUT_SIZE, help='The size of input image')\n# # parser.add_argument('--save_dir', type=str, default='models',\n# # help='Directory name to save the model')\n# parser.add_argument('--save_dir', type=str, default='repeat_lab', help='Directory name to save the model')\n# parser.add_argument('--result_dir', type=str, default='results', help='Directory name to save the generated images')\n# parser.add_argument('--log_dir', type=str, default='logs', help='Directory name to save training logs')\n# parser.add_argument('--lrG', type=float, default=0.0002)\n# parser.add_argument('--lrD', type=float, default=0.0002)\n# parser.add_argument('--beta1', type=float, default=0.05)\n# parser.add_argument('--beta2', type=float, default=0.999)\n# # parser.add_argument('--gpu_mode', type=bool, default=True)\n# parser.add_argument('--gpu_mode', type=bool, default=True)\n# parser.add_argument('--benchmark_mode', type=bool, default=False)\n#\n# return check_args(parser.parse_args())\n#\n#\n# def check_args(args):\n# \"\"\"checking arguments\"\"\"\n# # --save_dir\n# if not os.path.exists(args.save_dir):\n# # os.makedirs(os.path.join(os.getcwd(),args.save_dir))\n# os.makedirs(args.save_dir)\n#\n# # --result_dir\n# if not os.path.exists(args.result_dir):\n# os.makedirs(args.result_dir)\n#\n# # --result_dir\n# if not os.path.exists(args.log_dir):\n# os.makedirs(args.log_dir)\n#\n# # --epoch\n# try:\n# assert args.epoch >= 1\n# except:\n# print('number of epochs must be larger than or equal to one')\n#\n# # --batch_size\n# try:\n# assert args.batch_size >= 1\n# except:\n# print('batch size must be larger than or equal to one')\n#\n# return args\n\ndef adjust_learning_rate(optimizer, epoch, val, lr):\n '''\n fun:Sets the learning rate to the initial LR decayed by 10 every val epochs\n :param optimizer: 优化器\n :param epoch: 当前epoch\n :param val: epoch 设置间隔\n :param lr: 初始学习率\n :return: None\n '''\n lr *= 0.1 ** (epoch // val)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef load_mnist(dataset):\n data_dir = os.path.join(\"./data\", dataset)\n\n def extract_data(filename, num_data, head_size, data_size):\n with gzip.open(filename) as bytestream:\n bytestream.read(head_size)\n buf = bytestream.read(data_size * num_data)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float)\n 
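# the IDX payload is a flat uint8 pixel stream; the float cast feeds the later X / 255. rescale\n 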
return data\n\n data = extract_data(data_dir + '/train-images-idx3-ubyte.gz', 60000, 16, 28 * 28)\n trX = data.reshape((60000, 28, 28, 1))\n\n data = extract_data(data_dir + '/train-labels-idx1-ubyte.gz', 60000, 8, 1)\n trY = data.reshape((60000))\n\n data = extract_data(data_dir + '/t10k-images-idx3-ubyte.gz', 10000, 16, 28 * 28)\n teX = data.reshape((10000, 28, 28, 1))\n\n data = extract_data(data_dir + '/t10k-labels-idx1-ubyte.gz', 10000, 8, 1)\n teY = data.reshape((10000))\n\n trY = np.asarray(trY).astype(np.int)\n teY = np.asarray(teY)\n\n X = np.concatenate((trX, teX), axis=0)\n y = np.concatenate((trY, teY), axis=0).astype(np.int)\n\n seed = 547\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(y)\n\n y_vec = np.zeros((len(y), 10), dtype=np.float)\n for i, label in enumerate(y):\n y_vec[i, y[i]] = 1\n\n X = X.transpose(0, 3, 1, 2) / 255.\n # y_vec = y_vec.transpose(0, 3, 1, 2)\n\n X = torch.from_numpy(X).type(torch.FloatTensor)\n y_vec = torch.from_numpy(y_vec).type(torch.FloatTensor)\n return X, y_vec\n\ndef load_celebA(dir, transform, batch_size, shuffle):\n # transform = transforms.Compose([\n # transforms.CenterCrop(160),\n # transform.Scale(64),\n # transforms.ToTensor(),\n # transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n # ])\n\n # data_dir = 'data/celebA' # this path depends on your computer\n dset = datasets.ImageFolder(dir, transform)\n data_loader = torch.utils.data.DataLoader(dset, batch_size, shuffle)\n\n return data_loader\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n # print(net)\n print('Total number of parameters: %d' % num_params)\n#\n# def save_images(images, size, image_path):\n# return imsave(images, size, image_path)\n#\n# def imsave(images, size, path):\n# image = np.squeeze(merge(images, size))\n# return scipy.misc.imsave(path, image)\n#\n# def merge(images, size):\n# h, w = images.shape[1], images.shape[2]\n# if (images.shape[3] in (3,4)):\n# c = images.shape[3]\n# img = np.zeros((h * size[0], w * size[1], c))\n# for idx, image in enumerate(images):\n# i = idx % size[1]\n# j = idx // size[1]\n# img[j * h:j * h + h, i * w:i * w + w, :] = image\n# return img\n# elif images.shape[3]==1:\n# img = np.zeros((h * size[0], w * size[1]))\n# for idx, image in enumerate(images):\n# i = idx % size[1]\n# j = idx // size[1]\n# img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]\n# return img\n# else:\n# raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')\n#\n# def generate_animation(path, num):\n# images = []\n# for e in range(num):\n# img_name = path + '_epoch%03d' % (e+1) + '.png'\n# images.append(imageio.imread(img_name))\n# imageio.mimsave(path + '_generate_animation.gif', images, fps=5)\n\ndef loss_plot(hist, path = 'Train_hist.png', model_name = ''):\n # x = range(len(hist['D_loss']))\n flag = 0\n try:\n x = range(1,len(hist['per_epoch_loss'])+1)\n except:\n try:\n x = range(1,len(hist['per_epoch_G_loss'])+1)\n flag = 1\n except:\n try:\n x = range(1, len(hist['G_loss']) + 1)\n flag = 2\n except:\n print('could not draw loss curve!!')\n if flag==1:\n try:\n y1 = hist['per_epoch_D_loss']\n plt.plot(x, y1, label='D_loss')\n y2 = hist['per_epoch_G_loss']\n plt.plot(x, y2, label='G_loss')\n except:\n pass\n elif flag == 2:\n try:\n y1 = hist['D_loss']\n plt.plot(x, y1, label='D_loss')\n y2 = hist['G_loss']\n plt.plot(x, y2, label='G_loss')\n except:\n pass\n else:\n y1 = hist['per_epoch_loss']\n 
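# flag == 0: the history only tracked a single per-epoch loss series\n 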
plt.plot(x, y1, label='D_loss')\n\n # plt.xlabel('Iter')\n plt.xlabel('epoch')\n plt.ylabel('Loss')\n\n # plt.legend(loc=4)\n plt.legend(loc='best')\n plt.grid(True)\n plt.tight_layout()\n\n path = os.path.join(path, model_name + '_loss.png')\n\n plt.savefig(path)\n plt.show()\n plt.close()\n\ndef initialize_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n elif isinstance(m, nn.ConvTranspose2d):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n\ndef load_interval(self,epoch):\n save_dir = os.path.join(self.save_dir, self.dataset, self.model_name)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n # 保存模型\n torch.save(self.G, os.path.join(save_dir, self.model_name + '_{}_G.pkl'.format(epoch)))#dictionary ['bias', 'weight']\n torch.save(self.D, os.path.join(save_dir, self.model_name + '_{}_D.pkl'.format(epoch)))\n\ndef f2(l,r):\n import math\n if l == 0:\n if math.fabs(l - r) < 0.5:\n # TN\n return 0\n else:\n # FP\n return 3\n else:\n if math.fabs(l - r) < 0.5:\n # TP\n return 1\n else:\n # FN\n return 2\n\ndef validate(model,data_loader=None,data=None,label=None):#,mark='validate'\n '''\n validate at the same time as training\n func:select data and label or data_loader\n :return:\n '''\n import math\n f1 = lambda l,r:1 if math.fabs(l-r)<0.5 else 0\n model.eval()\n # 为测试 test mix data with normal and attack\n if data_loader is not None:\n y_pre_ = []\n all_l_ = []\n TP = 0\n FN = 0\n\n TN = 0\n FP = 0\n res = {}\n count = 0\n # 默认带标签的验证集数据都传入data_loader,传入data 就可以\n for iter, (x_,l_)in enumerate(data_loader):\n # 带标签\n if iter == data_loader.dataset.__len__() // 64:\n break\n x_ = x_.cuda()\n try:\n D_real,_ = model(x_)\n except:\n D_real = model(x_)\n l_ =np.squeeze(l_.data.numpy()).tolist()\n D_real = np.squeeze(D_real.data.cpu().numpy()).tolist()\n if iter == 0:\n y_pre_ = D_real\n all_l_ = l_\n else:\n y_pre_.extend(D_real)\n all_l_.extend(l_)\n ll = list(map(f2,l_,D_real))\n TP += ll.count(1)\n FN += ll.count(2)\n TN += ll.count(0)\n FP += ll.count(3)\n count += len(l_)\n try:\n # res['pre']='{}'.format(TP/(FP+TP))\n res['pre'] = TP / (FP + TP)\n except ZeroDivisionError:\n res['pre'] = 'NA'\n # 2 precision of negative\n try:\n res['N_pre'] = TN / (TN + FN)\n # res['N_pre']='{}'.format(TN/(TN+FN))\n except ZeroDivisionError:\n # writelog('have no P(normaly event)',file)\n res['N_pre'] = 'NA'\n # # 3 false positive rate,index of ROC , 误报 (Type I error).\n # try:\n # # res['FPR']='{}'.format(FP/(FP+TN))\n # res['FPR'] = FP / (FP + TN)\n # except ZeroDivisionError:\n # res['FPR'] = 'NA'\n # 4 true positive rate,index of ROC\n # try:\n # # res['TPR'] ='{}'.format(TP/(TP+FN))\n # res['TPR'] = TP / (TP + FN)\n # except ZeroDivisionError:\n # # writelog('have no P(normaly event)',file)\n # res['TPR'] = 'NA'\n # 5 accurate\n try:\n # res['acc'] = (TP+NN)/len(flags)\n res['acc'] = (TP + TN) / (count)\n # results['accurate'] = accurate\n except ZeroDivisionError:\n # writelog('Error at get data,flags is None)',file)\n res['acc'] = 'NA'\n # recall same as TPR\n try:\n res['recall'] = TP / (TP + FN)\n except ZeroDivisionError:\n # writelog('Error at get data,flags is None)',file)\n res['recall'] = 'NA'\n\n # F1\n try:\n res['F1'] = 2 * TP / (2 * TP + FP + FN)\n except ZeroDivisionError:\n # writelog('Error at get data,flags is None)',file)\n res['F1'] = 'NA'\n # false negative rate (Type II error).\n # try:\n # # 
res['fnr']= '{}'.format(FN/(FN+TP))\n # res['fnr'] = FN / (FN + TP)\n # except ZeroDivisionError:\n # # writelog('Error at get data,flags is None)',file)\n # res['fnr'] = 'NA'\n\n print('validate: --D-- pre:%s,N_pre:%s,acc:%s,recall:%s,F1:%s'%\n (str(res['pre']),str(res['N_pre']),str(res['acc']),str(res['recall']),str(res['F1'])),end=',')\n print('size:%d,TP:%d,TN:%d' %(count,TP,TN),end=',')\n return res,y_pre_,all_l_\n\n # 正常\n if data is not None:\n # 带标签\n # a = np.empty((3,1))\n # a.ndim\n # model = model.cuda()\n if data.__class__ == torch.Tensor:\n if data.data.numpy().ndim == 4:\n pass\n elif data.data.numpy().ndim == 3:\n data = torch.unsqueeze(data, 1)\n elif data.__class__ == np.ndarray:\n if data.numpy().ndim == 3:\n TraindataM = torch.from_numpy(data).float() # transform to float torchTensor\n data = torch.unsqueeze(TraindataM, 1)\n elif data.numpy().ndim == 4:\n data = torch.from_numpy(data).float() # transform to float torchTensor\n # data.cuda()\n # try:\n # D_real, _ = model(data)\n # except:\n\n # cup model\n model = model.cpu()\n try:\n D_real = model(data)\n except:\n # print(data.data.cuda().numpy().shape)\n print(data.data.numpy().shape)\n\n if label is not None:\n # print(label.__class__,len(label))#, D_real.item(), D_real[0]\n\n D_real = D_real.data.numpy()\n D_real = np.squeeze(D_real).tolist()#[[],[],[]]\n\n ll = list(map(f1, label, D_real))\n zeros = ll.count(0) # 错误判定\n ones = ll.count(1) # 正确判定\n print('validate: D,size%d,errors:%d,correct:%d'%(len(ll),zeros,ones),end=',')\n print('acc:%.6f,judged as 0.'%(ones/len(ll)),end=',')\n return ones/(len(ll))\n else:\n # 验证集没有标,认为是normal 数据集,判定为0~0.5之间即可认为正确,label 0\n # D_real = D_real.data.cuda().numpy()\n D_real = D_real.data.numpy()\n D_real = np.squeeze(D_real).tolist()#[[],[],[]]\n\n # f = lambda x: 1 if x[0] < 0.5 else 0\n f = lambda x: 0 if x < 0.5 else 1 # test pure normal data\n ll = list(map(f, D_real))\n zeros = ll.count(0)\n ones = ll.count(1)\n print('validate: D,size:%d,zeros:%d,ones:%d'%(len(ll),zeros,ones),end=',')\n print('acc:%.6f,judged as 0' % (zeros/len(ll)), end=',')\n return zeros/len(ll)\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"126230763","text":"import unittest\nimport unittest.mock as mock\nfrom unittest.mock import MagicMock, mock_open\n\nimport urllib.request\nfrom urllib.error import HTTPError\n\n# from tempfile import TemporaryDirectory, NamedTemporaryFile, gettempdir\n\nimport os\nimport logging\nimport re\nfrom datetime import datetime\n\nimport src.tools as tools\nfrom src.sample import rm\n\nclass Tester(unittest.TestCase):\n\treturnValue = b'contents'\n\t\n\t@staticmethod\n\tdef setUpClass():\n\t\tformat = \"%(asctime)-15s %(code)-3s %(url)s %(message)s\\n\\t%(reason)s\"\n\t\tTester.formatter = logging.Formatter(format)\n\t\tTester.log = logging.getLogger(\"milti-download\")\n\t\t\n\t\ttools.Log.debug = True\n\t\t\n\tdef setUp(self):\n\t\tcm = MagicMock()\n\t\tcm.getcode.return_value = 200\n\t\tcm.read.return_value = Tester.returnValue\n\t\tcm.__enter__.return_value = cm\n\t\tself.cm = cm\n\n\t@staticmethod\n\tdef genException(number):\n\t\tmsg = {\n\t\t\t404: \"404 Page Not Found\",\n\t\t\t503: \"503 Service Unavailable\"\n\t\t}\n\t\treturn lambda x: Tester.raiseException(\n\t\t\tHTTPError(x.full_url, number, msg[number], None, None)\n\t\t)\n\t\n\t@staticmethod\n\tdef raiseException(exception):\n\t\traise exception\n\t\n\tdef 
test_random_header_create(self):\n\t\tversion, header = tools.__randomHeader__()\n\n#https://stackoverflow.com/questions/1289894/how-do-i-mock-an-open-used-in-a-with-statement-using-the-mock-framework-in-pyth\n\t@mock.patch('src.tools.urlopen')\n\tdef test_download_process(self, mockUrlopen):\n\t\tpath, header, interval = \"./test/files\", tools.randomHeader(), 0\n\t\turls = [\n\t\t\t(\"fileName1\", \"http://example.org/fileName1\"),\n\t\t\t(\"fileName2\", \"http://example.org/fileName2\")\n\t\t]\n\t\t\n\t\ttools.urlopen.return_value = self.cm\n\t\twith tools.Log() as log:\n\t\t\ttools.__downloadProcess__(path, header, urls, interval, 0, log)\n\t\t\t\n\t@mock.patch('src.tools.urlopen')\n\tdef test_multidownload(self, mockUrlopen):\n\t\tpath, header, interval = \"./test/files\", tools.randomHeader(), 0\n\t\turls = [\n\t\t\t(\"fileName1\", \"http://example.org/fileName1\"),\n\t\t\t(\"fileName2\", \"http://example.org/fileName2\")\n\t\t]\n\t\t\n\t\ttools.urlopen.return_value = self.cm\n# \t\t\tmultiDownload(path, referer, urls, interval=0.5, chunkSize=5)\n\t\ttools.multiDownload(path, 'http://example.org', urls)\n\n#\t @mock.patch('src.tools.Request')\n\t@mock.patch('src.tools.urlopen')\n\t@unittest.skip(\"analyzing log is not a easy thing. i need more practices.\")\n\tdef test_download_process_404_error(self, mockUrlopen):\n\t\ttools.urlopen.side_effect = Tester.genException(404)\n\t\t\n\t\tpath, header, interval = \"./test/files\", tools.randomHeader(), 0\n\t\turls = [\n\t\t\t(\"file404Name1\", \"http://example.org/fileName1\"),\n\t\t\t(\"file404Name2\", \"http://example.org/fileName2\")\n\t\t]\n\t\twith tools.Log() as log:\n\t\t\tnow = datetime.now()\n\t\t\tstrNow = now.strftime(\"%Y-%m-%d\")\n\n\t\t\ttools.__downloadProcess__(path, header, urls, interval, 0, log)\n\t\t\tfiles = [i.baseFilename for i in log.handlers if \"baseFilename\" in dir(i)]\n\t\t\tfor file in files:\n\t\t\t\twith open(file, \"r\") as f:\n\t\t\t\t\tlogs = f.read().strip().split(\"\\n\\n\")[-2:]\n\t\t\t\tprint(\"logs = \",logs)\n\t\t\t\tlogInfos = [re.match(r\"%s .+? (.+?) download error to (.+?) 
(\\d+)\"%(strNow), logLine) for logLine in logs]\n\n\t\t\t\tfor i, logInfo in enumerate(logInfos):\n\t\t\t\t\tself.assertNotEqual(logInfo, None)\n\t\t\t\t\tlogInfo = logInfo.groups()\n\n\t\t\t\t\tself.assertEqual(logInfo[0],urls[i][1])\n\t\t\t\t\tself.assertEqual(logInfo[1],os.path.join(path, urls[i][0]).replace(\"\\\\\",\"/\"))\n\t\t\t\t\tself.assertEqual(logInfo[2], \"404\")\n\n\t@mock.patch('src.sample.os')\n\tdef test_rm(self, my_mock):\n\t\trm(\"sample.txt\")\n\t\t\n\t\tmy_mock.remove.assert_called_with(\"sample.txt\")\n\n\tdef test_logger(self):\n\t\tx = tools.Log()\n\t\tself.assertEqual(type(x), tools.Log)\n\t\t\n\t\twith tools.Log() as log:\n\t\t\tself.assertEqual(type(log), logging.Logger)\n\n\tdef test_logger_with(self):\n\t\tformat = \"%(asctime)-15s %(message)s\"\n\t\twith tools.Log(format=format) as log:\n\t\t\tpass\n# \t\t\tlog.info(\"test\")\n\n#https://stackoverflow.com/questions/32043035/python-3-urlopen-context-manager-mocking\n\t@mock.patch('urllib.request.urlopen')\n\tdef test_sample(self, mockUrlopen):\n\t\tcm = MagicMock()\n\t\tcm.getcode.return_value = 200\n\t\tcm.read.return_value = b'contents'\n\t\tcm.__enter__.return_value = cm\n\t\tmockUrlopen.return_value = cm\n\n\t\twith urllib.request.urlopen('http://foo') as response:\n\t\t\t\tself.assertEqual(response.getcode(), 200)\n\t\t\t\tself.assertEqual(response.read(), b'contents')\n","sub_path":"test/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"470288745","text":"\"\"\"\nPrint header information of NetCDF file\n\nBoth NetCDF3 and NetCDF4 formats are supported.\n\n\"\"\"\n\n\nimport sys\nimport numpy as np\nimport os.path\n\nfrom subprocess import call\nfrom netCDF4 import Dataset\n\ndef open_netcdf(fname):\n if not os.path.isfile(fname):\n print(\"File does not exist:\"+fname)\n sys.exit()\n\n fid=Dataset(fname,'r')\n print(\"Open:\",fname)\n return fid\n\n\n\n##-- Parameters\n#indir='./data/'\n#fname=indir+'AERDB_L2_VIIRS_SNPP.A2014067.1936.001.2019056041229.nc'\n#fname=indir+'slp_wrfout_d01_2018-02-20_00:00:00.nc'\n\n### Get file name from argument\nif len(sys.argv)<2:\n sys.exit(\"Please provide NC file name as an argument!\")\nelse:\n fname= str(sys.argv[1]) ## [1]: the first argument\n\n\nnc_f= open_netcdf(fname)\nprint(\"\\n*** NC Format=\",nc_f.data_model)\n\n\n###--- Attributes\nprint(\"\\n*** Global Attributes ***\")\nnc_attrs= nc_f.ncattrs()\nfor nc_attr in nc_attrs:\n print(' {}: {}'.format(nc_attr,nc_f.getncattr(nc_attr)))\n\nprint(\"\\n*** Dimensions ***\")\nfor nc_dim in nc_f.dimensions:\n# print(' Name: {}'.format(nc_dim))\n print(' {}'.format(str(nc_f.dimensions[nc_dim]).split(':')[1]))\n \nprint(\"\\n*** Variables ***\")\nfor var in nc_f.variables:\n print(nc_f.variables[var])\n \nnc_f.close()\n","sub_path":"C.Various_File_Format_Read_Write/C03a.NetCDF_file_header_info.py3.py","file_name":"C03a.NetCDF_file_header_info.py3.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"269799684","text":"from bs4 import BeautifulSoup\nimport requests, csv\nimport pandas as pd\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\n\nurl = 'https://download.bls.gov/pub/time.series/pc/pc.product'\nres = requests.get(url)\nsoup = BeautifulSoup(res.content, 'html.parser')\n\nwith open('./info/Series_ID_transform.csv', 'w', newline='') as csvfile:\n writer = 
csv.writer(csvfile, dialect='excel')\n for column in soup:\n df = column.split('\\r\\n')\n for row in df:\n item = row.split('\\t')\n writer.writerow(item)\n csvfile.close()\n\n# Load the CSV file\ndf = pd.read_csv('./info/PPI_USA_all.csv')\nprodName = pd.read_csv('./info/Series_ID_transform.csv')\nprodName['item'] = 'PCU' + prodName['industry_code'] + prodName['product_code']\nprodName.set_index('item', inplace=True)\n\nfor i in df['series_id']:\n for newI in prodName.index:\n if i == newI:\n # acces the value which I want to switch\n item = prodName.loc[i, 'product_name']\n # replace the value, notic that it's series so need to add .str\n df['series_id'] = df['series_id'].str.replace(i, item)\n\n else:\n pass\n\n# Drop the M13\ndf = df[(True ^ df['period'].isin(['M13']))]\ndf['date'] = df['year'].astype(str) + df['period']\ndf['date'] = df['date'].str.replace('M', '-')\n\n# rename the date\ndf_sorted = df.sort_values(by=['date'], inplace=True)\n\n# Decide what columns we want\ncategories = list(df[list(df)[0]].drop_duplicates())\ndf_cols = ['date', 'series_id', 'value']\n\n# Prepare an empty dataframe to fill with properly indexed economic data\nnew_df = pd.DataFrame(columns=df_cols)\n\n# Toss out the columns we don't want\ndf = df[df_cols]\n\n# Set the date as the index using the same format as the USD_CAD data\ndf.index = df['date']\nnew_df.index = new_df['date']\n\n# Dump out the date column now that we applied it to the dataframe index\ndf.drop(['date'], axis=1, inplace=True)\nnew_df.drop(['date'], axis=1, inplace=True)\n\n# Spot check the dataframe so far\ndisplay(df.head())\n\n# Loop through the economic indicators and put each one in a dedicated column\nfor cat in categories:\n # Data can have problems, and not all indicators will make it through\n try:\n new_df[cat] = df[df[list(df)[0]] == cat]['value']\n except Exception as e:\n print(\"failed on\", cat, e)\n\n# Spot check the output dataframe\ndisplay(new_df.head())\n\n# Graph the data\nnew_df.plot()\nplt.show()\n# Save the dataframe with the economic indicators to a file\nnew_df.to_csv('./info/forex_signals.csv', index=True)\n\n# Clean the data for training\nrates = pd.read_csv('./info/forex_signals.csv', low_memory=False)\nrateSlice = rates[336:]\ndf_new = rateSlice.drop(['date','series_id','value'], axis=1)\ndf_new = df_new.fillna(0)\ndf_new.to_csv('./info/forex_signals_clean.csv', index=False, header=False)\n","sub_path":"PPI_ID.py","file_name":"PPI_ID.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"28054507","text":"# fnmf nnf nfn nm\n\n# in module -function - def functionname()\n# in class - method ( access by method), __init__ constructor \n\n# def __init__(self):\n# pass\nclass Student(object): # class Student: class Student()\n ''' this class describes about the student '''\n \n # Constructor \n def __init__(self,studentid):\n self.sid=studentid # studentid is normal variable, \n # self.sid is instance variable\n print(\"Student id -\",self.sid)\n \n \n # Instance method 1\n def getStudentInfo(self,sname,sage):\n self.sname=sname\n self.sage=sage\n print(\"self sid in instace method 1\",self.sid)\n print(\"Student name \",self.sname)\n print(\"Student age\",self.sage)\n # print('student marks',self.year1)\n \n # instance method 2 \n def studentMarks(self,year1):\n #self.year1=year1\n print(\"self sid in instace method 2\",self.sname)\n print(\"year1 marks\",year1)\n \n \n \n \ns1=Student(1001) # creation of an object, 
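Student(1001) invokes __init__; 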
s1 is object reference \ns2=Student(1002) \n\ns1.getStudentInfo('John',17)\ns2.getStudentInfo('Obama',14)\n\n\ns1.studentMarks(78.98)\ns2.studentMarks(85)\n\n\n\n \n \n \n \n \n \n ","sub_path":"OOPS/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"4850692","text":"import sys\nimport csv\nimport math\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\n\n\n#gmmest estimates parameters of a 1-dimenstional GMM\n#Input:\n# - X\t\t\t: N 1-dimensinal data points (a 1-by-N vector)\n# - mu_init \t: initial means of K Gaussian components (a 1-by-K vector)\n# - sigmasq_init: initial variances of K Gaussian components (a 1-by-K vector)\n# - wt_init\t\t: initial weights of K Gaussian components (a 1-by-K vector that sums to 1)\n# - its \t\t: number of iterations\n#Output:\n# - mu \t\t\t: means of Gaussian components (a 1-by-K vector)\n# - sigmasq : variances of Gaussian components (a 1-by-K vector)\n# - wt \t\t\t: weights of Gaussian components (a 1-by-K vector that sums to 1)\n# - L\t\t\t: log likelihood\ndef gmmest(X, mu_init, sigmasq_init, wt_init, its):\n\tmu = np.copy(mu_init)\n\tsigmasq = np.copy(sigmasq_init)\n\twt = np.copy(wt_init)\n\n\tnew_mu = np.copy(mu)\n\tnew_sigmasq = np.copy(sigmasq)\n\tnew_wt = np.copy(wt)\n\t\n\tfor iteration in range(its):\n\t\tfor i in range(len(wt)):\n\t\t\t# r of Gaussian i for each data\n\t\t\tresponsibilities = []\n\t\t\tfor x in X:\n\t\t\t\ttop = stats.norm(mu[i],sigmasq[i]).pdf(x) * wt[i]\n\t\t\t\tbottom = 0\n\t\t\t\tfor ii in range(len(wt)):\n\t\t\t\t\tbottom += stats.norm(mu[ii],sigmasq[ii]).pdf(x) * wt[ii]\n\t\t\t\tresponsibilities.append(float(top) / float(bottom))\n\n\t\t\t#Big R for Gaussian i\n\t\t\tresponsibility = sum(responsibilities)\n\n\t\t\t#update weight, mean and variance\n\t\t\tnew_wt[i] = float(responsibility) / float(len(X))\n\n\n\t\t\tsum_mu = 0\n\t\t\tsum_var = 0\n\t\t\tfor index in range(len(X)):\n\t\t\t\tsum_mu += responsibilities[index] * X[index]\n\t\t\t\tsum_var += responsibilities[index] * ((X[index]-mu[i])**2)\n\t\t\tnew_mu[i] = float(sum_mu) / float(responsibility)\t\t\n\t\t\tnew_sigmasq[i] = (float(sum_var) / float(responsibility)) ** 0.5\n\n\t\t#update mu, sigmasq, and weights\n\t\tmu = np.copy(new_mu)\n\t\tsigmasq = np.copy(new_sigmasq)\n\t\twt = np.copy(new_wt)\n\n\n\tL = result_prob(X, mu, sigmasq, wt)\n\treturn mu, sigmasq, wt, L\n\n\n\n\n\n\n#compute probability of observed data points for the input GMM\ndef result_prob(X, mu, sigmasq, wt):\n\tL = 0\n\t\n\tfor x in X:\n\t\tpoint_prob = 0\n\t\tfor i in range(len(wt)):\n\t\t\tpoint_prob += (stats.norm(mu[i],sigmasq[i]).pdf(x) * wt[i])\n\n\t\tL += math.log(point_prob)\n\n\t#print(L)\t\n\treturn L\n\n\ndef build_models1(X):\n\t#by observing the hist of data\n\tmu_init = np.array([7, 25])\n\tsigmasq_init = np.array([5, 3])\n\twt_init = np.array([0.7, 0.3])\n\tits = 20\n\n\tprint(\"initial L\")\n\tprint(result_prob(X, mu_init, sigmasq_init, wt_init))\n\n\tL = []\n\tmu = np.copy(mu_init)\n\tsigmasq = np.copy(sigmasq_init)\n\twt = np.copy(wt_init)\n\t\n\t#firt iteration\n\tresult = gmmest(X, mu_init, sigmasq_init, wt_init, its)\n\tmu = np.array(result[0][:])\n\tsigmasq = np.array(result[1][:])\n\twt = np.array(result[2][:])\n\tL.append(result[3])\n\n\t#print(result[3])\n\n\t#rest of iterations\n\tfor i in range(its-1):\n\t\tresult = gmmest(X, mu, sigmasq, wt, 1)\n\t\tmu = np.array(result[0][:])\n\t\tsigmasq = 
np.array(result[1][:])\n\t\twt = np.array(result[2][:])\n\t\tL.append(result[3])\n\t\t#print(result[3])\n\n\t#print(L)\n\treturn result, L\n\n\n\n\ndef build_models2(X):\n\tmu_init = [-13, -4, 50]\n\tsigmasq_init = [5, 14, 30] \n\twt_init = [0.2, 0.4, 0.4]\n\tits = 20\n\n\tL = []\n\tmu = np.copy(mu_init)\n\tsigmasq = np.copy(sigmasq_init)\n\twt = np.copy(wt_init)\n\n\t#firt iteration\n\tresult = gmmest(X, mu_init, sigmasq_init, wt_init, its)\n\tmu = np.array(result[0][:])\n\tsigmasq = np.array(result[1][:])\n\twt = np.array(result[2][:])\n\tL.append(result[3])\n\n\t#rest of iterations\n\tfor i in range(its-1):\n\t\tresult = gmmest(X, mu, sigmasq, wt, 1)\n\t\tmu = np.array(result[0][:])\n\t\tsigmasq = np.array(result[1][:])\n\t\twt = np.array(result[2][:])\n\t\tL.append(result[3])\n\n\t\n\treturn result, L\n\n\ndef main():\n\trfile = sys.argv[1]\n\n\tcsvfile = open(rfile, 'rb')\n\tdat = csv.reader(csvfile, delimiter=',')\n\n\tX = []\n\tY = []\n\n\tfor i, row in enumerate(dat):\n\t\tif i > 0:\n\t\t\tX.append(float(row[0]))\n\t\t\tY.append(int(row[1]))\n\n\tX = np.array(X)\n\tY = np.array(Y)\n\n\tclass1 = np.array(X[np.nonzero(Y == 1)[0]])\n\tclass2 = np.array(X[np.nonzero(Y == 2)[0]])\n\n\tprint(\"computing...\")\n\t#build GMM for two classes\n\tmodel1 = build_models1(class1)\n\t#model2 = build_models2(class2)\n\n\tprint(\"Here are the models!\")\n\n\tprint(model1[0])\n\t#print(model2[0])\n\n\n\tplt.plot(range(1,20+1), model1[1], 'ro')\n\t#plt.plot(range(1,20+1), model2[1], 'ro')\n\tplt.show() \n\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"version1.py","file_name":"version1.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"128685076","text":"import pymysql\nimport pandas as pd\nimport numpy as np\n\n\nHOST = ''\nPORT = ''\nUSER = ''\nPASSWORD = ''\nDB = ''\n\nmysql = pymysql.connect(host=HOST, port=PORT, user=USER, passwd=PASSWORD,\n db=DB, charset='utf8', autocommit=False)\n\n\nclass StockDB:\n mysql = mysql\n cash_DB = {}\n min_date = 20160101\n\n @staticmethod\n def get_daily_index(code, start_date, end_date):\n col = ['date', 'open', 'high', 'low', 'close']\n sql = \"\"\"\n SELECT *\n FROM index_data\n WHERE code = '%s' AND date >= %s AND date <= %s\n ORDER BY date\n \"\"\" % (code, start_date, end_date)\n df = pd.read_sql(sql, StockDB.mysql.con)\n df = df.loc[:, col]\n df.set_index('date', inplace=True)\n df.index.name = None\n return df\n\n @staticmethod\n def get_daily_stock_price(code, start_date, end_date):\n key = 'daily_data-%s' % code\n if key in StockDB.cash_DB.keys():\n df = StockDB.cash_DB[key]\n return df.loc[(df.index >= start_date) & (df.index <= end_date)]\n sql = \"\"\"\n SELECT `종가`, `date`, `거래량`\n FROM `daily_data(not_adjusted)`\n WHERE code = '%s' AND date >= %s\n ORDER BY date ASC\n \"\"\" % (code, StockDB.min_date)\n # sql = \"\"\"\n # SELECT `종가`, `date`, `거래량`, `거래대금`, `시가총액`\n # FROM `daily_data(not_adjusted)`\n # WHERE code = '%s' AND date > %s\n # ORDER BY date ASC\n # \"\"\" % (code, StockDB.min_date)\n df = pd.read_sql(sql, StockDB.mysql.con)\n # datetime 변환은 시간이 너무 오래 소요되어 생략\n # df['date'] = df['date'].apply(lambda x: pd.to_datetime(str(x), format='%Y%m%d'))\n df.set_index('date', inplace=True)\n df.index.name = None\n StockDB.cash_DB[key] = df\n # df['{}'.format(start_date):'{}'.format(end_date)]\n return df.loc[(df.index >= start_date) & (df.index <= end_date)]\n\n @staticmethod\n def get_non_adjusted_cp(code, date):\n \"\"\"\n 무수정주가\n 
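i.e. the unadjusted (raw) closing price\n 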
\"\"\"\n try:\n cp = StockDB.get_daily_stock_price(code, date, date).loc[date, '종가']\n except:\n return np.NaN\n return cp\n\n @staticmethod\n def get_cur_index(code, date):\n \"\"\"\n U001: 코스피\n U180: 코스피200\n U201: 코스닥\n \"\"\"\n sql = \"\"\"\n SELECT close\n FROM index_data\n WHERE code = '{}' AND date = {}\n ORDER BY date\n \"\"\".format(code, date)\n StockDB.mysql.cursor.execute(sql)\n data = StockDB.mysql.cursor.fetchone()\n if data is None:\n raise ValueError('%s %s index_data 없음' % (code, date))\n else:\n data = data[0]\n return data\n\n @staticmethod\n def get_d_n_s(code):\n key = 'd_n_s-%s' % code\n if key in StockDB.cash_DB.keys():\n return StockDB.cash_DB[key]\n else:\n sql = \"\"\"\n SELECT *\n FROM `d&s`\n WHERE code = '%s' AND date >= %s\n ORDER BY date ASC\n \"\"\" % (code, StockDB.min_date)\n df = pd.read_sql(sql, StockDB.mysql.con)\n # df['date'] = df['date'].apply(lambda x: pd.to_datetime(str(x), format='%Y%m%d'))\n df.set_index('date', inplace=True)\n df.index.name = None\n StockDB.cash_DB[key] = df\n return df\n\n @staticmethod\n def get_d_n_s_by_date(code, date):\n \"\"\"\n 해당날짜 이후의 데이터는 생략하여 줌\n \"\"\"\n df = StockDB.get_d_n_s(code)\n return df[df.index <= date]\n\n @staticmethod\n def get_d_n_s_by_from_date_to_date(code, start_date, end_date):\n \"\"\"\n 시작날짜, 끝날짜\n \"\"\"\n df = StockDB.get_d_n_s(code)\n df = df[start_date <= df.index]\n return df[df.index <= end_date]\n\n @staticmethod\n def get_adjusted_close_price(code, start_date, end_date):\n non_adjusted_cp = StockDB.get_daily_stock_price(code, start_date, end_date).loc[:, ['종가']]\n sql = \"\"\"\n SELECT `date`, `adjust_rate`\n FROM `stock_adjust`\n WHERE code = '%s' AND date > %s\n ORDER BY date DESC\n \"\"\" % (code, StockDB.min_date)\n adjust_event = pd.read_sql(sql, StockDB.mysql.con)\n adjust_event.set_index('date', inplace=True)\n adjust_event.index.name = None\n\n rows = adjust_event.shape[0]\n if rows == 0:\n return non_adjusted_cp['종가']\n for date in adjust_event.index.tolist():\n rate = adjust_event.loc[date, 'adjust_rate'] / 100\n non_adjusted_cp.loc[non_adjusted_cp.index < date, '종가'] = non_adjusted_cp.loc[non_adjusted_cp.index < date, '종가'].astype(float) * (rate + 1)\n return non_adjusted_cp['종가'].round().astype(int)\n\n @staticmethod\n def get_adj_cp_target_date(code, target_date):\n non_adjusted_cp = StockDB.get_daily_stock_price(code, target_date, target_date).loc[:, ['종가']]\n sql = \"\"\"\n SELECT `date`, `adjust_rate`\n FROM `stock_adjust`\n WHERE code = '%s' AND date > %s\n ORDER BY date DESC\n \"\"\" % (code, StockDB.min_date)\n adjust_event = pd.read_sql(sql, StockDB.mysql.con)\n adjust_event.set_index('date', inplace=True)\n adjust_event.index.name = None\n\n rows = adjust_event.shape[0]\n if rows == 0:\n return non_adjusted_cp['종가'].iloc[0]\n for date in adjust_event.index.tolist():\n rate = adjust_event.loc[date, 'adjust_rate'] / 100\n non_adjusted_cp.loc[non_adjusted_cp.index < date, '종가'] = non_adjusted_cp.loc[non_adjusted_cp.index < date, '종가'].astype(float) * (rate + 1)\n return non_adjusted_cp['종가'].round().astype(int).iloc[0]\n\n @staticmethod\n def get_cap_of_target_date(code, date):\n sql = \"\"\"\n SELECT cap\n FROM new_day_data\n WHERE code='%s' AND date=%s\n \"\"\" % (code, date)\n StockDB.mysql.cursor.execute(sql)\n cap = StockDB.mysql.cursor.fetchone()\n if cap is None:\n raise ValueError('code: {}, date: {} cap 존재하지않음')\n else:\n return cap[0]\n\n @staticmethod\n def get_listed_stock(code, date):\n sql = \"\"\"\n SELECT listed_stock\n FROM day_data\n WHERE code = '{}' 
AND date = {}\n \"\"\".format(code, date)\n StockDB.mysql.cursor.execute(sql)\n data = StockDB.mysql.cursor.fetchone()\n if data is None:\n raise ValueError('code: {} date: {} listed_stock 존재하지 않음')\n else:\n data = data[0]\n return data\n","sub_path":"data/StockDB.py","file_name":"StockDB.py","file_ext":"py","file_size_in_byte":6752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"570105678","text":"# Deep Learning Libraries\nimport numpy as np\nnp.set_printoptions(suppress=True)\nfrom keras.models import load_model\nfrom scipy.special import expit, softmax\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\nimport keras.backend as K\n\nclass AdversarialDetection:\n def __init__(self, model, attack_type, monochrome, classes):\n self.classes = len(classes)\n self.epsilon = 1\n self.graph = tf.compat.v1.get_default_graph()\n self.monochrome = monochrome\n\n if self.monochrome:\n self.noise = np.zeros((416, 416))\n else:\n self.noise = np.zeros((416, 416, 3))\n\n self.adv_patch_boxes = []\n self.fixed = False\n\n self.model = load_model(model)\n self.model.summary()\n self.attack_type = attack_type\n\n self.delta = None\n loss = None\n for out in self.model.output:\n # Targeted One Box\n if attack_type == \"one_targeted\":\n loss = K.max(K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, 4]) * K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, 5]))\n\n # Targeted Multi boxes\n if attack_type == \"multi_targeted\":\n loss = K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, 4]) * K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, 5])\n\n # Untargeted Multi boxes\n if attack_type == \"multi_untargeted\":\n # loss = tf.reduce_sum(K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, 5:]))\n for i in range(0, self.classes):\n if loss == None:\n loss = tf.reduce_sum(K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, 4]) * K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, i+5]))\n else:\n loss = loss + tf.reduce_sum(K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, 4]) * K.sigmoid(K.reshape(out, (-1, 5 + self.classes))[:, i+5]))\n\n grads = K.gradients(loss, self.model.input)\n if self.delta == None:\n self.delta = K.sign(grads[0])\n else:\n self.delta = self.delta + K.sign(grads[0])\n\n # Store current patches\n self.patches = []\n\n # loss = K.sum(K.abs((self.model.input-K.mean(self.model.input))))\n\n # Reduce Random Noises\n loss = - 0.01 * tf.reduce_sum(tf.image.total_variation(self.model.input))\n\n # Mirror\n # loss = - 0.01 * tf.reduce_sum(tf.image.total_variation(self.model.input)) - 0.01 * tf.reduce_sum(K.abs(self.model.input - tf.image.flip_left_right(self.model.input)))\n\n grads = K.gradients(loss, self.model.input)\n self.delta = self.delta + K.sign(grads[0])\n\n self.sess = tf.compat.v1.keras.backend.get_session()\n\n def attack(self, input_cv_image):\n with self.graph.as_default():\n # Draw each adversarial patch on the input image\n if not self.fixed:\n for box in self.adv_patch_boxes:\n if self.monochrome:\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), 0] = self.noise[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2])]\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), 1] = self.noise[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2])]\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), 2] = self.noise[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2])]\n else:\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), :] = 
self.noise[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), :]\n else:\n # If the patch is fixed, just draw previous saved patches\n ib = 0\n for box in self.adv_patch_boxes:\n if self.monochrome:\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), 0] = self.patches[ib]\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), 1] = self.patches[ib]\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), 2] = self.patches[ib]\n else:\n input_cv_image[box[1]:(box[1]+box[3]), box[0]:(box[0] + box[2]), :] = self.patches[ib]\n ib = ib + 1\n\n if(len(self.adv_patch_boxes) > 0 and (not self.fixed)):\n grads = self.sess.run(self.delta, feed_dict={self.model.input:np.array([input_cv_image])}) / 255.0\n if self.monochrome:\n # For monochrome images, we average the gradients over RGB channels\n self.noise = self.noise + 5 / 3 * (grads[0, :, :, 0] + grads[0, :, :, 1] + grads[0, :, :, 2])\n else:\n self.noise = self.noise + 5 * grads[0, :, :, :]\n # self.noise = np.clip(self.noise, 0.0, 1.0)\n return self.sess.run(self.model.output, feed_dict={self.model.input:np.array([input_cv_image])})\n","sub_path":"model/adversarial_detection.py","file_name":"adversarial_detection.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"502156028","text":"from django.conf.urls import patterns, include, url\nfrom django.views import generic\nfrom django.core.urlresolvers import reverse_lazy\n\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom mathpage.models import MathPage\nfrom mathpage.forms import MathPageForm\nfrom mathpage.views import RedirectSlugified\n\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^show_me_base$', generic.base.TemplateView.as_view( template_name=\"base.html\" ), name='home'),\n # url(r'^oiler/', include('oiler.foo.urls')),\n \n # login\n url(r'^accounts/login/', \n 'django.contrib.auth.views.login',\n { 'template_name':'login.html' },\n name = 'login'),\n \n # logout \n url(r'^accounts/logout/',\n 'django.contrib.auth.views.logout',\n { 'template_name':'logout.html' },\n name = 'logout'), \n \n # home page\n url(r'^$',\n generic.RedirectView.as_view( url='/home/' ),\n name = 'oiler_home'), \n \n # list by page\n url(r'^page/(?P(\\d+|last))/$',\n generic.ListView.as_view(\n model=MathPage,\n template_name='list.html',\n paginate_by=25),\n name = 'mathpage_list'),\n\n # create a new oiler \n url(r'^new/$',\n login_required( \n generic.CreateView.as_view(\n form_class=MathPageForm,\n template_name='form.html')),\n name = 'mathpage_create'),\n\n # update an existing mathpage\n url(r'^edit/(?P\\d+)/$',\n login_required(\n generic.UpdateView.as_view(\n model=MathPage,\n form_class=MathPageForm,\n template_name='form.html')),\n name = 'mathpage_update'),\n\n # delete an existing mathpage\n url(r'^delete/(?P\\d+)/$',\n login_required(\n generic.DeleteView.as_view(\n model=MathPage,\n template_name='confirm_delete.html',\n success_url = reverse_lazy( 'oiler_home' ),\n )),\n name = 'mathpage_delete'),\n \n # detail view by id number\n url(r'^id/(?P\\d+)/$',\n generic.DetailView.as_view(\n model=MathPage,\n template_name='detail.html'),\n name = 'mathpage_by_pk'), \n\n # detail view by slug\n url(r'^(?P[-_a-z0-9]+)/$', \n generic.DetailView.as_view(\n model=MathPage,\n template_name='detail.html'),\n name = 'mathpage_by_slug'), \n \n # ===== bottoming out\n \n # catch-all redirect -- try again 
with slugified version\n url(r'^(?P[^/]+)/$',\n RedirectSlugified.as_view() ),\n\n\n\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"oiler/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604822267","text":"#import math\n\n__author__ = 'jalaniz'\ndef compute_sqrt(n):\n \"Computer Square root *without* math library functions\"\n\n # The target is a 0(log n) or constant time (1) implementation\n # Result is to be truncated down to the nearest integer 7.1 or 7.9 produce 7\n # Attempting to use the babylonian method going for a log n implementation\n if n <= 0:\n return 0 # Error?\n r1 = n / 2 # Guess, Babylonian step 1\n while r1*r1 > n: # Still Too big\n r1 /= 2\n while r1*r1 < n: # Too small\n if ((r1 * 1.5) * r1) < n: # If we're not exceeding our boundary\n r1 *= 1.5\n else: # If not then we're close enough at this point\n break\n\n while True: # Repeat Step 2 until it's precise\n oldr1 = r1 # Save\n if (r1*r1) != n: # This *can* produce imprecise comparison...\n # If r1*r1 is 159.99999999999997 and n is 160, will return False\n r1 = (r1 + n/r1)/2 # Babylonian step 2\n else: # We found our match!\n break\n if oldr1 == r1: # Using caution to prevent infinite loop\n break\n result = r1 # For clarity of anyone reading... Unnecessary. But there you go!\n return result\n\n\n\nn = 160\n# For comparison between my square root and the python library square root\n# print(\"Python Math Library %d\" % math.sqrt(n))\nprint(\"My function %d\" % compute_sqrt(n))\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"261653221","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User\nfrom administration.models import Tourist\nfrom administration.forms import EditTouristForm\nfrom faker import Factory\nfrom haystack.query import SearchQuerySet\nimport random\nimport string\n\n\n@login_required\ndef edit_tourist_view(request, tourist_id):\n '''This View is used to create a new tourist (tourist_id = 0)\n or edit an existing one (tourist_id = Tourist ID).\n '''\n # POST.\n if request.method == 'POST':\n form = EditTouristForm(request.POST)\n\n if form.is_valid():\n new_t = form.save(commit=False)\n user = request.user\n new_t.modify_user = user\n\n # Create tourist.\n if tourist_id == '0':\n new_t.create_user = user\n try:\n new_t.save()\n tourist_id = str(new_t.id)\n messages.success(request, \"User %s created successfully.\" % new_t.name.title())\n except Exception as err:\n msg = \"An error occurred while trying to create tourist '%s': %s\" % (new_t.name.title(), err)\n messages.error(request, msg)\n\n # Edit tourist.\n else:\n new_t.id = tourist_id\n try:\n new_t.save()\n except Exception as err:\n msg = \"An error occurred while trying to edit tourist '%s': %s\" % (new_t.name.title(), err)\n messages.error(request, msg)\n\n # Form no valid.\n else:\n for field in form:\n for error in field.errors:\n messages.error(request, \"%s: %s\" % (field.label, error))\n\n # GET.\n else:\n # New tourist.\n if tourist_id == '0':\n form = EditTouristForm()\n\n # Editable tourist.\n else:\n t = Tourist.objects.get(id=tourist_id)\n form = EditTouristForm(initial={\n 
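# prefill each field from the stored Tourist record\n 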
'name': t.name,\n 'surname': t.surname,\n 'nfc_id': t.nfc_id,\n 'phone1': t.phone1,\n 'phone2': t.phone2,\n 'address': t.address,\n 'postal_code': t.postal_code,\n 'city': t.city,\n 'province': t.province,\n 'country': t.country\n })\n\n context = {'form': form, 'tourist_id': tourist_id}\n return render(request, 'edit_tourist.html', context=context)\n\n@login_required\ndef remove_tourist_view(request, tourist_id):\n '''This View is used to remove a selected tourist by ID.'''\n try:\n Tourist.objects.get(id=tourist_id).delete()\n except Exception as err:\n messages.warning(request, \"Unable to remove the selected user: %s\" % err)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required\ndef fake_tourists_view(request):\n '''This View is used to fake tourists and create some test data\n with a simple click.'''\n users = User.objects.all()\n fake = Factory.create()\n n_fakes = 1000\n i = 0\n\n while i < n_fakes:\n user = users[random.randint(0, (len(users) - 1))]\n\n try:\n Tourist.objects.create(\n create_user=user,\n modify_user=user,\n name=fake.first_name(),\n surname=fake.last_name(),\n nfc_id=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(50)),\n phone1=fake.phone_number(),\n phone2=fake.phone_number(),\n address=fake.address(),\n postal_code=fake.postcode(),\n city=fake.city(),\n province=fake.city(),\n country=fake.country(),\n last_longitude=random.uniform(3.195994, 0.94169),\n last_latitude=random.uniform(40.312756, 42.514597)\n )\n i += 1\n\n # It is possible to get an exception while trying to create data. Omit and continue.\n except:\n pass\n\n messages.success(request, \"A thousand new fake tourists have been added successfully.\")\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required\ndef list_tourist_view(request):\n '''This View is used to search and list the Tourists result.'''\n # Get the value.\n search_text = request.POST.get('search_text')\n\n # Run the query to get the results.\n tourists = SearchQuerySet().filter(fullname=search_text) if search_text else None\n\n # Extract the ids and get the Tourist objects by the id.\n ids = [t.object.id for t in tourists] if tourists else []\n t_list = Tourist.objects.filter(id__in=ids)\n\n context = {'tourists': tourists, 't_list': t_list}\n\n return render(request, 'list_tourists_v3.html', context=context)\n","sub_path":"administration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"3945096","text":"from svggen.api.component import Component\n\nself = Component()\n\nself.addSubcomponent(\"servo\",\"Servo\", inherit=\"servo length width depth controller\".split(), prefix=None)\nself.addSubcomponent(\"gripper\",\"Gripper\", inherit=\"fingerlength fingerwidth width depth ratio\".split(), prefix=None)\n\nself.addConstConstraint((\"servo\",\"phase\"), 1)\nself.addConstConstraint((\"servo\",\"center\"), False)\n\nangles=[35.25, -35.25, 35.25, -35.25]\nfor i in range(3):\n self.addConnection((\"servo\", \"topedge%d\" % i),\n (\"gripper\",\"botedge%d\" % (i+1)),\n angle=angles[i])\n self.inheritInterface(\"botedge%d\" % i,(\"servo\",\"botedge%d\" % 
i))\nself.inheritInterface(\"botedge3\",(\"servo\",\"botedge3\"))\n\nself.inheritInterface(\"botface\",(\"servo\",\"botface\"))\n\nself.toYaml(\"library/ActuatedGripper.yaml\")\n","sub_path":"svggen/builders/ActuatedGripperBuilder.py","file_name":"ActuatedGripperBuilder.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"387015875","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 4 22:25:15 2018\nshowing stacks of images as row*col\n@author: yongweiw\n\"\"\"\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef read_img_file(root,img_size=256*256):\n imgs_list = os.listdir(root)\n imgs_list.sort()\n imgs = np.zeros([len(imgs_list),img_size])\n for i in range(len(imgs_list)):\n img = plt.imread(root + imgs_list[i])\n imgs[i,:] = img.ravel()\n return imgs\n\ndef display_imgs(imgs,numPerRow,numPerCol,imgH=32,imgW=32):\n Img = np.zeros([imgH*numPerCol, imgW*numPerRow])\n \n for i in range(numPerCol):\n for j in range(numPerRow):\n tmp = imgs[i*numPerRow+j,:].reshape([imgH,imgW]) #previously I made a mistake here\n Img[i*imgW:(i+1)*imgW, j*imgH:(j+1)*imgH] = tmp\n\n plt.imshow(Img,cmap='gray')\n plt.axis('off')\n plt.show()\n\nroot = '/path/to/file1/'\nroot_gt = '/path/to/file2'\n\nimgs = read_img_file(root,img_size=256*256)\nimgs_gt = read_img_file(root_gt,img_size=256*256)\n\n\ndisplay_imgs(np.vstack((imgs,imgs_gt)),numPerRow=8,numPerCol=2,imgH=256,imgW=256)\n\n","sub_path":"Pyrtical23_read_show_imges.py","file_name":"Pyrtical23_read_show_imges.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"432238413","text":"\"\"\"\nDjango settings for BakkerApp project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom secrets import *\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nINTERNAL_IPS = ('127.0.0.1',)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'debug_toolbar',\n 'rest_framework',\n 'modeltranslation',\n 'south',\n 'mptt',\n 'filer',\n 'easy_thumbnails',\n 'products',\n)\n\nMIDDLEWARE_CLASSES = (\n 'debug_toolbar.middleware.DebugToolbarMiddleware', # must come after GZipMiddleware\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'BakkerApp.urls'\n\nWSGI_APPLICATION = 'BakkerApp.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 
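# PostgreSQL backend; DB_PASSWORD is star-imported from secrets.py and deleted right after use\n 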
'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'bakker',\n 'USER': 'bakker',\n 'PASSWORD': DB_PASSWORD,\n 'HOST': 'localhost',\n }\n}\ndel DB_PASSWORD\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSITE_ID = 1\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.core.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n)\n\nfrom django.utils.translation import ugettext_lazy as _\nLANGUAGES = (\n ('en', _('English')),\n ('nl', _('Dutch')),\n ('de', _('German')),\n ('fr', _('French')),\n ('it', _('Italian')),\n ('da', _('Danish')),\n ('sv', _('Swedish')),\n ('es', _('Spanish')),\n ('nb', _('Norwegian')),\n ('cs', _('Czech')),\n ('sl', _('Slovenian')),\n ('sk', _('Slovakian')),\n ('el', _('Greek')),\n ('ru', _('Russian')),\n ('hu', _('Hungarian')),\n ('et', _('Estonian')),\n ('lv', _('Latvian')),\n ('ro', _('Romanian')),\n ('pl', _('Polish')),\n)\n\nREST_FRAMEWORK = {\n # Use hyperlinked styles by default.\n # Only used if the `serializer_class` attribute is not set on a view.\n 'DEFAULT_MODEL_SERIALIZER_CLASS':\n 'rest_framework.serializers.HyperlinkedModelSerializer',\n\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',\n ],\n\n 'PAGINATE_BY': 10,\n}\n\nBAKKER_PRODUCTS = {\n 'DEFAULT_PRODUCTS_PER_PAGE': 60,\n 'DEFAULT_VIEW': 'GridView', # ListView, ExtListView\n 'DEFAULT_SEARCH_RESULT': 'IN STOCK', # All products, Garden advice, Customer Service\n 'DEFAULT_SORT_ORDER': 'MOST SOLD', # Price HL, Price LH\n}","sub_path":"BakkerApp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"620769384","text":"import random\n\nfrom card_2 import Kartochki\n\n\ndef computer_computer():\n card_comp1 = Kartochki()\n card_comp2 = Kartochki()\n\n card_list_comp1 = card_comp1.card_list()\n card_list_comp2 = card_comp2.card_list()\n\n pol = []\n flag = True\n\n while len(pol) != 90:\n\n # print both players' cards\n print(\"карточка компьютера номер 1\")\n card_comp1.card_str()\n print()\n print(\"карточка компьютера номер 2\")\n card_comp2.card_str()\n\n # draw a barrel, rejecting any number that has already been drawn\n bochonok = random.randint(1, 90)\n while bochonok in pol:\n bochonok = random.randint(1, 90)\n pol.append(bochonok)\n\n print('======= ', bochonok, ' =======')\n\n for n, j in enumerate(card_list_comp1):\n if j == bochonok:\n card_list_comp1[n] = '--'\n\n for n, j in enumerate(card_list_comp2):\n if j == bochonok:\n card_list_comp2[n] = '--'\n\n if card_list_comp1.count('--') == 15:\n flag = True\n break\n elif card_list_comp2.count('--') == 15:\n flag = False\n break\n\n if not flag:\n print(\"++\"*30)\n print(\"Компьютер номер 2 - выиграл! 
Компьютер номер 1 - проиграл!\")\n print(\"++\" * 30)\n else:\n print(\"Компьютер номер 1 - выиграл! Компьютер номер 2 - проиграл!\")\n\n print(\".\" * 35)\n print(\"Номера бочонков: {}\".format(pol))\n print(\"Всего бочонков: {}\".format(len(pol)))\n","sub_path":"game_comp.py","file_name":"game_comp.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"454394901","text":"def add_key(dictionary, a, b):\n\tif b in dictionary.keys():\n\t\tdictionary[b].append(a)\n\t\t# Remove random duplicates\n\t\tdictionary[b] = list(set(dictionary[b]))\n\t\treturn\n\tdictionary[b] = [a]\n\ndef has_cycle(dictionary, element, recursion_list=None):\n\tif not bool(dictionary):\n\t\treturn False\n\tif element is None:\n\t\telement = list(dictionary.keys())[0]\n\tif recursion_list is None:\n\t\trecursion_list = []\n\tif element in recursion_list:\n\t\treturn True\n\trecursion_list.append(element)\n\tif element not in dictionary.keys():\n\t\treturn False\n\n\t# Update element\n\tls = dictionary[element]\n\t# Is empty, we're at the end\n\tif not ls:\n\t\treturn False\n\t# Remove value from list\n\tval = ls[0]\n\tdictionary[element].remove(val)\n\telement = val\n\treturn has_cycle(dictionary, element, recursion_list)\n\ndef problem(instruction):\n\t'''\n\t\tNorth case\n\t\tdict[b] = [a]\n\t\tdict[c] = [b]\n\t\tdict[a] = [c]\n\n\t\trecurse through, use list to keep recursion stack. if any appear, cycle!\n\t\n\t'''\n\n\tn_dict = {}\n\ts_dict = {}\n\te_dict = {}\n\tw_dict = {}\n\n\tfor instruct in instruction:\n\t\ta, i, b = instruct\n\t\tfor c in i:\n\t\t\tif c == 'N':\n\t\t\t\tadd_key(n_dict, a, b)\n\t\t\telif c == 'S':\n\t\t\t\tadd_key(s_dict, a, b)\n\t\t\telif c == 'E':\n\t\t\t\tadd_key(e_dict, a, b)\n\t\t\telif c == 'W':\n\t\t\t\tadd_key(w_dict, a, b)\n\n\tif any([has_cycle(n_dict, None), has_cycle(s_dict, None), has_cycle(e_dict, None), has_cycle(w_dict, None)]):\n\t\treturn False\n\treturn True\n\t\t\t\t\t\ninstructions = [[['A', 'N', 'B'],['B', 'NE', 'C'],['C','N','A']],\n\t\t\t\t[['A', 'NW', 'B'],['A','N','B']]]\n\n\nprint(problem(instructions[0]))\nprint(problem(instructions[1]))\n","sub_path":"Problem_87/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"182219131","text":"# Initializing the Cloud Controller client\nimport cf_api\nimport os\n\ndebug = True\n\ncloud_controller = 'https://CHANGEME/'\ndeploy_client_id = 'cf'\ndeploy_client_secret = ''\nverify_ssl = False\n\nusername = 'CHANGEME'\npassword = 'CHANGEME'\n\nproxy_host = 'CHANGEME'\nproxy_port = '2878'\n\nAppsTotalName = 'BIZ.TANZU.TotalApps '\nSpacesName = 'BIZ.TANZU.Spaces'\nOrgName = 'BIZ.TANZU.Orgs'\nAppsName = 'BIZ.TANZU.Apps'\nOrgTotalName = 'BIZ.TANZU.TotalOrgs '\nSpacesTotalName = 'BIZ.TANZU.TotalSpaces '\n\ncc = cf_api.new_cloud_controller(\n cloud_controller,\n client_id=deploy_client_id,\n client_secret=deploy_client_secret,\n username=username,\n password=password,\n).set_verify_ssl(verify_ssl)\n\ndef sendMetric(metric):\n # Pipe the metric line to the metrics proxy via netcat.\n entireMetric = metric + '| nc -q0 ' + proxy_host + ' ' + proxy_port\n if debug:\n print(entireMetric)\n os.popen(entireMetric)\n\n# List all organizations\nreq = cc.organizations()\nres = req.get()\norgs = res.resources\norgCounter = 0\nfor r in orgs:\n OrgTags = ' OrgName=' + r.name + ',OrgGUID=\"' + r.guid + '\" '\n orgCounter +=1\n sendMetric('echo ' + OrgName + ' ' + str(1) + ' source=' + r.name + OrgTags)\n \nsendMetric('echo ' + OrgTotalName + ' ' + str(orgCounter) + ' source=CF')\n\n# List all spaces\nres = cc.spaces().get()\nspaces = res.resources\nSpacesCounter = 0\nfor r in spaces:\n OrgGUID = r.organization_guid\n SpacesTags = ' SpacesName=' + r.name + ',SpacesGUID=\"' + r.guid + '\",OrgGUID=\"' + OrgGUID + '\" '\n SpacesCounter +=1\n sendMetric('echo ' + SpacesName + ' ' + str(1) + ' source=' + r.name + SpacesTags)\n \nsendMetric('echo ' + SpacesTotalName + ' ' + str(SpacesCounter) + ' source=CF')\n\n# List all applications\nres = cc.apps().get()\napps = res.resources\nAppsCounter = 0\nfor r in apps:\n si = r.service_bindings_url\n si = si.replace('/v2/apps/','')\n si = si.replace('/service_bindings','')\n AppState = r.state\n AppsTags = ' AppsName=' + r.name + ',AppsGUID=\"' + r.guid + '\",siGUID=\"' + si + '\",state=' + AppState + ' '\n AppsCounter +=1\n sendMetric('echo ' + AppsName + ' ' + str(1) + ' source=' + r.name + AppsTags)\n \nsendMetric('echo ' + AppsTotalName + ' ' + str(AppsCounter) + ' source=CF')\n\n# List all stacks\nres = cc.stacks().get()\nstacks = res.resources\nif debug:\n print(stacks)\n","sub_path":"pcf_custom_metrics.py","file_name":"pcf_custom_metrics.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"457015710","text":"class GameStats():\n \"\"\"Stores statistics for Alien Invasion.\"\"\"\n def __init__(self, ai_settings):\n \"\"\"Initialize the statistics.\"\"\"\n self.ai_settings = ai_settings\n self.reset_stats()\n self.game_active = False\n filename = 'highScore.txt'\n with open(filename) as file_object:\n self.high_score = int(file_object.read())\n\n\n\n def reset_stats(self):\n \"\"\"Initialize the statistics that can change during the game.\"\"\"\n self.cheese_left = self.ai_settings.cheese_limit\n self.score = 0\n self.level = 1\n\n\n\n","sub_path":"game_stats.py","file_name":"game_stats.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"194342996","text":"from app.resources.Chat.ChatSocket import ChatSocket\nfrom flask import session\n\n\nclass Notification:\n\n @staticmethod\n def send_notification(partner_id, typeNotif):\n socket = ChatSocket()\n data = {\n 'type': typeNotif,\n 'author': session['login'],\n 'partner_id': partner_id\n }\n socket.manage_notification(data)","sub_path":"backend/app/resources/Profile/Notification.py","file_name":"Notification.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154058981","text":"import csv\nimport json\nfrom datetime import datetime, timedelta\n\nyears = []\nmonths = []\ndays = []\nhs = []\nmins = []\nsecs = []\nnames = []\nvalues = []\n\ncsvfile = open('new_demands_201405.csv', 'r')\nidx = 1\nreader = csv.DictReader(csvfile)\nfor row in reader:\n\tnames.append(row['name'])\n\td = datetime.strptime(row['date'], '%Y-%m-%d')\n\tyears.append(d.year)\n\tmonths.append(d.month)\n\tdays.append(d.day)\n\td2 = datetime.strptime(row['date'], '%Y-%m-%d')\n\t# .strftime('%Y-%m-%d %H:%M:%S')\n\td2 += timedelta(0,15*60*(idx-1))\n\ths.append(d2.hour) \n\tmins.append(d2.minute)\n\tsecs.append(d2.second)\n\tvalues.append(row['value'])\n\tif (idx == 96):\n\t\tidx = 1\n\telse:\n\t\tidx += 1\n\n\noutputs = []\n\n# 
0~4 \n# k = 4\nfor k in range(5):\n\tdata = []\n\tfor i in range(k*31*96, (k+1)*31*96):\n\t\tdata.append({\n\t\t\t'price': values[i],\n\t\t\t'year': years[i],\n\t\t\t'month': months[i],\n\t\t\t'day': days[i],\n\t\t\t'hour': hs[i],\n\t\t\t'minute': mins[i],\n\t\t\t'second': secs[i]\n\t\t\t}) \n\toutput = []\n\toutput.append({'name': names[k*31*96]})\n\toutput.append({'data': data})\n\toutputs.append(output)\n\n\njsonfile = open(\"test.json\", 'w')\njson.dump(outputs, jsonfile)\n","sub_path":"csvtojson.py","file_name":"csvtojson.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"632041282","text":"from ns_portal.database.meta import (\n Main_Db_Base\n)\nfrom sqlalchemy import (\n Column,\n Integer,\n String\n)\n\n\nclass TApplications(Main_Db_Base):\n __tablename__ = 'TApplications'\n\n TApp_PK_ID = Column(\n Integer,\n primary_key=True\n )\n TApp_Name = Column(\n String(50),\n nullable=True\n )\n TApp_ClientID = Column(\n String(500),\n nullable=False\n )\n TApp_Description = Column(\n String(255),\n nullable=True\n )\n","sub_path":"Back/ns_portal/database/main_db/tapplications_model.py","file_name":"tapplications_model.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154513408","text":"from __future__ import print_function, division, absolute_import\n\nfrom collections import defaultdict, deque\nfrom datetime import timedelta\nfrom importlib import import_module\nimport heapq\nimport logging\nimport os\nimport random\nimport tempfile\nfrom threading import current_thread, Lock, local\nfrom timeit import default_timer\nimport shutil\nimport sys\n\nfrom .core import read, write, connect, close, send_recv, error_message\n\n\nfrom dask.core import istask\nfrom dask.compatibility import apply\ntry:\n from cytoolz import valmap, merge, pluck, concat\nexcept ImportError:\n from toolz import valmap, merge, pluck, concat\nfrom tornado.gen import Return\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.iostream import StreamClosedError\n\nfrom .batched import BatchedSend\nfrom .config import config\nfrom .utils_comm import pack_data, gather_from_workers\nfrom .compatibility import reload, unicode\nfrom .core import (read, write, connect, close, send_recv, error_message,\n rpc, Server, pingpong, coerce_to_address, RPCClosed)\nfrom .metrics import time\nfrom .protocol.pickle import dumps, loads\nfrom .sizeof import sizeof\nfrom .threadpoolexecutor import ThreadPoolExecutor\nfrom .utils import (funcname, get_ip, has_arg, _maybe_complex, log_errors,\n All, ignoring, validate_key, mp_context)\n\n_ncores = mp_context.cpu_count()\n\nthread_state = local()\n\nlogger = logging.getLogger(__name__)\n\nLOG_PDB = config.get('pdb-on-err') or os.environ.get('DASK_ERROR_PDB', False)\n\ntry:\n import psutil\n TOTAL_MEMORY = psutil.virtual_memory().total\nexcept ImportError:\n logger.warn(\"Please install psutil to estimate worker memory use\")\n TOTAL_MEMORY = 8e9\n\n\nIN_PLAY = ('waiting', 'ready', 'executing', 'long-running')\nPENDING = ('waiting', 'ready', 'constrained')\n\n\nclass WorkerBase(Server):\n \"\"\" Worker Node\n\n Workers perform two functions:\n\n 1. **Serve data** from a local dictionary\n 2. 
**Perform computation** on that data and on data from peers\n\n Additionally workers keep a scheduler informed of their data and use that\n scheduler to gather data from other workers when necessary to perform a\n computation.\n\n You can start a worker with the ``dask-worker`` command line application::\n\n $ dask-worker scheduler-ip:port\n\n **State**\n\n * **data:** ``{key: object}``:\n Dictionary mapping keys to actual values\n * **active:** ``{key}``:\n Set of keys currently under computation\n * **ncores:** ``int``:\n Number of cores used by this worker process\n * **executor:** ``concurrent.futures.ThreadPoolExecutor``:\n Executor used to perform computation\n * **local_dir:** ``path``:\n Path on local machine to store temporary files\n * **scheduler:** ``rpc``:\n Location of scheduler. See ``.ip/.port`` attributes.\n * **name:** ``string``:\n Alias\n * **services:** ``{str: Server}``:\n Auxiliary web servers running on this worker\n * **service_ports:** ``{str: port}``:\n\n Parameters\n ----------\n scheduler_ip: str\n scheduler_port: int\n ip: str, optional\n ncores: int, optional\n loop: tornado.ioloop.IOLoop\n local_dir: str, optional\n Directory where we place local resources\n name: str, optional\n heartbeat_interval: int\n Milliseconds between heartbeats to scheduler\n memory_limit: int\n Number of bytes of data to keep in memory before using disk\n executor: concurrent.futures.Executor\n\n Examples\n --------\n\n Use the command line to start a worker::\n\n $ dask-scheduler\n Start scheduler at 127.0.0.1:8786\n\n $ dask-worker 127.0.0.1:8786\n Start worker at: 127.0.0.1:1234\n Registered with scheduler at: 127.0.0.1:8786\n\n See Also\n --------\n distributed.scheduler.Scheduler\n distributed.nanny.Nanny\n \"\"\"\n\n def __init__(self, scheduler_ip, scheduler_port, ip=None, ncores=None,\n loop=None, local_dir=None, services=None, service_ports=None,\n name=None, heartbeat_interval=5000, reconnect=True,\n memory_limit='auto', executor=None, resources=None,\n silence_logs=None, **kwargs):\n self.ip = ip or get_ip()\n self._port = 0\n self.ncores = ncores or _ncores\n self.local_dir = local_dir or tempfile.mkdtemp(prefix='worker-')\n self.total_resources = resources or {}\n self.available_resources = (resources or {}).copy()\n if silence_logs:\n logger.setLevel(silence_logs)\n if not os.path.exists(self.local_dir):\n os.mkdir(self.local_dir)\n\n if memory_limit == 'auto':\n memory_limit = int(TOTAL_MEMORY * 0.6 * min(1, self.ncores / _ncores))\n with ignoring(TypeError):\n memory_limit = float(memory_limit)\n if isinstance(memory_limit, float) and memory_limit <= 1:\n memory_limit = memory_limit * TOTAL_MEMORY\n self.memory_limit = memory_limit\n\n if self.memory_limit:\n try:\n from zict import Buffer, File, Func\n except ImportError:\n raise ImportError(\"Please `pip install zict` for spill-to-disk workers\")\n path = os.path.join(self.local_dir, 'storage')\n storage = Func(dumps_to_disk, loads_from_disk, File(path))\n self.data = Buffer({}, storage, int(float(self.memory_limit)), weight)\n else:\n self.data = dict()\n self.loop = loop or IOLoop.current()\n self.status = None\n self.reconnect = reconnect\n self.executor = executor or ThreadPoolExecutor(self.ncores)\n self.scheduler = rpc(ip=scheduler_ip, port=scheduler_port)\n self.active = set()\n self.name = name\n self.heartbeat_interval = heartbeat_interval\n self.heartbeat_active = False\n self.execution_state = {'scheduler': self.scheduler.address,\n 'ioloop': self.loop,\n 'worker': self}\n self._last_disk_io = None\n 
self._last_net_io = None\n self._ipython_kernel = None\n\n if self.local_dir not in sys.path:\n sys.path.insert(0, self.local_dir)\n\n self.services = {}\n self.service_ports = service_ports or {}\n for k, v in (services or {}).items():\n if isinstance(k, tuple):\n k, port = k\n else:\n port = 0\n\n self.services[k] = v(self, io_loop=self.loop)\n self.services[k].listen(port)\n self.service_ports[k] = self.services[k].port\n\n handlers = {\n 'gather': self.gather,\n 'compute-stream': self.compute_stream,\n 'run': self.run,\n 'run_coroutine': self.run_coroutine,\n 'get_data': self.get_data,\n 'update_data': self.update_data,\n 'delete_data': self.delete_data,\n 'terminate': self.terminate,\n 'ping': pingpong,\n 'health': self.host_health,\n 'upload_file': self.upload_file,\n 'start_ipython': self.start_ipython,\n 'keys': self.keys,\n }\n\n super(WorkerBase, self).__init__(handlers, io_loop=self.loop, **kwargs)\n\n self.heartbeat_callback = PeriodicCallback(self.heartbeat,\n self.heartbeat_interval,\n io_loop=self.loop)\n self.loop.add_callback(self.heartbeat_callback.start)\n\n @property\n def worker_address(self):\n \"\"\" For API compatibility with Nanny \"\"\"\n return self.address\n\n @gen.coroutine\n def heartbeat(self):\n if not self.heartbeat_active:\n self.heartbeat_active = True\n logger.debug(\"Heartbeat: %s\" % self.address)\n try:\n yield self.scheduler.register(address=self.address, name=self.name,\n ncores=self.ncores,\n now=time(),\n host_info=self.host_health(),\n services=self.service_ports,\n memory_limit=self.memory_limit,\n **self.process_health())\n finally:\n self.heartbeat_active = False\n else:\n logger.debug(\"Heartbeat skipped: channel busy\")\n\n @gen.coroutine\n def _start(self, port=0):\n self.listen(port)\n self.name = self.name or self.address\n for k, v in self.services.items():\n v.listen(0)\n self.service_ports[k] = v.port\n\n logger.info(' Start worker at: %20s:%d', self.ip, self.port)\n for k, v in self.service_ports.items():\n logger.info(' %16s at: %20s:%d' % (k, self.ip, v))\n logger.info('Waiting to connect to: %20s:%d',\n self.scheduler.ip, self.scheduler.port)\n logger.info('-' * 49)\n logger.info(' Threads: %26d', self.ncores)\n if self.memory_limit:\n logger.info(' Memory: %23.2f GB', self.memory_limit / 1e9)\n logger.info(' Local Directory: %26s', self.local_dir)\n logger.info('-' * 49)\n while True:\n try:\n resp = yield self.scheduler.register(\n ncores=self.ncores, address=(self.ip, self.port),\n keys=list(self.data),\n name=self.name, nbytes=valmap(sizeof, self.data),\n now=time(),\n host_info=self.host_health(),\n services=self.service_ports,\n memory_limit=self.memory_limit,\n resources=self.total_resources,\n **self.process_health())\n break\n except EnvironmentError:\n logger.debug(\"Unable to register with scheduler. 
Waiting\")\n yield gen.sleep(0.5)\n if resp != 'OK':\n raise ValueError(resp)\n logger.info(' Registered to: %20s:%d',\n self.scheduler.ip, self.scheduler.port)\n logger.info('-' * 49)\n self.status = 'running'\n\n def start(self, port=0):\n self.loop.add_callback(self._start, port)\n\n def identity(self, stream):\n return {'type': type(self).__name__, 'id': self.id,\n 'scheduler': (self.scheduler.ip, self.scheduler.port),\n 'ncores': self.ncores,\n 'memory_limit': self.memory_limit}\n\n @gen.coroutine\n def _close(self, report=True, timeout=10):\n if self.status in ('closed', 'closing'):\n return\n logger.info(\"Stopping worker at %s:%d\", self.ip, self.port)\n self.status = 'closing'\n self.stop()\n self.heartbeat_callback.stop()\n with ignoring(EnvironmentError):\n if report:\n yield gen.with_timeout(timedelta(seconds=timeout),\n self.scheduler.unregister(address=(self.ip, self.port)),\n io_loop=self.loop)\n self.scheduler.close_rpc()\n self.executor.shutdown()\n if os.path.exists(self.local_dir):\n shutil.rmtree(self.local_dir)\n\n for k, v in self.services.items():\n v.stop()\n self.rpc.close()\n self.status = 'closed'\n\n @gen.coroutine\n def terminate(self, stream, report=True):\n yield self._close(report=report)\n raise Return('OK')\n\n @property\n def address(self):\n return '%s:%d' % (self.ip, self.port)\n\n @property\n def address_tuple(self):\n return (self.ip, self.port)\n\n def _deserialize(self, function=None, args=None, kwargs=None, task=None):\n \"\"\" Deserialize task inputs and regularize to func, args, kwargs \"\"\"\n if function is not None:\n function = loads(function)\n if args:\n args = loads(args)\n if kwargs:\n kwargs = loads(kwargs)\n\n if task is not None:\n assert not function and not args and not kwargs\n function = execute_task\n args = (task,)\n\n return function, args or (), kwargs or {}\n\n @gen.coroutine\n def executor_submit(self, key, function, *args, **kwargs):\n \"\"\" Safely run function in thread pool executor\n\n We've run into issues running concurrent.future futures within\n tornado. Apparently it's advantageous to use timeouts and periodic\n callbacks to ensure things run smoothly. 
This can get tricky, so we\n pull it off into an separate method.\n \"\"\"\n job_counter[0] += 1\n # logger.info(\"%s:%d Starts job %d, %s\", self.ip, self.port, i, key)\n future = self.executor.submit(function, *args, **kwargs)\n pc = PeriodicCallback(lambda: logger.debug(\"future state: %s - %s\",\n key, future._state), 1000, io_loop=self.loop); pc.start()\n try:\n yield future\n finally:\n pc.stop()\n pass\n\n result = future.result()\n\n # logger.info(\"Finish job %d, %s\", i, key)\n raise gen.Return(result)\n\n\n def run(self, stream, function, args=(), kwargs={}):\n return run(self, stream, function=function, args=args, kwargs=kwargs)\n\n def run_coroutine(self, stream, function, args=(), kwargs={}, wait=True):\n return run(self, stream, function=function, args=args, kwargs=kwargs,\n is_coro=True, wait=wait)\n\n def update_data(self, stream=None, data=None, report=True):\n self.data.update(data)\n self.nbytes.update(valmap(sizeof, data))\n if report:\n self.batched_stream.send({'op': 'add-keys',\n 'keys': list(data)})\n info = {'nbytes': {k: sizeof(v) for k, v in data.items()},\n 'status': 'OK'}\n return info\n\n @gen.coroutine\n def delete_data(self, stream=None, keys=None, report=True):\n if keys:\n for key in list(keys):\n deps = self.dependents.get(key, ())\n if deps and any(self.task_state[dep] in IN_PLAY\n for dep in deps):\n logger.info(\"Tried to delete necessary key: %s\", key)\n self.log.append((key, 'tried-to-delete-unneccesary-key'))\n keys.remove(key)\n continue\n else:\n state = self.task_state.get(key)\n if state == 'memory':\n del self.data[key]\n self.forget_key(key)\n self.log.append((key, 'delete-memory'))\n elif state == 'error':\n self.forget_key(key)\n self.log.append((key, 'delete-error'))\n elif key in self.data:\n del self.data[key]\n self.log.append((key, 'delete-data'))\n logger.debug(\"Deleted %d keys\", len(keys))\n if report:\n logger.debug(\"Reporting loss of keys to scheduler\")\n yield self.scheduler.remove_keys(address=self.address,\n keys=list(keys))\n raise Return('OK')\n\n @gen.coroutine\n def get_data(self, stream, keys=None, who=None):\n start = time()\n\n msg = {k: to_serialize(self.data[k]) for k in keys if k in self.data}\n nbytes = {k: self.nbytes.get(k) for k in keys if k in self.data}\n stop = time()\n if self.digests is not None:\n self.digests['get-data-load-duration'].add(stop - start)\n start = time()\n try:\n compressed = yield write(stream, msg)\n yield close(stream)\n except EnvironmentError:\n logger.exception('failed during get data', exc_info=True)\n stream.close()\n raise\n stop = time()\n if self.digests is not None:\n self.digests['get-data-send-duration'].add(stop - start)\n\n total_bytes = sum(filter(None, nbytes.values()))\n\n self.outgoing_count += 1\n duration = (stop - start) or 0.5 # windows\n self.outgoing_transfer_log.append({\n 'start': start,\n 'stop': stop,\n 'middle': (start + stop) / 2,\n 'duration': duration,\n 'who': who,\n 'keys': nbytes,\n 'total': total_bytes,\n 'compressed': compressed,\n 'bandwidth': total_bytes / duration\n })\n\n raise gen.Return('dont-reply')\n\n def start_ipython(self, stream):\n \"\"\"Start an IPython kernel\n\n Returns Jupyter connection info dictionary.\n \"\"\"\n from ._ipython_utils import start_ipython\n if self._ipython_kernel is None:\n self._ipython_kernel = start_ipython(\n ip=self.ip,\n ns={'worker': self},\n log=logger,\n )\n return self._ipython_kernel.get_connection_info()\n\n def upload_file(self, stream, filename=None, data=None, load=True):\n out_filename = 
os.path.join(self.local_dir, filename)\n if isinstance(data, unicode):\n data = data.encode()\n with open(out_filename, 'wb') as f:\n f.write(data)\n f.flush()\n\n if load:\n try:\n name, ext = os.path.splitext(filename)\n if ext in ('.py', '.pyc'):\n logger.info(\"Reload module %s from .py file\", name)\n name = name.split('-')[0]\n reload(import_module(name))\n if ext == '.egg':\n import pkg_resources\n sys.path.append(out_filename)\n pkgs = pkg_resources.find_distributions(out_filename)\n for pkg in pkgs:\n logger.info(\"Load module %s from egg\", pkg.project_name)\n reload(import_module(pkg.project_name))\n if not pkgs:\n logger.warning(\"Found no packages in egg file\")\n except Exception as e:\n logger.exception(e)\n return {'status': 'error', 'exception': dumps(e)}\n return {'status': 'OK', 'nbytes': len(data)}\n\n def process_health(self, stream=None):\n d = {'active': len(self.active),\n 'stored': len(self.data)}\n return d\n\n def host_health(self, stream=None):\n \"\"\" Information about worker \"\"\"\n d = {'time': time()}\n try:\n import psutil\n mem = psutil.virtual_memory()\n d.update({'cpu': psutil.cpu_percent(),\n 'memory': mem.total,\n 'memory_percent': mem.percent})\n\n net_io = psutil.net_io_counters()\n if self._last_net_io:\n d['network-send'] = net_io.bytes_sent - self._last_net_io.bytes_sent\n d['network-recv'] = net_io.bytes_recv - self._last_net_io.bytes_recv\n else:\n d['network-send'] = 0\n d['network-recv'] = 0\n self._last_net_io = net_io\n\n try:\n disk_io = psutil.disk_io_counters()\n except RuntimeError:\n # This happens when there is no physical disk in worker\n pass\n else:\n if self._last_disk_io:\n d['disk-read'] = disk_io.read_bytes - self._last_disk_io.read_bytes\n d['disk-write'] = disk_io.write_bytes - self._last_disk_io.write_bytes\n else:\n d['disk-read'] = 0\n d['disk-write'] = 0\n self._last_disk_io = disk_io\n\n except ImportError:\n pass\n return d\n\n def keys(self, stream=None):\n return list(self.data)\n\n @gen.coroutine\n def gather(self, stream=None, who_has=None):\n who_has = {k: [coerce_to_address(addr) for addr in v]\n for k, v in who_has.items()\n if k not in self.data}\n try:\n result = yield gather_from_workers(who_has)\n except KeyError as e:\n logger.warn(\"Could not find data\", e)\n raise Return({'status': 'missing-data',\n 'keys': e.args})\n else:\n self.data.update(result)\n self.nbytes.update(valmap(sizeof, result))\n raise Return({'status': 'OK'})\n\n\njob_counter = [0]\n\n\ndef execute_task(task):\n \"\"\" Evaluate a nested task\n\n >>> inc = lambda x: x + 1\n >>> execute_task((inc, 1))\n 2\n >>> execute_task((sum, [1, 2, (inc, 3)]))\n 7\n \"\"\"\n if istask(task):\n func, args = task[0], task[1:]\n return func(*map(execute_task, args))\n elif isinstance(task, list):\n return list(map(execute_task, task))\n else:\n return task\n\n\ncache = dict()\n\n\ndef dumps_function(func):\n \"\"\" Dump a function to bytes, cache functions \"\"\"\n if func not in cache:\n b = dumps(func)\n cache[func] = b\n return cache[func]\n\n\ndef dumps_task(task):\n \"\"\" Serialize a dask task\n\n Returns a dict of bytestrings that can each be loaded with ``loads``\n\n Examples\n --------\n Either returns a task as a function, args, kwargs dict\n\n >>> from operator import add\n >>> dumps_task((add, 1)) # doctest: +SKIP\n {'function': b'\\x80\\x04\\x95\\x00\\x8c\\t_operator\\x94\\x8c\\x03add\\x94\\x93\\x94.'\n 'args': b'\\x80\\x04\\x95\\x07\\x00\\x00\\x00K\\x01K\\x02\\x86\\x94.'}\n\n Or as a single task blob if it can't easily decompose the 
result. This\n happens either if the task is highly nested, or if it isn't a task at all\n\n >>> dumps_task(1) # doctest: +SKIP\n {'task': b'\\x80\\x04\\x95\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00K\\x01.'}\n \"\"\"\n if istask(task):\n if task[0] is apply and not any(map(_maybe_complex, task[2:])):\n d = {'function': dumps_function(task[1]),\n 'args': dumps(task[2])}\n if len(task) == 4:\n d['kwargs'] = dumps(task[3])\n return d\n elif not any(map(_maybe_complex, task[1:])):\n return {'function': dumps_function(task[0]),\n 'args': dumps(task[1:])}\n return to_serialize(task)\n\n\ndef apply_function(function, args, kwargs, execution_state, key):\n \"\"\" Run a function, collect information\n\n Returns\n -------\n msg: dictionary with status, result/error, timings, etc..\n \"\"\"\n thread_state.execution_state = execution_state\n thread_state.key = key\n start = time()\n try:\n result = function(*args, **kwargs)\n except Exception as e:\n msg = error_message(e)\n msg['op'] = 'task-erred'\n else:\n msg = {'op': 'task-finished',\n 'status': 'OK',\n 'result': result,\n 'nbytes': sizeof(result),\n 'type': dumps_function(type(result)) if result is not None else None}\n finally:\n end = time()\n msg['compute_start'] = start\n msg['compute_stop'] = end\n msg['thread'] = current_thread().ident\n return msg\n\n\ndef get_msg_safe_str(msg):\n \"\"\" Make a worker msg, which contains args and kwargs, safe to cast to str:\n allowing for some arguments to raise exceptions during conversion and\n ignoring them.\n \"\"\"\n class Repr(object):\n def __init__(self, f, val):\n self._f = f\n self._val = val\n def __repr__(self):\n return self._f(self._val)\n msg = msg.copy()\n if \"args\" in msg:\n msg[\"args\"] = Repr(convert_args_to_str, msg[\"args\"])\n if \"kwargs\" in msg:\n msg[\"kwargs\"] = Repr(convert_kwargs_to_str, msg[\"kwargs\"])\n return msg\n\n\ndef convert_args_to_str(args, max_len=None):\n \"\"\" Convert args to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n \"\"\"\n length = 0\n strs = [\"\" for i in range(len(args))]\n for i, arg in enumerate(args):\n try:\n sarg = repr(arg)\n except:\n sarg = \"< could not convert arg to str >\"\n strs[i] = sarg\n length += len(sarg) + 2\n if max_len is not None and length > max_len:\n return \"({}\".format(\", \".join(strs[:i+1]))[:max_len]\n else:\n return \"({})\".format(\", \".join(strs))\n\n\ndef convert_kwargs_to_str(kwargs, max_len=None):\n \"\"\" Convert kwargs to a string, allowing for some arguments to raise\n exceptions during conversion and ignoring them.\n \"\"\"\n length = 0\n strs = [\"\" for i in range(len(kwargs))]\n for i, (argname, arg) in enumerate(kwargs.items()):\n try:\n sarg = repr(arg)\n except:\n sarg = \"< could not convert arg to str >\"\n skwarg = repr(argname) + \": \" + sarg\n strs[i] = skwarg\n length += len(skwarg) + 2\n if max_len is not None and length > max_len:\n return \"{{{}\".format(\", \".join(strs[:i+1]))[:max_len]\n else:\n return \"{{{}}}\".format(\", \".join(strs))\n\n\nfrom .protocol import compressions, default_compression, to_serialize\n\n# TODO: use protocol.maybe_compress and proper file/memoryview objects\n\ndef dumps_to_disk(x):\n b = dumps(x)\n c = compressions[default_compression]['compress'](b)\n return c\n\ndef loads_from_disk(c):\n b = compressions[default_compression]['decompress'](c)\n x = loads(b)\n return x\n\ndef weight(k, v):\n return sizeof(v)\n\n\n@gen.coroutine\ndef run(worker, stream, function, args=(), kwargs={}, is_coro=False, wait=True):\n 
assert wait or is_coro, \"Combination not supported\"\n function = loads(function)\n if args:\n args = loads(args)\n if kwargs:\n kwargs = loads(kwargs)\n if has_arg(function, 'dask_worker'):\n kwargs['dask_worker'] = worker\n logger.info(\"Run out-of-band function %r\", funcname(function))\n try:\n result = function(*args, **kwargs)\n if is_coro:\n result = (yield result) if wait else None\n except Exception as e:\n logger.warn(\" Run Failed\\n\"\n \"Function: %s\\n\"\n \"args: %s\\n\"\n \"kwargs: %s\\n\",\n str(funcname(function))[:1000],\n convert_args_to_str(args, max_len=1000),\n convert_kwargs_to_str(kwargs, max_len=1000), exc_info=True)\n\n response = error_message(e)\n else:\n response = {\n 'status': 'OK',\n 'result': to_serialize(result),\n }\n raise Return(response)\n\n\nclass Worker(WorkerBase):\n def __init__(self, *args, **kwargs):\n self.tasks = dict()\n self.raw_tasks = dict()\n self.task_state = dict()\n self.dependencies = dict()\n self.dependents = dict()\n self.waiting_for_data = dict()\n self.who_has = dict()\n self.has_what = defaultdict(set)\n self.pending_data_per_worker = defaultdict(deque)\n\n self.data_needed = deque() # TODO: replace with heap?\n\n self.in_flight = dict()\n self.total_connections = 50\n self.connections = {}\n\n self.nbytes = dict()\n self.types = dict()\n self.priorities = dict()\n self.priority_counter = 0\n self.durations = dict()\n self.response = defaultdict(dict)\n self.host_restrictions = dict()\n self.worker_restrictions = dict()\n self.resource_restrictions = dict()\n\n self.ready = list()\n self.constrained = deque()\n self.executing = set()\n self.executed_count = 0\n self.long_running = set()\n\n self.batched_stream = None\n self.target_message_size = 200e6 # 200 MB\n\n self.log = deque(maxlen=100000)\n self.validate = kwargs.pop('validate', False)\n\n self._transitions = {\n ('waiting', 'ready'): self.transition_waiting_ready,\n ('waiting', 'memory'): self.transition_waiting_memory,\n ('ready', 'executing'): self.transition_ready_executing,\n ('ready', 'memory'): self.transition_ready_memory,\n ('constrained', 'executing'): self.transition_constrained_executing,\n ('executing', 'memory'): self.transition_executing_done,\n ('executing', 'error'): self.transition_executing_done,\n ('executing', 'long-running'): self.transition_executing_long_running,\n ('long-running', 'error'): self.transition_executing_done,\n ('long-running', 'memory'): self.transition_executing_done,\n }\n\n self.incoming_transfer_log = deque(maxlen=(100000))\n self.incoming_count = 0\n self.outgoing_transfer_log = deque(maxlen=(100000))\n self.outgoing_count = 0\n\n WorkerBase.__init__(self, *args, **kwargs)\n\n def __str__(self):\n return \"<%s: %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>\" % (\n self.__class__.__name__, self.address, self.status,\n len(self.data), len(self.executing), self.ncores,\n len(self.ready), len(self.in_flight),\n len(self.waiting_for_data))\n\n __repr__ = __str__\n\n ################\n # Update Graph #\n ################\n\n @gen.coroutine\n def compute_stream(self, stream):\n try:\n self.batched_stream = BatchedSend(interval=2, loop=self.loop)\n self.batched_stream.start(stream)\n\n closed = False\n\n while not closed:\n self.priority_counter += 1\n try:\n msgs = yield read(stream)\n except EnvironmentError:\n if self.reconnect:\n break\n else:\n yield self._close(report=False)\n break\n\n start = time()\n\n for msg in msgs:\n op = msg.pop('op', None)\n if 'key' in msg:\n validate_key(msg['key'])\n if op == 
'close':\n closed = True\n break\n elif op == 'compute-task':\n priority = msg.pop('priority')\n priority = [self.priority_counter] + priority\n priority = tuple(-x for x in priority)\n self.add_task(priority=priority, **msg)\n elif op == 'release-task':\n self.release_task(**msg)\n elif op == 'delete-data':\n self.delete_data(**msg)\n else:\n logger.warning(\"Unknown operation %s, %s\", op, msg)\n\n self.ensure_communicating()\n self.ensure_computing()\n\n end = time()\n if self.digests is not None:\n self.digests['handle-messages-duration'].add(end - start)\n\n yield self.batched_stream.close()\n logger.info('Close compute stream')\n except Exception as e:\n logger.exception(e)\n raise\n\n def add_task(self, key, function=None, args=None, kwargs=None, task=None,\n who_has=None, nbytes=None, priority=None, duration=None,\n host_restrictions=None, worker_restrictions=None,\n resource_restrictions=None, **kwargs2):\n try:\n if key in self.tasks:\n state = self.task_state[key]\n if state in ('memory', 'error'):\n if state == 'memory':\n assert key in self.data\n logger.debug(\"Asked to compute prexisting result: %s: %s\" ,\n key, state)\n self.batched_stream.send(self.response[key])\n return\n if state in IN_PLAY:\n return\n\n if key in self.data:\n self.response[key] = {'op': 'task-finished',\n 'status': 'OK',\n 'key': key,\n 'nbytes': self.nbytes[key],\n 'type': dumps_function(type(self.data[key]))}\n self.batched_stream.send(self.response[key])\n self.task_state[key] = 'memory'\n self.tasks[key] = None\n self.raw_tasks[key] = None\n self.log.append((key, 'new-task-already-in-memory'))\n return\n\n self.log.append((key, 'new'))\n try:\n self.tasks[key] = self._deserialize(function, args, kwargs, task)\n raw = {'function': function, 'args': args, 'kwargs': kwargs,\n 'task': task}\n self.raw_tasks[key] = {k: v for k, v in raw.items() if v is not None}\n except Exception as e:\n logger.warn(\"Could not deserialize task\", exc_info=True)\n emsg = error_message(e)\n emsg['key'] = key\n emsg['op'] = 'task-erred'\n self.batched_stream.send(emsg)\n self.log.append((key, 'deserialize-error'))\n return\n\n self.priorities[key] = priority\n self.durations[key] = duration\n if host_restrictions:\n self.host_restrictions[key] = set(host_restrictions)\n if worker_restrictions:\n self.worker_restrictions[key] = set(worker_restrictions)\n if resource_restrictions:\n self.resource_restrictions[key] = resource_restrictions\n self.task_state[key] = 'waiting'\n\n if nbytes is not None:\n self.nbytes.update(nbytes)\n\n if who_has:\n self.dependencies[key] = set(who_has)\n for dep in who_has:\n if dep not in self.dependents:\n self.dependents[dep] = set()\n self.dependents[dep].add(key)\n who_has = {dep: v for dep, v in who_has.items() if dep not in self.data}\n self.waiting_for_data[key] = set(who_has)\n else:\n self.waiting_for_data[key] = set()\n self.dependencies[key] = set()\n\n if who_has:\n for dep, workers in who_has.items():\n if dep not in self.who_has:\n self.who_has[dep] = set(workers)\n self.who_has[dep].update(workers)\n\n for worker in workers:\n self.has_what[worker].add(dep)\n self.pending_data_per_worker[worker].append(dep)\n\n self.data_needed.append(key)\n else:\n self.transition(key, 'ready')\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def release_task(self, key=None):\n if self.task_state.get(key) in PENDING:\n self.rescind_key(key)\n\n ###############\n # Transitions #\n ###############\n\n def transition(self, key, finish, 
**kwargs):\n start = self.task_state[key]\n if start == finish:\n return\n func = self._transitions[start, finish]\n state = func(key, **kwargs)\n self.log.append((key, start, state or finish))\n self.task_state[key] = state or finish\n if self.validate:\n self.validate_key(key)\n\n def transition_waiting_ready(self, key):\n try:\n if self.validate:\n assert self.task_state[key] == 'waiting'\n assert key in self.waiting_for_data\n assert not self.waiting_for_data[key]\n assert all(dep in self.data for dep in self.dependencies[key])\n assert key not in self.executing\n assert key not in self.ready\n\n del self.waiting_for_data[key]\n if key in self.resource_restrictions:\n self.constrained.append(key)\n return 'constrained'\n else:\n heapq.heappush(self.ready, (self.priorities[key], key))\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def transition_waiting_memory(self, key):\n try:\n if self.validate:\n assert self.task_state[key] == 'waiting'\n assert key in self.waiting_for_data\n assert key not in self.executing\n assert key not in self.ready\n\n del self.waiting_for_data[key]\n self.response[key] = {'op': 'task-finished',\n 'status': 'OK',\n 'key': key,\n 'nbytes': self.nbytes[key],\n 'type': dumps_function(type(self.data[key]))}\n self.batched_stream.send(self.response[key])\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def transition_ready_executing(self, key):\n try:\n if self.validate:\n assert key not in self.waiting_for_data\n # assert key not in self.data\n assert self.task_state[key] in ('ready', 'constrained')\n assert key not in self.ready\n assert all(dep in self.data for dep in self.dependencies[key])\n\n self.executing.add(key)\n self.loop.add_callback(self.execute, key)\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def transition_ready_memory(self, key):\n self.response[key] = {'op': 'task-finished',\n 'status': 'OK',\n 'key': key,\n 'nbytes': self.nbytes[key],\n 'type': dumps_function(type(self.data[key]))}\n self.batched_stream.send(self.response[key])\n\n def transition_constrained_executing(self, key):\n self.transition_ready_executing(key)\n for resource, quantity in self.resource_restrictions[key].items():\n self.available_resources[resource] -= quantity\n\n if self.validate:\n assert all(v >= 0 for v in self.available_resources.values())\n\n def transition_executing_done(self, key):\n try:\n if self.validate:\n assert key in self.executing or key in self.long_running\n assert key not in self.waiting_for_data\n assert key not in self.ready\n\n if key in self.resource_restrictions:\n for resource, quantity in self.resource_restrictions[key].items():\n self.available_resources[resource] += quantity\n\n if self.task_state[key] == 'executing':\n self.executing.remove(key)\n self.executed_count += 1\n elif self.task_state[key] == 'long-running':\n self.long_running.remove(key)\n if self.batched_stream:\n self.batched_stream.send(self.response[key])\n else:\n raise StreamClosedError()\n\n except EnvironmentError:\n logger.info(\"Stream closed\")\n self._close(report=False)\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def transition_executing_long_running(self, key):\n try:\n if self.validate:\n assert key in self.executing\n\n self.executing.remove(key)\n self.long_running.add(key)\n\n self.ensure_computing()\n except Exception as e:\n 
logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n ##########################\n # Gather Data from Peers #\n ##########################\n\n def ensure_communicating(self):\n try:\n while self.data_needed and len(self.connections) < self.total_connections:\n logger.debug(\"Ensure communicating. Pending: %d. Connections: %d/%d\",\n len(self.data_needed),\n len(self.connections),\n self.total_connections)\n\n key = self.data_needed[0]\n\n if key not in self.tasks:\n self.data_needed.popleft()\n continue\n\n if self.task_state.get(key) != 'waiting':\n self.log.append((key, 'communication pass'))\n self.data_needed.popleft()\n continue\n\n deps = self.dependencies[key]\n deps = [d for d in deps\n if d not in self.data\n and d not in self.executing\n and d not in self.in_flight]\n\n for dep in deps:\n if not self.who_has.get(dep):\n logger.info(\"Can't find dependencies for key %s\", key)\n self.cancel_key(key)\n continue\n\n self.log.append(('gather-dependencies', key, deps))\n\n while deps and len(self.connections) < self.total_connections:\n token = object()\n self.connections[token] = None\n self.loop.add_callback(self.gather_dep, deps.pop(), token, cause=key)\n\n if not deps:\n self.data_needed.popleft()\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def put_key_in_memory(self, key, value):\n if key in self.data:\n return\n\n self.data[key] = value\n\n if key not in self.nbytes:\n self.nbytes[key] = sizeof(value)\n\n self.types[key] = type(value)\n\n for dep in self.dependents.get(key, ()):\n if dep in self.waiting_for_data:\n if key in self.waiting_for_data[dep]:\n self.waiting_for_data[dep].remove(key)\n if not self.waiting_for_data[dep]:\n self.transition(dep, 'ready')\n\n if key in self.task_state:\n self.transition(key, 'memory')\n\n @gen.coroutine\n def gather_dep(self, dep, slot, cause=None):\n failures = 5\n del self.connections[slot]\n try:\n if self.validate:\n self.validate_state()\n\n while True:\n if not self.who_has.get(dep):\n if dep not in self.dependents:\n return\n failures += 1\n result = yield self.query_who_has(dep)\n if not result or failures > 5:\n for key in list(self.dependents[dep]):\n if dep in self.executing:\n continue\n if dep in self.waiting_for_data.get(key, ()):\n self.cancel_key(key)\n return\n else:\n assert self.who_has.get(dep)\n worker = random.choice(list(self.who_has[dep]))\n ip, port = worker.split(':')\n try:\n start = time()\n future = connect(ip, int(port), timeout=10)\n self.connections[future] = True\n stream = yield future\n end = time()\n if self.digests is not None:\n self.digests['gather-connect-duration'].add(end - start)\n except (gen.TimeoutError, EnvironmentError):\n logger.info(\"Failed to connect to %s\", worker)\n with ignoring(KeyError): # other coroutine may have removed\n for d in self.has_what.pop(worker):\n self.who_has[d].remove(worker)\n else:\n break\n finally:\n del self.connections[future]\n\n if dep in self.data or dep in self.in_flight: # someone beat us\n stream.close() # close newly opened stream\n return\n\n deps = {dep}\n\n total_bytes = self.nbytes[dep]\n L = self.pending_data_per_worker[worker]\n\n while L:\n d = L.popleft()\n if (d in self.data or\n d in self.in_flight or\n d in self.executing or\n d not in self.nbytes): # no longer tracking\n continue\n if total_bytes + self.nbytes[d] > self.target_message_size:\n break\n deps.add(d)\n total_bytes += self.nbytes[d]\n\n for d in deps:\n assert d not in self.in_flight\n self.in_flight[d] 
= stream\n self.log.append(('request-dep', dep, worker, deps))\n self.connections[stream] = deps\n\n try:\n start = time()\n logger.debug(\"Request %d keys and %d bytes\", len(deps),\n total_bytes)\n response = yield send_recv(stream, op='get_data', keys=list(deps),\n close=True, who=self.address)\n stop = time()\n deps2 = list(response)\n\n if cause:\n self.response[cause].update({'transfer_start': start,\n 'transfer_stop': stop})\n\n total_bytes = sum(self.nbytes.get(dep, 0) for dep in deps2)\n duration = (stop - start) or 0.5\n self.incoming_transfer_log.append({\n 'start': start,\n 'stop': stop,\n 'middle': (start + stop) / 2.0,\n 'duration': duration,\n 'keys': {dep: self.nbytes.get(dep, None) for dep in deps2},\n 'total': total_bytes,\n 'bandwidth': total_bytes / duration,\n 'who': worker\n })\n if self.digests is not None:\n self.digests['transfer-bandwidth'].add(total_bytes / duration)\n self.digests['transfer-duration'].add(duration)\n self.counters['transfer-count'].add(len(deps2))\n self.incoming_count += 1\n except EnvironmentError as e:\n logger.error(\"Worker stream died during communication: %s\",\n worker)\n response = {}\n self.log.append(('receive-dep-failed', worker))\n finally:\n del self.connections[stream]\n stream.close()\n\n self.log.append(('receive-dep', worker, list(response)))\n\n assert len(self.connections) < self.total_connections\n\n for d in deps:\n del self.in_flight[d]\n\n for d, v in response.items():\n self.put_key_in_memory(d, v)\n\n if response:\n self.batched_stream.send({'op': 'add-keys',\n 'keys': list(response)})\n\n for d in deps:\n if d not in response and d in self.dependents:\n self.log.append(('missing-dep', d))\n try:\n self.who_has[d].remove(worker)\n except KeyError:\n pass\n try:\n self.has_what[worker].remove(d)\n except KeyError:\n pass\n for key in self.dependents.get(d, ()):\n if key in self.waiting_for_data:\n self.data_needed.appendleft(key)\n\n if self.validate:\n self.validate_state()\n\n self.ensure_computing()\n self.ensure_communicating()\n except Exception as e:\n logger.exception(e)\n if self.batched_stream and LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n @gen.coroutine\n def query_who_has(self, *deps):\n with log_errors():\n response = yield self.scheduler.who_has(keys=deps)\n self.update_who_has(response)\n raise gen.Return(response)\n\n def update_who_has(self, who_has):\n try:\n for dep, workers in who_has.items():\n if dep in self.who_has:\n self.who_has[dep].update(workers)\n else:\n self.who_has[dep] = set(workers)\n\n for worker in workers:\n self.has_what[worker].add(dep)\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def cancel_key(self, key):\n try:\n self.log.append(('cancel', key))\n if key in self.waiting_for_data:\n missing = [dep for dep in self.dependencies[key]\n if dep not in self.data\n and not self.who_has.get(dep)]\n self.log.append(('report-missing-data', key, missing))\n self.batched_stream.send({'op': 'missing-data',\n 'key': key,\n 'keys': missing})\n self.forget_key(key)\n except Exception as e:\n logger.exception(e)\n raise\n\n def forget_key(self, key):\n try:\n self.log.append(('forget', key))\n if key in self.tasks:\n del self.tasks[key]\n del self.raw_tasks[key]\n del self.task_state[key]\n if key in self.waiting_for_data:\n del self.waiting_for_data[key]\n\n for dep in self.dependencies.pop(key, ()):\n self.dependents[dep].remove(key)\n if not self.dependents[dep]:\n del self.dependents[dep]\n\n if key in self.who_has:\n for worker 
in self.who_has.pop(key):\n self.has_what[worker].remove(key)\n if not self.has_what[worker]:\n del self.has_what[worker]\n\n if key not in self.dependents:\n if key in self.nbytes:\n del self.nbytes[key]\n if key in self.types:\n del self.types[key]\n if key in self.priorities:\n del self.priorities[key]\n if key in self.durations:\n del self.durations[key]\n if key in self.response:\n del self.response[key]\n\n if key in self.host_restrictions:\n del self.host_restrictions[key]\n if key in self.worker_restrictions:\n del self.worker_restrictions[key]\n if key in self.resource_restrictions:\n del self.resource_restrictions[key]\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def rescind_key(self, key):\n try:\n if self.task_state.get(key) not in PENDING:\n return\n del self.task_state[key]\n del self.tasks[key]\n del self.raw_tasks[key]\n if key in self.waiting_for_data:\n del self.waiting_for_data[key]\n\n for dep in self.dependencies.pop(key, ()):\n self.dependents[dep].remove(key)\n if not self.dependents[dep]:\n del self.dependents[dep]\n\n if key not in self.dependents:\n # if key in self.nbytes:\n # del self.nbytes[key]\n if key in self.priorities:\n del self.priorities[key]\n if key in self.durations:\n del self.durations[key]\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n ################\n # Execute Task #\n ################\n\n def meets_resource_constraints(self, key):\n if key not in self.resource_restrictions:\n return True\n for resource, needed in self.resource_restrictions[key].items():\n if self.available_resources[resource] < needed:\n return False\n\n return True\n\n def ensure_computing(self):\n try:\n while self.constrained and len(self.executing) < self.ncores:\n key = self.constrained[0]\n if self.task_state.get(key) != 'constrained':\n self.constrained.popleft()\n continue\n if self.meets_resource_constraints(key):\n self.constrained.popleft()\n self.transition(key, 'executing')\n else:\n break\n while self.ready and len(self.executing) < self.ncores:\n _, key = heapq.heappop(self.ready)\n if key not in self.task_state:\n continue\n if self.task_state[key] in ('memory', 'error', 'executing'):\n continue\n self.transition(key, 'executing')\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n @gen.coroutine\n def execute(self, key, report=False):\n try:\n if self.validate:\n assert key in self.executing\n assert key not in self.waiting_for_data\n assert self.task_state[key] == 'executing'\n\n function, args, kwargs = self.tasks[key]\n\n start = time()\n args2 = pack_data(args, self.data, key_types=str)\n kwargs2 = pack_data(kwargs, self.data, key_types=str)\n stop = time()\n if stop - start > 0.005:\n self.response[key]['disk_load_start'] = start\n self.response[key]['disk_load_stop'] = stop\n if self.digests is not None:\n self.digests['disk-load-duration'].add(stop - start)\n\n logger.debug(\"Execute key: %s\", key) # TODO: comment out?\n result = yield self.executor_submit(key, apply_function, function,\n args2, kwargs2,\n self.execution_state, key)\n\n result['key'] = key\n value = result.pop('result', None)\n self.response[key].update(result)\n\n if result['op'] == 'task-finished':\n self.put_key_in_memory(key, value)\n self.transition(key, 'memory')\n if self.digests is not None:\n self.digests['task-duration'].add(result['compute_stop'] -\n result['compute_start'])\n else:\n logger.warn(\" Compute 
Failed\\n\"\n \"Function: %s\\n\"\n \"args: %s\\n\"\n \"kwargs: %s\\n\",\n str(funcname(function))[:1000],\n convert_args_to_str(args2, max_len=1000),\n convert_kwargs_to_str(kwargs2, max_len=1000), exc_info=True)\n self.transition(key, 'error')\n\n logger.debug(\"Send compute response to scheduler: %s, %s\", key,\n self.response[key])\n\n if self.validate:\n assert key not in self.executing\n assert key not in self.waiting_for_data\n\n self.ensure_computing()\n self.ensure_communicating()\n except RuntimeError as e:\n logger.error(\"Thread Pool Executor error: %s\", e)\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n ##################\n # Administrative #\n ##################\n\n def validate_key_memory(self, key):\n assert key in self.data\n assert key in self.nbytes\n assert key not in self.waiting_for_data\n assert key not in self.executing\n assert key not in self.ready\n\n def validate_key_executing(self, key):\n assert key in self.executing\n assert key not in self.data\n assert key not in self.waiting_for_data\n assert all(dep in self.data for dep in self.dependencies[key])\n\n def validate_key_ready(self, key):\n assert key in pluck(1, self.ready)\n assert key not in self.data\n assert key not in self.executing\n assert key not in self.waiting_for_data\n assert all(dep in self.data for dep in self.dependencies[key])\n\n def validate_key_waiting(self, key):\n assert key not in self.data\n assert not all(dep in self.data for dep in self.dependencies[key])\n\n def validate_key(self, key):\n try:\n state = self.task_state[key]\n if state == 'memory':\n self.validate_key_memory(key)\n elif state == 'waiting':\n self.validate_key_waiting(key)\n elif state == 'ready':\n self.validate_key_ready(key)\n elif state == 'executing':\n self.validate_key_executing(key)\n except Exception as e:\n logger.exception(e)\n import pdb; pdb.set_trace()\n raise\n\n def validate_state(self):\n try:\n for key, workers in self.who_has.items():\n for w in workers:\n assert key in self.has_what[w]\n\n for worker, keys in self.has_what.items():\n for k in keys:\n assert worker in self.who_has[k]\n\n for key, state in self.task_state.items():\n if state == 'memory':\n assert key in self.data\n assert isinstance(self.nbytes[key], int)\n if state == 'error':\n assert key not in self.data\n if state == 'waiting':\n assert key in self.waiting_for_data\n s = self.waiting_for_data[key]\n for dep in self.dependencies[key]:\n assert (dep in s or\n dep in self.in_flight or\n dep in self.executing or\n dep in self.data)\n if state == 'ready':\n assert key in pluck(1, self.ready)\n if state == 'executing':\n assert key in self.executing\n if state == 'long-running':\n assert key not in self.executing\n assert key in self.long_running\n\n for key in self.tasks:\n if self.task_state[key] == 'memory':\n assert isinstance(self.nbytes[key], int)\n assert key not in self.waiting_for_data\n assert key in self.data\n\n except Exception as e:\n logger.exception(e)\n if LOG_PDB:\n import pdb; pdb.set_trace()\n raise\n\n def stateof(self, key):\n return {'executing': key in self.executing,\n 'waiting_for_data': key in self.waiting_for_data,\n 'heap': key in pluck(1, self.ready),\n 'data': key in self.data}\n\n def story(self, *keys):\n return [msg for msg in self.log\n if any(key in msg for key in keys)\n or any(key in c\n for key in keys\n for c in msg\n if isinstance(c, (tuple, list, set)))]\n\n\ndef is_valid_worker(worker_restrictions=None, host_restrictions=None,\n 
resource_restrictions=None, resources=None, worker=None):\n \"\"\"\n Can this worker run on this machine given known scheduling restrictions?\n\n Examples\n --------\n >>> is_valid_worker(worker_restrictions={'alice:8000', 'bob:8000'},\n ... worker='alice:8000')\n True\n\n >>> is_valid_worker(host_restrictions={'alice', 'bob'},\n ... worker='alice:8000')\n True\n\n >>> is_valid_worker(resource_restrictions={'GPU': 1, 'MEM': 8e9},\n ... resources={'GPU': 2, 'MEM':4e9})\n False\n\n >>> is_valid_worker(host_restrictions={'alice', 'bob'},\n ... resource_restrictions={'GPU': 1, 'MEM': 8e9},\n ... resources={'GPU': 2, 'MEM': 10e9},\n ... worker='charlie:8000')\n False\n \"\"\"\n if worker_restrictions is not None:\n if worker not in worker_restrictions:\n return False\n\n if host_restrictions is not None:\n host = worker.split(':')[0]\n if host not in host_restrictions:\n return False\n\n if resource_restrictions is not None:\n for resource, quantity in resource_restrictions.items():\n if resources.get(resource, 0) < quantity:\n return False\n\n return True\n","sub_path":"distributed/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":61708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"173583987","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 16 14:22:40 2018\n\n@author: Jonathan Klopfenstein\n\"\"\"\n\nimport numpy as np\n#from scipy.stats import invgamma\nimport time\nfrom sklearn import preprocessing\n\n\n# Timer\nstart_time = time.clock()\n\n# Reproducibility\n#np.random.seed(123)\n\n###############################################################################\n\n# Distribution functions\n\n# =============================================================================\n# def rinvchisq(df, scale):\n# a = df * 0.5\n# b = df * scale * 0.5\n# return invgamma.rvs(a, scale=b)\n# =============================================================================\n \n#def rinvchisq(df, scale):\n# return 1.0 / np.random.gamma(df/2.0, df * scale / 2.0)\n\ndef rinvchisq(df, scale):\n sample = (df * scale)/np.random.chisquare(df)\n return sample\n\n\n\n# rnorm is defined using the variance (i.e sigma^2)\ndef rnorm(mean, var):\n sd = np.sqrt(var)\n return np.random.normal(mean, sd)\n\ndef rbeta(a,b):\n return np.random.beta(a,b)\n\ndef rbernouilli(p):\n return np.random.binomial(1, p)\n\n###############################################################################\n\n# Util functions\n \ndef squared_norm(vector):\n return np.sum(np.square(vector))\n\n###############################################################################\n\n\n# Sampling functions\n \ndef sample_mu(N, sigma2_e, Y, X, beta):\n mean = np.sum(Y - np.matmul(X,beta))/N\n var = sigma2_e/N\n return rnorm(mean, var)\n\ndef sample_sigma2_e(N, epsilon, v0E, s0E):\n df = v0E + N\n scale = (squared_norm(epsilon) + v0E*s0E)/ df\n \n #return 1.0/np.random.gamma(v0E+0.5,1.0/(s0E+0.5*squared_norm(epsilon)/N))\n return rinvchisq(df, scale)\n\ndef sample_sigma2_b(beta, NZ, v0B, s0B):\n df = v0B + NZ\n scale = (squared_norm(beta) * NZ + v0B*s0B) / df # * NZ or not ????\n return rinvchisq(df, scale)\n\ndef sample_w(M, NZ):\n return rbeta(1 + NZ, 1 + M - NZ)\n\n###############################################################################\n\n# Data simulation\n \ndef build_toy_dataset(N, M, var_g):\n \n sigma_b = np.sqrt(var_g/M)\n sigma_e = np.sqrt((1 - var_g))\n \n beta_true = np.random.normal(0, sigma_b , M)\n x = sigma_b * 
np.random.randn(N, M)\n x=preprocessing.scale(x)\n y = np.dot(x, beta_true) + np.random.normal(0, sigma_e, N)\n return x, y, beta_true\n\n# Parameters of simulated data\n \nN = 5000\nM = 10\nvar_g=0.5\n\n# var(b) = var(g) / M\n# var(e) = 1 - var(g)\n# var(y) = var(g) + var(e)\n\nx, y, beta_true = build_toy_dataset(N,M,var_g)\n\n\n\n#x=preprocessing.scale(x)\n\n#y=preprocessing.scale(y)\n#x= preprocessing.scale(x)\n#y=preprocessing.scale(y)\n# beta_true = np.linspace(-4.,10.,10)\n\n#beta_true = np.ones(M) * 0.25\n#x = np.random.randn(N,M)\n#y = np.matmul(x, beta_true) + (np.random.randn(N) * 0.375)\n\n###############################################################################\n\n# Parameters setup\n\nEmu = np.zeros(1)\nvEmu = np.ones(N)\nEbeta = np.zeros(M)\nny = np.zeros(M)\nEw = np.zeros(1)\nepsilon = y\nNZ = np.zeros(1)\nsigma2_e = squared_norm(y) / (N*0.5)\nsigma2_b = rbeta(1,1)\n\nv0E, v0B = 0.001,0.001\n#s0B = sigma2_b / 2\n#s0E = sigma2_e / 2\ns0B=0.01\ns0E=0.01\n###############################################################################\n\n# Precomputations\n\nsm = np.zeros(M)\nfor i in range(M):\n sm[i] = squared_norm(x[:,i])\n\n###############################################################################\n\n\n# Gibbs sampling iterations\n\n\nnum_iter = 5000\n\nsigma_e_log = []\nsigma_b_log = []\nbeta_log = []\n\nfor i in range(num_iter):\n \n time_in = time.clock()\n Emu = sample_mu(N, sigma2_e, y, x, Ebeta)\n Emu=0\n index = np.random.permutation(M)\n epsilon=epsilon- vEmu*Emu\n print(\"Gibbs sampling iteration:\", i)\n print(\"Current marker:\")\n \n for marker in index:\n print(marker, end=\" \", flush=True)\n epsilon = epsilon + x[:,marker] * Ebeta[marker]\n Cj = sm[marker] + sigma2_e/sigma2_b\n rj = np.dot(x[:,marker], epsilon)\n ratio = np.sqrt(sigma2_b * Cj / sigma2_e) * np.exp(-(np.square(rj)/(2*Cj*sigma2_e)))\n pij = Ew/(Ew + ratio*(1-Ew))\n ny[marker] = rbernouilli(pij)\n ny[marker]=1\n if (ny[marker] == 0):\n \n Ebeta[marker] = 0\n \n elif (ny[marker] == 1):\n \n Ebeta[marker] = rnorm(rj/Cj, sigma2_e/Cj)\n \n epsilon = epsilon - x[:,marker] * Ebeta[marker]\n Ebeta=beta_true \n NZ = np.sum(ny)\n Ew = sample_w(M, NZ)\n #epsilon = y - np.matmul(x,Ebeta) - vEmu*Emu\n sigma2_b = sample_sigma2_b(Ebeta, NZ, v0B, s0B)\n sigma_b_log.append(sigma2_b)\n # sigma2_b=0.5/10\n sigma2_e = sample_sigma2_e(N, y - np.matmul(x,Ebeta), v0E, s0E)\n sigma_e_log.append(sigma2_e)\n # sigma2_e= 1-0.5\n time_out = time.clock()\n elapsed_time = time_out - time_in\n print(\"\")\n print(\"Emu: {}, Ew: {}, NZ: {}, sigma2_e: {}, sigma2_b: {}\".format(\n round(Emu,5),round(Ew,5),NZ, round(sigma2_e,5), round(sigma2_b,5)))\n print(\"\")\n print(\"Time for the {}th generation: {}\".format(i, elapsed_time))\n print(\"\")\n \n if(i > 2000):\n beta_log.append(Ebeta.reshape(M))\n \n \n \n \n \n \nprint(\"Ebeta\" + \"\\t\" + \" \", ' ny' + '\\t'+ ' beta_true')\nfor i in range(M):\n print(round(Ebeta[i],5), \"\\t\" + \"\", ny[i], \"\\t\", beta_true[i])\n\ntotal_time = time.clock()-start_time\nprint(\"Total time: \" + str(total_time) + \"s\")\n\n\nprint(squared_norm(y)/N)\n\nprint(\"mean sigma e:\" + str(np.mean(sigma_e_log[2500:5000])))\nprint(\"mean sigma b:\" + str(np.mean(sigma_b_log[2500:5000])))\n\nprint(\"mean estimated betas:\")\nprint(np.mean(beta_log, axis = 0).reshape(M,1))\n\n\n\n\n\n","sub_path":"python/NumpyBayes.py","file_name":"NumpyBayes.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
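The Gibbs sampler record above leans on rinvchisq for both variance components, so it is worth checking that sampler in isolation. Below is a minimal standalone sketch (not part of any record in this file; the function name, sample size, and tolerance are illustrative choices): it draws from the scaled inverse chi-squared distribution through the same chi-square identity used in the record, and compares the Monte Carlo mean against the closed-form mean df * scale / (df - 2), which holds for df > 2.

import numpy as np

def rinvchisq(df, scale, size=None):
    # Scale-inv-chi^2(df, scale) via the identity X = (df * scale) / chi^2_df
    return (df * scale) / np.random.chisquare(df, size=size)

df, scale = 10, 2.0
draws = rinvchisq(df, scale, size=200000)
expected_mean = df * scale / (df - 2)  # 2.5 for these parameters
assert abs(draws.mean() - expected_mean) < 0.05  # loose Monte Carlo bound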
+{"seq_id":"127967082","text":"# @SI_Copyright@\n# www.stacki.com\n# v1.0\n# \n# Copyright (c) 2006 - 2015 StackIQ Inc. All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# \n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice unmodified and in its entirety, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided \n# with the distribution.\n# \n# 3. All advertising and press materials, printed or electronic, mentioning\n# features or use of this software must display the following acknowledgement: \n# \n# \t \"This product includes software developed by StackIQ\" \n# \n# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,\n# neither the name or logo of this software nor the names of its\n# authors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY STACKIQ AND CONTRIBUTORS ``AS IS''\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# @SI_Copyright@\n#\n# @Copyright@\n# \t\t\t\tRocks(r)\n# \t\t www.rocksclusters.org\n# \t\t version 5.4 (Maverick)\n# \n# Copyright (c) 2000 - 2010 The Regents of the University of California.\n# All rights reserved.\t\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# \n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice unmodified and in its entirety, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided \n# with the distribution.\n# \n# 3. All advertising and press materials, printed or electronic, mentioning\n# features or use of this software must display the following acknowledgement: \n# \n# \t\"This product includes software developed by the Rocks(r)\n# \tCluster Group at the San Diego Supercomputer Center at the\n# \tUniversity of California, San Diego and its contributors.\"\n# \n# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,\n# neither the name or logo of this software nor the names of its\n# authors may be used to endorse or promote products derived from this\n# software without specific prior written permission. The name of the\n# software includes the following terms, and any derivatives thereof:\n# \"Rocks\", \"Rocks Clusters\", and \"Avalanche Installer\". 
For licensing of \n# the associated name, interested parties should contact Technology \n# Transfer & Intellectual Property Services, University of California, \n# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910, \n# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu\n# \n# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# @Copyright@\n\n\nimport threading\nimport os\nimport sys\nimport time\nimport socket\nimport subprocess\nimport shlex\nimport stack.commands\nfrom stack.exception import *\n\n\nclass Parallel(threading.Thread):\n\tdef __init__(self, cmdclass, cmd, host, collate):\n\t\tthreading.Thread.__init__(self)\n\t\tself.cmd = cmd\n\t\tself.host = host\n\t\tself.collate = collate\n\t\tself.cmdclass = cmdclass\n\t\tself.rc = False\n\n\tdef run(self):\n\t\t(self.rc, out, err) = self.cmdclass.runHostCommand(self.host, self.cmd)\n\n\t\tfor line in out:\n\t\t\tif self.collate:\n\t\t\t\tself.cmdclass.addOutput(self.host, line.strip())\n\t\t\telse:\n\t\t\t\tsys.stdout.write(\"%s\\n\" % line.strip())\n\n\t\tfor line in err:\n\t\t\tif self.collate:\n\t\t\t\tself.cmdclass.addOutput(self.host, line.strip())\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"%s\\n\" % line.strip())\n\n\n\t\nclass command(stack.commands.HostArgumentProcessor,\n\tstack.commands.run.command):\n\n\tMustBeRoot = 0\n\n\t\nclass Command(command):\n\t\"\"\"\n\tRun a command for each specified host.\n\n\t\n\tZero, one or more host names. If no host names are supplied, the command\n\tis run on all 'managed' hosts. By default, all compute nodes are\n\t'managed' nodes. To determine if a host is managed, execute:\n\t'rocks list host attr hostname | grep managed'. If you see output like:\n\t'compute-0-0: managed true', then the host is managed.\n\t\n\n\t\n\tThe command to run on the list of hosts.\n\t\n\n\t\n\tRun the command only on 'managed' hosts, that is, hosts that generally\n\thave an ssh login. Default is 'yes'.\n\t\n\n\t\n\tIf 'yes', enable X11 forwarding when connecting to hosts.\n\tDefault is 'no'.\n\t\n\n\t\n\tSets the maximum length of time (in seconds) that the command is\n\tallowed to run.\n\tDefault is '30'.\n\t\n\n\t\n\tSets the time (in seconds) to delay between each executed command\n\ton multiple hosts. For example, if the command is run on two\n\thosts and if the delay is 10, then the command will be executed on host\n\t1, then 10 seconds later, the command will be executed on host 2.\n\tDefault is '0' (no delay).\n\t\n\n\t\n\tPrepend the hostname to every output line if this parameter is set to\n\t'yes'. Default is 'yes'.\n\t\n\n\t\n\tThe number of threads to start in parallel. If num-threads is 0, then\n\ttry to run the command in parallel on all hosts. 
Default is '128'.\n\t\n\n\t\n\tRun the command 'hostname' on backend-0-0.\n\t\n\n\t\n\tRun the command 'ls /tmp/' on all backend nodes.\n\t\n\t\"\"\"\n\n\n\tdef runHostCommand(self, host, command):\n\t\t\"\"\"\n\t\tRuns the COMMAND on the remote HOST. If the HOST is the\n\t\tcurrent machine just run the COMMAND in a subprocess.\n\t\t\"\"\"\n\t\t\n\t\tonline = True\n\n\t\tif host != socket.gethostname().split('.')[0]:\n\t\t\t# First check to make sure the machine is up and SSH is responding.\n\t\t\t#\n\t\t\t# This catches the case when the node is up, sshd is sitting \n\t\t\t# on port 22, but it is not responding (e.g., the node is \n\t\t\t# overloaded, sshd is hung, etc.)\n\t\t\t#\n\t\t\t# sock.recv() should return something like:\n\t\t\t#\n\t\t\t#\tSSH-2.0-OpenSSH_4.3\n\n\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\tsock.settimeout(2.0)\n\t\t\ttry:\n\t\t\t\tsock.connect((host, 22))\n\t\t\t\tbuf = sock.recv(64)\n\t\t\texcept socket.error:\n\t\t\t\tonline = False\n\n\t\tif online:\n\t\t\tproc = subprocess.Popen([ 'ssh', host, command ],\n\t\t\t\t\t\tstdin = None,\n\t\t\t\t\t\tstdout = subprocess.PIPE,\n\t\t\t\t\t\tstderr = subprocess.PIPE)\n\n\t\t\t# communicate() drains both pipes and waits for the process to\n\t\t\t# exit; calling wait() first can deadlock when the output fills\n\t\t\t# the pipe buffers\n\t\t\to, e = proc.communicate()\n\t\t\tretval = proc.returncode\n\t\t\treturn (retval, o.split('\\n')[:-1], e.split('\\n')[:-1])\n\n\t\treturn (None, [ 'down' ], [])\n\n\n\tdef run(self, params, args):\n\t\t(command, managed, x11, t, d, c, n) = self.fillParams([\n\t\t\t('command', None, True),\n\t\t\t('managed', 'y'),\n\t\t\t('x11', 'n'),\n\t\t\t('timeout', '30'),\n\t\t\t('delay', '0'),\n\t\t\t('collate', 'y'),\n\t\t\t('num-threads', '128')\n\t\t\t])\n\n\t\ttry:\n\t\t\ttimeout = int(t)\n\t\texcept (TypeError, ValueError):\n\t\t\traise ParamType(self, 'timeout', 'integer')\n\n\t\tif timeout < 0:\n\t\t\traise ParamValue(self, 'timeout', '> 0')\n\n\t\ttry:\n\t\t\tnumthreads = int(n)\n\t\texcept (TypeError, ValueError):\n\t\t\traise ParamType(self, 'num-threads', 'integer')\n\n\t\ttry:\n\t\t\tdelay = float(d)\n\t\texcept (TypeError, ValueError):\n\t\t\traise ParamType(self, 'delay', 'float')\n\n\t\thosts = self.getHostnames(args, self.str2bool(managed))\n\t\t\n\t\t# This is the same as doing -x using ssh. 
Might be useful\n\t\t# for the common case, but required for the Viz Roll.\n\n\t\tif not self.str2bool(x11):\n\t\t\ttry:\n\t\t\t\tdel os.environ['DISPLAY']\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\n\t\t# By default collate is true, unless otherwise specified\n\t\tcollate = self.str2bool(c)\n\t\t\t\n\t\tif collate:\n\t\t\tself.beginOutput()\n\n\t\tif numthreads <= 0:\n\t\t\tnumthreads = len(hosts)\n\n\t\tthreads = []\n\n\t\ti = 0\n\t\trc = True\n\t\twork = len(hosts)\n\t\twhile work:\n\t\t\t\n\t\t\t# Run the first batch of threads\n\n\t\t\twhile i < numthreads and i < len(hosts):\n\t\t\t\thost = hosts[i]\n\t\t\t\ti += 1\t\n\n\t\t\t\tp = Parallel(self, command, host, collate)\n\t\t\t\tp.setDaemon(True)\n\t\t\t\tp.start()\n\t\t\t\tthreads.append(p)\n\n\t\t\t\tif delay > 0:\n\t\t\t\t\ttime.sleep(delay)\n\n\t\t\t# Collect completed threads\n\n\t\t\ttry:\n\t\t\t\ttotaltime = time.time()\n\t\t\t\twhile timeout == 0 or (time.time() - totaltime) < timeout:\n\n\t\t\t\t\tactive = threading.enumerate()\n\n\t\t\t\t\tt = threads\n\t\t\t\t\tfor thread in t:\n\t\t\t\t\t\tif thread not in active:\n\t\t\t\t\t\t\tthread.join(0.1)\n\t\t\t\t\t\t\tthreads.remove(thread)\n\t\t\t\t\t\t\tnumthreads += 1\n\t\t\t\t\t\t\twork -= 1\n\n\t\t\t\t\tif len(active) == 1:\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t\t# don't burn a CPU while waiting for the\n\t\t\t\t\t# threads to complete\n\n\t\t\t\t\ttime.sleep(0.5)\n\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\twork = 0\n\n\t\tif collate:\n\t\t\tself.endOutput(padChar='', trimOwner=False)\n\n\t\tfor thread in threads:\n\t\t\tif not thread.rc:\n\t\t\t\trc = False\n\t\t\t\tbreak\n\t\treturn rc\n","sub_path":"src/stack/command/stack/commands/run/host/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"198636861","text":"# -*- coding: utf-8 -*-\n'''\n :codeauthor: Jeff Frost\n :copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.\n :license: Apache 2.0, see LICENSE for more details.\n\n\n salt.grains.external_ip\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n Return the external IP address reported by one of the following providers:\n\n * ipecho.net\n * externalip.net\n * ident.me\n\n Which ever reports a valid IP first\n'''\n\n# Import Python Libs\nimport contextlib\nimport socket\n\n# Import salt libs\nfrom salt.utils.validate.net import ipv4_addr as _ipv4_addr\n\n# Import 3rd party libs\nfrom salt.ext.six.moves.urllib.request import urlopen as _urlopen\nfrom salt.ext.six.moves.urllib.error import HTTPError, URLError\n\ndef external_ip():\n '''\n Return the external IP address\n '''\n check_ips = ('http://ipecho.net/plain',\n 'http://api.externalip.net/ip',\n 'http://v4.ident.me')\n\n for url in check_ips:\n try:\n with contextlib.closing(_urlopen(url, timeout=3)) as req:\n ip_ = req.read().strip()\n if not _ipv4_addr(ip_):\n continue\n return {'external_ip': ip_}\n except (HTTPError, URLError, socket.timeout):\n continue\n\n # Return an empty value as a last resort\n return {'external_ip': []}\n","sub_path":"_grains/external_ip.py","file_name":"external_ip.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"9513119","text":"# zippyshare-downloader\n# __init__.py\n\n\"\"\"\nzippyshare-downloader\n\nDownload file from zippyshare directly from python\n\"\"\"\n\nfrom zippyshare_downloader.utils import getStartandEndvalue\nfrom bs4 import BeautifulSoup\nfrom 
download import download as dl\nimport requests\nimport math\n\nclass Zippyshare:\n    ALLOWED_NAMES = {\n        k: v for k, v in math.__dict__.items() if not k.startswith(\"__\")\n    }\n\n    def __init__(self, verbose=True, progress_bar=True, replace=True):\n        self._verbose = verbose\n        self._progress_bar = progress_bar\n        self._replace = replace\n    \n    def _get_url(self, u, r: requests.Response):\n        startpos_init = r.text.find('document.getElementById(\\'dlbutton\\').href')\n        scrapped_init = r.text[startpos_init:]\n        endpos_init = scrapped_init.find('')\n        scrapped = scrapped_init[:endpos_init]\n        element_value = scrapped[:scrapped.find(';')].replace('document.getElementById(\\'dlbutton\\').href = ', '')\n        url_download_init = getStartandEndvalue(element_value, '\"')\n        random_number = getStartandEndvalue(element_value, '(', ')')\n        # Now using self.evaluate() to safely do math calculations\n        url_number = str(self.evaluate(random_number))\n        continuation_download_url_init = getStartandEndvalue(element_value, '(')\n        continuation_download_url = continuation_download_url_init[continuation_download_url_init.find('\"')+1:]\n        return u[:u.find('.')] + '.zippyshare.com' + url_download_init + url_number + continuation_download_url\n\n    def _get_info(self, u, r: requests.Response):\n        parser = BeautifulSoup(r.text, 'html.parser')\n        list_infos = []\n        for _i in parser.find_all('font'):\n            i = str(_i)\n            if i.startswith(''):\n                list_infos.append(i)\n            elif i.startswith(''):\n                list_infos.append(i)\n            elif i.startswith(''):\n                list_infos.append(i)\n        return {\n            'name_file': getStartandEndvalue(list_infos[0], '>', '<'),\n            'size': getStartandEndvalue(list_infos[1], '>', '<'),\n            'date_upload': getStartandEndvalue(list_infos[2], '>', '<'),\n            'download_url': self._get_url(u, r)\n        }\n\n    def _request_get(self, url):\n        return requests.get(url)\n\n    def _extract_info(self, url, download=True):\n        r = self._request_get(url)\n        info = self._get_info(url, r)\n        if download:\n            dl(info['download_url'], info['name_file'], progressbar=self._progress_bar, verbose=self._verbose, replace=self._replace)\n        return info\n    \n    def _download(self, urls):\n        for url in urls:\n            r = self._request_get(url)\n            info = self._get_info(url, r)\n            dl(info['download_url'], info['name_file'], progressbar=self._progress_bar, verbose=self._verbose, replace=self._replace)\n\n    def download(self, *urls):\n        self._download(urls)\n\n    def extract_info(self, url: str, download=True):\n        return self._extract_info(url, download=download)\n\n    # Credit for the evaluate() method: Leodanis Pozo Ramos https://realpython.com/python-eval-function/\n    def evaluate(self, expression):\n        \"\"\"Evaluate a math expression.\"\"\"\n\n        # Compile the expression\n        code = compile(expression, \"\", \"eval\")\n\n        # Validate allowed names (ALLOWED_NAMES is a class attribute, so it\n        # must be reached through self)\n        for name in code.co_names:\n            if name not in self.ALLOWED_NAMES:\n                raise NameError(f\"The use of '{name}' is not allowed. 
Expression used: %s\" % (expression))\n\n return eval(code, {\"__builtins__\": {}}, self.ALLOWED_NAMES)\n\n","sub_path":"zippyshare_downloader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"626028375","text":"from unittest import TestCase\nfrom uuid import uuid4\n\nfrom walkoff.worker.action_exec_strategy import LocalActionExecutionStrategy, make_execution_strategy, \\\n RemoteActionExecutionStrategy\n\n\nclass MockRestrictedWorkflowContext:\n id = str(uuid4())\n execution_id = str(uuid4())\n name = 'test'\n\n\nclass TestActionExecutionStrategyFactory(TestCase):\n\n def test_local_strategy_creation(self):\n class MockConfig:\n ACTION_EXECUTION_STRATEGY = 'local'\n\n self.assertIsInstance(\n make_execution_strategy(MockConfig, MockRestrictedWorkflowContext),\n LocalActionExecutionStrategy\n )\n\n def test_remote_strategy_creation(self):\n class MockConfig:\n ACTION_EXECUTION_STRATEGY = 'remote'\n\n self.assertIsInstance(\n make_execution_strategy(MockConfig, MockRestrictedWorkflowContext),\n RemoteActionExecutionStrategy\n )\n\n def test_unknown_strategy_creation(self):\n class MockConfig:\n ACTION_EXECUTION_STRATEGY = 'invalid'\n\n with self.assertRaises(ValueError):\n make_execution_strategy(MockConfig, MockRestrictedWorkflowContext)\n","sub_path":"tests/test_action_exec_strategy_factory.py","file_name":"test_action_exec_strategy_factory.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"285092884","text":"user_input = input(\"Please enter total file required:\")\n\ncontent = \"\"\"Email: Rahul.Shinde@gmail.com\nFirstname : Harish\nLastname : rathod\nEmail: Harish.rathod@gmail.com\n\"\"\"\nfor i in range(int(user_input)):\n with open(\"file\"+str(i)+\".txt\", 'w+')as f:\n f.write(content)","sub_path":"PythonPractice/Decorators/createfiles.py","file_name":"createfiles.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"7944491","text":"# GaussianGenerativeModel.py\n\nimport numpy as np\nfrom scipy.stats import multivariate_normal as mvn \n\n\n# Implement a Gaussian Generative Model from scratch\n\n\nclass GaussianGenerativeModel:\n def __init__(self, is_shared_covariance=False, mu = None, cov = None, pi = None, k=3):\n self.is_shared_covariance = is_shared_covariance\n self.mu = mu\n self.cov = cov\n self.pi = pi \n \n # mle for dataset pi_k\n def __find_prior_pi(self, y):\n y = list(map(np.argmax, y))\n unique, counts = np.unique(y, return_counts=True)\n t = counts / len(y)\n self.pi = t\n return t\n \n # mle for data set mu_k\n def __find_mu(self, X,y):\n y = list(map(np.argmax, y))\n # finds class indices\n cl_0 = [i for i, num in enumerate(y) if num == 0]\n cl_1 = [i for i, num in enumerate(y) if num == 1]\n cl_2 = [i for i, num in enumerate(y) if num == 2]\n \n X_0 = []\n X_1 = []\n X_2 = []\n\n for i in range(X.shape[1]):\n X_0.append(np.mean(X[cl_0, i])) \n X_1.append(np.mean(X[cl_1, i]))\n X_2.append(np.mean(X[cl_2, i]))\n \n mu = [X_0, X_1, X_2]\n self.mu = mu\n \n def __find_sigma(self, X, ys, mu):\n y = list(map(np.argmax, ys))\n cl_0 = [i for i, num in enumerate(y) if num == 0]\n cl_1 = [i for i, num in enumerate(y) if num == 1]\n cl_2 = [i for i, num in enumerate(y) if num == 2]\n \n cov0 = np.cov(X[cl_0].T, bias = True)\n cov1 = np.cov(X[cl_1].T, 
bias = True)\n        cov2 = np.cov(X[cl_2].T, bias = True)\n        \n        if self.is_shared_covariance:\n            cv = np.cov(X.T, bias = True)\n            cov = [cv, cv, cv]\n        else:\n            cov = [cov0, cov1, cov2]\n        \n        self.cov = cov\n        \n        return cov\n    \n    \n    def __onehot(self, y):\n        n_values = np.max(y) + 1\n        y_enc = np.eye(n_values)[y]\n        return y_enc\n    \n\n    # fit function finds mle for gaussian parameters\n    def fit(self, X, y):\n        y = self.__onehot(y)\n        self.__find_mu(X, y)\n        self.__find_sigma(X, y, self.mu)\n        self.__find_prior_pi(y)\n    \n\n    # predict function\n    def predict(self, X_pred):\n        cl1 = []\n        cl2 = []\n        cl3 = []\n        \n        for i in range(X_pred.shape[0]):\n            cl1.append(mvn.pdf(X_pred[i], self.mu[0], self.cov[0]))\n            cl2.append(mvn.pdf(X_pred[i], self.mu[1], self.cov[1]))\n            cl3.append(mvn.pdf(X_pred[i], self.mu[2], self.cov[2]))\n        \n        y_mat = np.vstack((cl1,cl2,cl3)).T\n        y_p = np.array(list(map(np.argmax, y_mat)))\n        \n        return y_p\n\n    # implementation of negative log likelihood\n    def negative_log_likelihood(self, X, y):\n        y = self.__onehot(y)\n        X = np.array(X)\n        likel = []\n        # np.argmax over the one-hot rows already yields the class index,\n        # so elt is compared directly (a second argmax would always be 0)\n        for i, elt in enumerate(map(np.argmax, y)):\n            if elt == 0:\n                likel.append(np.log(self.pi[0]*mvn.pdf(X[i], self.mu[0], self.cov[0])))\n            elif elt == 1:\n                likel.append(np.log(self.pi[1]*mvn.pdf(X[i], self.mu[1], self.cov[1])))\n            else:\n                likel.append(np.log(self.pi[2]*mvn.pdf(X[i], self.mu[2], self.cov[2])))\n        \n        return -np.sum(likel)\n    \n    \n","sub_path":"GaussianGenerativeModel.py","file_name":"GaussianGenerativeModel.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"245946797","text":"from torch.autograd import Variable\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass SerializableModule(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n    def save(self,filename):\n        torch.save(self.state_dict(),filename)\n\n    def load(self,filename):\n        self.load_state_dict(torch.load(filename, map_location=lambda storage, loc: storage))\n\nclass CNN_KWS_Model(SerializableModule):\n    def __init__(self,config):\n        super(CNN_KWS_Model, self).__init__()\n        n_labels = config[\"n_labels\"]\n        n_featmaps1 = config[\"n_feature_maps1\"]\n\n        conv1_size = config[\"conv1_size\"] # (time, frequency)\n        conv1_pool = config[\"conv1_pool\"]\n        conv1_stride = tuple(config[\"conv1_stride\"])\n        dropout_prob = config[\"dropout_prob\"]\n        width = config[\"width\"]\n        height = config[\"height\"]\n        self.conv1 = nn.Conv2d(1, n_featmaps1, conv1_size, stride=conv1_stride)\n        self.pool1 = nn.MaxPool2d(conv1_pool)\n\n        x = Variable(torch.zeros(1, 1, height, width), requires_grad=False)\n        x = self.pool1(self.conv1(x))\n        conv_net_size = x.view(1, -1).size(1)\n        last_size = conv_net_size\n        self.lin = nn.Linear(conv_net_size, 32)\n\n        if \"conv2_size\" in config:\n            conv2_size = config[\"conv2_size\"]\n            conv2_pool = config[\"conv2_pool\"]\n            conv2_stride = tuple(config[\"conv2_stride\"])\n            n_featmaps2 = config[\"n_feature_maps2\"]\n            self.conv2 = nn.Conv2d(n_featmaps1, n_featmaps2, conv2_size, stride=conv2_stride)\n            self.pool2 = nn.MaxPool2d(conv2_pool)\n            x = self.pool2(self.conv2(x))\n            conv_net_size = x.view(1, -1).size(1)\n            last_size = conv_net_size\n            self.lin = nn.Linear(conv_net_size, 32)\n\n        if \"dnn1_size\" in config:\n            dnn1_size = config[\"dnn1_size\"]\n            last_size = dnn1_size\n            self.dnn1 = nn.Linear(32, dnn1_size)\n        if \"dnn2_size\" in config:\n            dnn2_size = config[\"dnn2_size\"]\n            last_size = dnn2_size\n            self.dnn2 = 
nn.Linear(dnn1_size, dnn2_size)\n self.output = nn.Linear(last_size, n_labels)\n self.dropout = nn.Dropout(dropout_prob)\n def forward(self, x):\n x = F.relu(self.conv1(x.unsqueeze(1))) # shape: (batch, channels, i1, o1)\n x = self.dropout(x)\n x = self.pool1(x)\n if hasattr(self, \"conv2\"):\n x = F.relu(self.conv2(x)) # shape: (batch, o1, i2, o2)\n x = self.dropout(x)\n x = self.pool2(x)\n x = x.view(x.size(0), -1) # shape: (batch, o3)\n if hasattr(self, \"lin\"):\n x = self.lin(x)\n if hasattr(self, \"dnn1\"):\n x = self.dnn1(x)\n x = F.relu(x)\n x = self.dropout(x)\n if hasattr(self, \"dnn2\"):\n x = self.dnn2(x)\n x = self.dropout(x)\n return self.output(x)\n\n","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"178106555","text":"import os, sys, shutil, argparse, inspect\nfrom datetime import datetime\nfrom copy import deepcopy\n\nimport scipy, numpy as np, pandas as pd\nfrom scipy.stats.mstats import gmean\nfrom numpy import mean as amean\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv( os.path.join('.','..','data','01_data_mars_opposition.csv') )\n\ndef get_global_alpha_beta(df):\n res_alpha, res_beta = [],[]\n for i in range(df.shape[0]):\n alpha = np.deg2rad( df.loc[i,'ZodiacIndex']*30 + df.loc[i,'Degree'] + \n df.loc[i,'Minute']/60.0 + df.loc[i,'Second']/3600.0 )\n beta = np.deg2rad( df.loc[i,'ZodiacIndexAverageSun']*30 + df.loc[i,'DegreeMean'] + \n df.loc[i,'MinuteMean']/60.0 + df.loc[i,'SecondMean']/3600.0 )\n res_alpha.append( alpha )\n res_beta.append( beta )\n return np.array(res_alpha), np.array(res_beta)\n\ndef indx_rad_phi(point,ix):\n x, y = point\n p1, p2, b = np.sin(beta[ix]-y), np.sin(alpha[ix]-y), np.cos(beta[ix]-alpha[ix])\n n1, n2, n3, d = 2*x*p1*p2*b , np.square(p1) , np.square(x*p2) , (1-np.square(b))\n rad = np.sqrt( sum([n1,100*n2,100*n3]) / d )\n phi = np.arcsin((x*p2)/rad)+alpha[ix]\n return rad, phi\n\ndef obj_func(point,args):\n ls_radius = np.array( [indx_rad_phi(point,i)[0] for i in range(df.shape[0])] )\n return np.log(amean(ls_radius)) - np.log(gmean(ls_radius))\n\n\ns1, s2 = 1.15,0.25\nalpha, beta = get_global_alpha_beta(df)\nbounds = [(0,np.inf),(-np.pi,np.pi)]\n\nopt_res = scipy.optimize.minimize(obj_func,(s1,s2),args=[alpha,beta],bounds=bounds,method='L-BFGS-B')\nprint( 'Minimum Value of Function Obtained is {}'.format(opt_res['fun']) )\nprint( 'Points providing mimum value\\nx = {}\\ny = {}'.format(opt_res['x'][0], opt_res['x'][1]) )\n\n# Plotting Points on Orbit\nplt_X, plt_Y = [], []\nfor i in range(df.shape[0]):\n rad, phi = indx_rad_phi((opt_res['x'][0], opt_res['x'][1]),i)\n plt_X.append( rad * np.cos(phi) )\n plt_Y.append( rad * np.sin(phi) )\ntitle = 'Orbital Positions'\nplt.scatter(plt_X,plt_Y,c='r')\nplt.title(title)\nplt.savefig('{}.PNG'.format(title),dpi=400,format='PNG')","sub_path":"mod2-MARS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"86311044","text":"\"\"\"\nIt sets up the file/folder structure by downloading the necessary files.\n\"\"\"\nimport sys\nimport subprocess\nimport tarfile\nfrom pathlib import Path\n\nimport conf\nfrom utils import curl, md5_matches\nfrom log import get_logger\n\nlogger = get_logger(\"setup\")\n\n\n#\n# These variables specify methods names (that download files) which should only\n# be executed in specific modes. 
For example, \"testing\" for unit testing, or\n# \"demo\" for data needed only for the demo.\n#\n\nMODES_ACTIONS = {\n \"testing\": {\n \"download_phenomexcan_rapid_gwas_pheno_info\",\n \"download_phenomexcan_rapid_gwas_data_dict_file\",\n \"download_uk_biobank_coding_3\",\n \"download_uk_biobank_coding_6\",\n \"download_phenomexcan_gtex_gwas_pheno_info\",\n \"download_gene_map_name_to_id\",\n \"download_gene_map_id_to_name\",\n \"download_biomart_genes_hg38\",\n \"download_multiplier_model_z_pkl\",\n \"download_multiplier_model_metadata_pkl\",\n \"download_predixcan_mashr_prediction_models\",\n \"download_gene_correlations_phenomexcan_rapid_gwas\",\n \"download_phenomexcan_smultixcan_mashr_zscores\",\n \"download_snps_covariance_gtex_mashr\",\n },\n \"demo\": {\n \"download_phenomexcan_rapid_gwas_pheno_info\",\n \"download_phenomexcan_gtex_gwas_pheno_info\",\n \"download_phenomexcan_rapid_gwas_data_dict_file\",\n \"download_uk_biobank_coding_3\",\n \"download_uk_biobank_coding_6\",\n \"download_biomart_genes_hg38\",\n \"download_gene_map_id_to_name\",\n \"download_gene_map_name_to_id\",\n \"download_multiplier_model_z_pkl\",\n \"download_multiplier_model_b_pkl\",\n \"download_multiplier_model_summary_pkl\",\n \"download_gene_correlations_phenomexcan_rapid_gwas\",\n },\n \"asthma-copd\": {\n \"download_plink2\",\n \"download_1000g_genotype_data\",\n \"download_liftover_hg19tohg38_chain\",\n \"download_eur_ld_regions\",\n \"download_setup_summary_gwas_imputation\",\n \"download_setup_metaxcan\",\n \"download_predixcan_mashr_prediction_models\",\n \"download_mashr_expression_smultixcan_snp_covariance\",\n \"download_gene_map_id_to_name\",\n \"download_gene_map_name_to_id\",\n \"download_biomart_genes_hg38\",\n \"download_multiplier_model_z_pkl\",\n \"download_snps_covariance_gtex_mashr\",\n },\n \"full\": {}, # empty means all actions/methods\n}\n\n\ndef download_phenomexcan_unified_pheno_info(**kwargs):\n output_file = conf.PHENOMEXCAN[\"UNIFIED_PHENO_INFO_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/dnce4hhp37mubhxbn7d0u8wp9u280c9n.gz\",\n output_file,\n \"2fdce9042244e13cc2952ec0cb3fd6d6\",\n logger=logger,\n )\n\n\ndef download_phenomexcan_rapid_gwas_pheno_info(**kwargs):\n output_file = conf.PHENOMEXCAN[\"RAPID_GWAS_PHENO_INFO_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/163mkzgd4uosk4pnzx0xsj7n0reu8yjv.gz\",\n output_file,\n \"cba910ee6f93eaed9d318edcd3f1ce18\",\n logger=logger,\n )\n\n\ndef download_phenomexcan_rapid_gwas_data_dict_file(**kwargs):\n output_file = conf.PHENOMEXCAN[\"RAPID_GWAS_DATA_DICT_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/u3po287ku1cj0jubbnsi7c4xawsaked5.tsv\",\n output_file,\n \"c4b5938a7fdb0b1525f984cfb815bda5\",\n logger=logger,\n )\n\n\ndef download_phenomexcan_gtex_gwas_pheno_info(**kwargs):\n output_file = conf.PHENOMEXCAN[\"GTEX_GWAS_PHENO_INFO_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/gur0ug0qg7hs88ybrsgrwx7eeymmxay1.tsv\",\n output_file,\n \"982434335f07acb1abfb83e57532f2c0\",\n logger=logger,\n )\n\n\ndef download_gene_map_name_to_id(**kwargs):\n output_file = conf.PHENOMEXCAN[\"GENE_MAP_NAME_TO_ID\"]\n curl(\n \"https://upenn.box.com/shared/static/t33a6iv4jtwc2pv2c1nllpnq0nlrfxkt.pkl\",\n output_file,\n \"582d93c30c18027eefd465516733170f\",\n logger=logger,\n )\n\n\ndef download_gene_map_id_to_name(**kwargs):\n output_file = conf.PHENOMEXCAN[\"GENE_MAP_ID_TO_NAME\"]\n curl(\n \"https://upenn.box.com/shared/static/p20w0ikxhvo04xf1b2zai53cpoqb4ljz.pkl\",\n output_file,\n 
\"63ac3ad54930d1b1490c6d02a68feb61\",\n logger=logger,\n )\n\n\ndef download_biomart_genes_hg38(**kwargs):\n output_file = conf.GENERAL[\"BIOMART_GENES_INFO_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/ks998wwlwble7rcb5cdthwjg1l0j1alb.gz\",\n output_file,\n \"c4d74e156e968267278587d3ce30e5eb\",\n logger=logger,\n )\n\n\ndef download_uk_biobank_coding_3(**kwargs):\n output_file = conf.UK_BIOBANK[\"CODING_3_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/1f5yjg31qxemvf5hqkoz559cau14xr68.tsv\",\n output_file,\n \"c02c65888793d4190fc190182128cc02\",\n logger=logger,\n )\n\n\ndef download_uk_biobank_coding_6(**kwargs):\n output_file = conf.UK_BIOBANK[\"CODING_6_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/libgz7998c2lsytjon8we1ouhabvh1z1.tsv\",\n output_file,\n \"23a2bca99ea0bf25d141fc8573f67fce\",\n logger=logger,\n )\n\n\ndef download_phenomexcan_smultixcan_mashr_zscores(**kwargs):\n output_file = conf.PHENOMEXCAN[\"SMULTIXCAN_MASHR_ZSCORES_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/taj1ex9ircek0ymi909of9anmjnj90k4.pkl\",\n output_file,\n \"83ded01d34c906092d64c1f5cc382fb0\",\n logger=logger,\n )\n\n\ndef download_smultixcan_mashr_raw_results(**kwargs):\n output_folder = conf.PHENOMEXCAN[\"GENE_ASSOC_DIR\"] / \"smultixcan\"\n if output_folder.exists():\n logger.warning(f\"Output directory already exists ({output_folder}). Skipping.\")\n return\n\n output_folder.parent.mkdir(exist_ok=True, parents=True)\n\n output_tar_file = output_folder.parent / \"phenomexcan-smultixcan.tar\"\n output_tar_file_md5 = \"da6beb02e927c0b586610a9138370a6b\"\n\n if not Path(output_tar_file).exists() or not md5_matches(\n output_tar_file_md5, output_tar_file\n ):\n # download\n curl(\n \"https://upenn.box.com/shared/static/7wa17vd7c2vax7g13g993s2gl2uviela.tar\",\n output_tar_file,\n output_tar_file_md5,\n logger=logger,\n )\n\n # uncompress file\n logger.info(f\"Extracting {output_tar_file}\")\n with tarfile.open(output_tar_file, \"r\") as f:\n f.extractall(output_folder.parent)\n\n # NO RENAME SHOULD BE NEEDED HERE\n # (output_folder.parent / \"eqtl\" / \"mashr\").rename(output_folder)\n # (output_folder.parent / \"eqtl\").rmdir()\n\n\ndef download_spredixcan_mashr_raw_results_partial(**kwargs):\n output_folder = conf.PHENOMEXCAN[\"GENE_ASSOC_DIR\"] / \"spredixcan\"\n if output_folder.exists():\n logger.warning(f\"Output directory already exists ({output_folder}). Skipping.\")\n return\n\n output_folder.parent.mkdir(exist_ok=True, parents=True)\n\n output_tar_file = output_folder.parent / \"phenomexcan-spredixcan-partial.tar\"\n output_tar_file_md5 = \"cf5aa2704fdfb6727b97dd87023da7a3\"\n\n if not Path(output_tar_file).exists() or not md5_matches(\n output_tar_file_md5, output_tar_file\n ):\n # download\n curl(\n \"https://upenn.box.com/shared/static/9dti6295bdoday4iv7kuri7v2f4w231x.tar\",\n output_tar_file,\n output_tar_file_md5,\n logger=logger,\n )\n\n # uncompress file\n logger.info(f\"Extracting {output_tar_file}\")\n with tarfile.open(output_tar_file, \"r\") as f:\n f.extractall(output_folder.parent)\n\n # NO RENAME SHOULD BE NEEDED HERE\n # (output_folder.parent / \"eqtl\" / \"mashr\").rename(output_folder)\n # (output_folder.parent / \"eqtl\").rmdir()\n\n\ndef download_gwas_parsing_raw_results_partial(**kwargs):\n output_folder = conf.PHENOMEXCAN[\"BASE_DIR\"] / \"gwas_parsing\"\n if output_folder.exists():\n logger.warning(f\"Output directory already exists ({output_folder}). 
Skipping.\")\n return\n\n output_folder.parent.mkdir(exist_ok=True, parents=True)\n\n output_tar_file = output_folder.parent / \"phenomexcan-gwas_parsing-partial.tar\"\n output_tar_file_md5 = \"b00ebbf8ac0330df2f04d1eb486bcd4a\"\n\n if not Path(output_tar_file).exists() or not md5_matches(\n output_tar_file_md5, output_tar_file\n ):\n # download\n curl(\n \"https://upenn.box.com/shared/static/fkj1yuzw6ayoovy7s89z7y5clal72awy.tar\",\n output_tar_file,\n output_tar_file_md5,\n logger=logger,\n )\n\n # uncompress file\n logger.info(f\"Extracting {output_tar_file}\")\n with tarfile.open(output_tar_file, \"r\") as f:\n f.extractall(output_folder.parent)\n\n # NO RENAME SHOULD BE NEEDED HERE\n # (output_folder.parent / \"eqtl\" / \"mashr\").rename(output_folder)\n # (output_folder.parent / \"eqtl\").rmdir()\n\n\ndef download_phenomexcan_smultixcan_mashr_pvalues(**kwargs):\n output_file = conf.PHENOMEXCAN[\"SMULTIXCAN_MASHR_PVALUES_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/wvrbt0v2ddrtb25g7dgw1be09yt9l14l.pkl\",\n output_file,\n \"3436a41e9a70fc2a206e9b13153ebd12\",\n logger=logger,\n )\n\n\ndef download_phenomexcan_fastenloc_rcp(**kwargs):\n output_file = conf.PHENOMEXCAN[\"FASTENLOC_TORUS_RCP_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/qgghpf4nyuj45su5a184e8geg4egjd20.pkl\",\n output_file,\n \"a1b12c552c0b41db3f3b0131910aa974\",\n logger=logger,\n )\n\n\ndef download_multiplier_model_summary_pkl(**kwargs):\n output_file = conf.MULTIPLIER[\"MODEL_SUMMARY_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/xfaez2u5wr258qb58lpexllyrvc7jolr.pkl\",\n output_file,\n \"1fdcd5dbee984b617dddb44937910710\",\n logger=logger,\n )\n\n\ndef download_multiplier_model_z_pkl(**kwargs):\n output_file = conf.MULTIPLIER[\"MODEL_Z_MATRIX_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/pz07jiy99f8yx0fx2grle8i5cstpn7fz.pkl\",\n output_file,\n \"c3c84d70250ab34d06625eedc3d5ff29\",\n logger=logger,\n )\n\n\ndef download_multiplier_model_b_pkl(**kwargs):\n output_file = conf.MULTIPLIER[\"MODEL_B_MATRIX_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/26n3l20t3755fjaihx9os783tk5hh2sa.pkl\",\n output_file,\n \"ef67e80b282781ec08beeb39f1bce07f\",\n logger=logger,\n )\n\n\ndef download_multiplier_model_metadata_pkl(**kwargs):\n output_file = conf.MULTIPLIER[\"MODEL_METADATA_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/efeulwvivjtucunvrx2nwq06pyzs3pkq.pkl\",\n output_file,\n \"21cfd84270d04ad30ac2bca7049c7dab\",\n logger=logger,\n )\n\n\ndef download_ukb_to_efo_map_tsv(**kwargs):\n # The original file was downloaded from:\n # https://github.com/EBISPOT/EFO-UKB-mappings/blob/master/UK_Biobank_master_file.tsv\n # on Nov 19, 2020\n output_file = conf.UK_BIOBANK[\"UKBCODE_TO_EFO_MAP_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/hwlpdlp3pq9buv955q5grlkxwwfxt6ul.tsv\",\n output_file,\n \"bfa56310d40e28f89c1f1b5d4ade0bf0\",\n logger=logger,\n )\n\n\ndef download_efo_ontology(**kwargs):\n # The original file was download from:\n # http://www.ebi.ac.uk/efo/efo.obo\n # on Nov 16, 2020\n output_file = conf.GENERAL[\"EFO_ONTOLOGY_OBO_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/nsrxx3szg4s69j84dg2oakx6mwjxoarb.obo\",\n output_file,\n \"2bf23581ff6365514a0b3b1b5ae4651a\",\n logger=logger,\n )\n\n\ndef download_emerge_phenotypes_description(**kwargs):\n output_file = conf.EMERGE[\"DESC_FILE_WITH_SAMPLE_SIZE\"]\n curl(\n \"https://upenn.box.com/shared/static/jvjaclxyckv4qd9gqh89qdc53147uctg.txt\",\n output_file,\n \"e8ed06025fc393e3216c1af9d6e16615\",\n )\n\n\ndef 
download_multiplier_banchereau_mcp_neutrophils(**kwargs):\n output_file = conf.MULTIPLIER[\"BANCHEREAU_MCPCOUNTER_NEUTROPHIL_FILE\"]\n curl(\n \"https://raw.githubusercontent.com/greenelab/multi-plier/master/results/40/Banchereau_MCPcounter_neutrophil_LV.tsv\",\n output_file,\n \"2ed8d71d9fdcf857a44b7fd1a42035f0\",\n )\n\n\ndef download_crispr_lipids_gene_sets_file(**kwargs):\n output_file = conf.CRISPR[\"LIPIDS_GENE_SETS_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/amiu6epztbuqjoad7eq9e50fpfdzdrvt.csv\",\n output_file,\n \"987eeef1987421b596988eba92e6305f\",\n logger=logger,\n )\n\n\ndef download_pharmacotherapydb_indications(**kwargs):\n output_file = conf.PHARMACOTHERAPYDB[\"INDICATIONS_FILE\"]\n curl(\n \"https://ndownloader.figshare.com/files/4823950\",\n output_file,\n \"33585132777601dedd3bed35caf718e2\",\n logger=logger,\n )\n\n\ndef download_lincs_consensus_signatures(**kwargs):\n output_file = conf.LINCS[\"CONSENSUS_SIGNATURES_FILE\"]\n curl(\n \"https://ndownloader.figshare.com/files/4797607\",\n output_file,\n \"891e257037adc15212405af461ffbfd6\",\n logger=logger,\n )\n\n\ndef _get_gene_correlations(\n cohort_name, file_url, file_md5, ref_panel=\"gtex_v8\", eqtl_panel=\"mashr\"\n):\n \"\"\"\n Downloads the gene correlations given a cohort, file url and file md5.\n Correlation files are downloaded to the default location.\n \"\"\"\n\n output_folder = (\n conf.RESULTS[\"GLS\"]\n / \"gene_corrs\"\n / \"cohorts\"\n / cohort_name\n / ref_panel.lower()\n / eqtl_panel.lower()\n / \"gene_corrs-symbols-within_distance_5mb.per_lv\"\n )\n if output_folder.exists():\n logger.warning(f\"Output directory already exists ({output_folder}). Skipping.\")\n return\n\n output_folder.parent.mkdir(parents=True, exist_ok=True)\n\n output_tar_file = Path(\n conf.RESULTS[\"GLS\"] / \"gene_corrs\" / \"cohorts\" / f\"{cohort_name}-gene_corrs.tar\"\n ).resolve()\n output_tar_file_md5 = file_md5\n\n if not Path(output_tar_file).exists() or not md5_matches(\n output_tar_file_md5, output_tar_file\n ):\n # download\n curl(\n file_url,\n output_tar_file,\n output_tar_file_md5,\n logger=logger,\n )\n\n # uncompress file\n logger.info(f\"Extracting {output_tar_file}\")\n with tarfile.open(output_tar_file, \"r\") as f:\n f.extractall(output_folder.parent)\n\n\ndef download_gene_correlations_phenomexcan_rapid_gwas(**kwargs):\n _get_gene_correlations(\n cohort_name=\"phenomexcan_rapid_gwas\",\n file_url=\"https://upenn.box.com/shared/static/5nj7j13yqi7wiqrspmat5f2fo6xaa9pp.tar\",\n file_md5=\"fb96f18421f7e0f79e74f568b5ae6c08\",\n )\n\n\ndef download_gene_correlations_phenomexcan_astle(**kwargs):\n _get_gene_correlations(\n cohort_name=\"phenomexcan_astle\",\n file_url=\"https://upenn.box.com/shared/static/82iprzu05bessy2o64ckfii06l0djyhl.tar\",\n file_md5=\"33abc9e199c6bc9ea95c56259b7d1ca3\",\n )\n\n\ndef download_gene_correlations_phenomexcan_other(**kwargs):\n _get_gene_correlations(\n cohort_name=\"phenomexcan_other\",\n file_url=\"https://upenn.box.com/shared/static/1notars78xxhbkeklj7xh7jrej49o9sg.tar\",\n file_md5=\"cad3ec7b1ae35510f9f653fea030b220\",\n )\n\n\ndef download_gene_correlations_emerge(**kwargs):\n _get_gene_correlations(\n cohort_name=\"emerge\",\n file_url=\"https://upenn.box.com/shared/static/bswgr2sn6g1y55ppt9j4e3rmohpvumn3.tar\",\n file_md5=\"3791b8a338485d0b0490773f6f3df912\",\n )\n\n\ndef download_gene_correlations_1000g_eur(**kwargs):\n _get_gene_correlations(\n cohort_name=\"1000g_eur\",\n file_url=\"https://upenn.box.com/shared/static/s3avu92x6wmumi6r7r4g7iglviixpxt5.tar\",\n 
file_md5=\"ad8b9dfb4bfa550d4ac4b847265d64f0\",\n )\n\n\ndef download_snps_covariance_gtex_mashr(eqtl_panel=\"mashr\", **kwargs):\n output_file = (\n conf.RESULTS[\"GLS\"]\n / \"gene_corrs\"\n / \"reference_panels\"\n / \"gtex_v8\"\n / eqtl_panel.lower()\n / \"snps_chr_blocks_cov.h5\"\n )\n\n curl(\n \"https://upenn.box.com/shared/static/oqddbztt3ymhqezxtjmtrfcasus7r26s.h5\",\n output_file,\n \"0d7895b07665d5d3afab1ba26d445901\",\n logger=logger,\n )\n\n\ndef download_predixcan_mashr_prediction_models(**kwargs):\n output_folder = conf.PHENOMEXCAN[\"PREDICTION_MODELS\"][\"MASHR\"]\n if output_folder.exists():\n logger.warning(f\"Output directory already exists ({output_folder}). Skipping.\")\n return\n\n output_folder.parent.mkdir(exist_ok=True, parents=True)\n\n output_tar_file = Path(\n conf.PHENOMEXCAN[\"PREDICTION_MODELS\"][\"BASE_DIR\"], \"mashr_eqtl.tar\"\n ).resolve()\n output_tar_file_md5 = \"87f3470bf2676043c748b684fb35fa7d\"\n\n if not Path(output_tar_file).exists() or not md5_matches(\n output_tar_file_md5, output_tar_file\n ):\n # download\n curl(\n \"https://zenodo.org/record/3518299/files/mashr_eqtl.tar?download=1\",\n output_tar_file,\n output_tar_file_md5,\n logger=logger,\n )\n\n # uncompress file\n logger.info(f\"Extracting {output_tar_file}\")\n with tarfile.open(output_tar_file, \"r\") as f:\n f.extractall(output_folder.parent)\n\n # rename folder\n (output_folder.parent / \"eqtl\" / \"mashr\").rename(output_folder)\n (output_folder.parent / \"eqtl\").rmdir()\n\n\ndef download_mashr_expression_smultixcan_snp_covariance(**kwargs):\n output_file = conf.PHENOMEXCAN[\"PREDICTION_MODELS\"][\"MASHR_SMULTIXCAN_COV_FILE\"]\n curl(\n \"https://zenodo.org/record/3518299/files/gtex_v8_expression_mashr_snp_smultixcan_covariance.txt.gz?download=1\",\n output_file,\n \"dda0eedeb842cfc272e76ad432753d73\",\n logger=logger,\n )\n\n\ndef download_spredixcan_hdf5_results(**kwargs):\n output_folder = conf.PHENOMEXCAN[\"SPREDIXCAN_MASHR_ZSCORES_FOLDER\"] / \"hdf5\"\n if output_folder.exists():\n logger.warning(f\"Output directory already exists ({output_folder}). 
Skipping.\")\n return\n\n output_folder.parent.mkdir(exist_ok=True, parents=True)\n\n output_tar_file = Path(\n conf.PHENOMEXCAN[\"GENE_ASSOC_DIR\"], \"spredixcan-mashr-zscores.tar\"\n ).resolve()\n output_tar_file_md5 = \"502cc184948c80c16ecea130a3523ebd\"\n\n if not Path(output_tar_file).exists() or not md5_matches(\n output_tar_file_md5, output_tar_file\n ):\n # download the two parts\n part0_filename = output_tar_file.parent / (output_tar_file.name + \".part0\")\n curl(\n \"https://upenn.box.com/shared/static/hayzpeowlvgjy6ctmp1gpochagq2ob62.tar\",\n part0_filename,\n \"333ae4a9b9fc215a1aa1c0628e03d65e\",\n logger=logger,\n )\n\n part1_filename = output_tar_file.parent / (output_tar_file.name + \".part1\")\n curl(\n \"https://upenn.box.com/shared/static/pl7hsqq7hqqf18kf4x0185xn1x45ov4i.tar\",\n part1_filename,\n \"3d41bc3e4d511081849a102f018af1a8\",\n logger=logger,\n )\n\n # combine\n logger.info(\"Concatenating parts\")\n with open(output_tar_file, \"wb\") as output_f, open(\n part0_filename, \"rb\"\n ) as part0_f, open(part1_filename, \"rb\") as part1_f:\n output_f.write(part0_f.read())\n output_f.write(part1_f.read())\n\n assert md5_matches(output_tar_file_md5, output_tar_file), \"Concatenation failed\"\n\n part0_filename.unlink()\n part1_filename.unlink()\n\n # uncompress file\n logger.info(f\"Extracting {output_tar_file}\")\n with tarfile.open(output_tar_file, \"r\") as f:\n tar_members = f.getmembers()\n members_dict = {t.name: t for t in tar_members}\n\n assert (\n output_folder.name in members_dict\n ), \"Output folder name not inside tar file\"\n\n f.extractall(output_folder.parent)\n\n\ndef download_1000g_genotype_data(**kwargs):\n output_folder = conf.PHENOMEXCAN[\"LD_BLOCKS\"][\"1000G_GENOTYPE_DIR\"]\n if output_folder.exists():\n logger.warning(f\"Output directory already exists ({output_folder}). Skipping.\")\n return\n\n output_folder.parent.mkdir(exist_ok=True, parents=True)\n\n output_tar_file = Path(\n conf.PHENOMEXCAN[\"LD_BLOCKS\"][\"BASE_DIR\"], \"sample_data.tar\"\n ).resolve()\n output_tar_file_md5 = \"8b42c388953d016e1112051d3b6140ed\"\n\n if not Path(output_tar_file).exists() or not md5_matches(\n output_tar_file_md5, output_tar_file\n ):\n # download\n curl(\n \"https://zenodo.org/record/3657902/files/sample_data.tar?download=1\",\n output_tar_file,\n output_tar_file_md5,\n logger=logger,\n )\n\n # uncompress file\n logger.info(f\"Extracting {output_tar_file}\")\n with tarfile.open(output_tar_file, \"r\") as f:\n selected_folder = [\n tarinfo\n for tarinfo in f.getmembers()\n if tarinfo.name.startswith(\"data/reference_panel_1000G/\")\n ]\n\n f.extractall(output_folder.parent, members=selected_folder)\n\n # rename folder\n (output_folder.parent / \"data\" / \"reference_panel_1000G\").rename(output_folder)\n (output_folder.parent / \"data\").rmdir()\n\n\ndef _get_file_from_zip(\n zip_file_url: str,\n zip_file_path: str,\n zip_file_md5: str,\n zip_internal_filename,\n output_file: Path,\n output_file_md5: str = None,\n):\n \"\"\"\n This method downloads a zip file and extracts a particular file inside\n it.\n\n Args:\n zip_file_url: URL pointing to a zip file.\n zip_file_path: path where the zip file will be downloaded to.\n zip_file_md5: MD5 hash of the zip file. It will be used to check if the file was already downloaded.\n zip_internal_filename: this is the internal file path that should be extracted. 
If it ends with \"/\",\n then it is treated as a folder and all members of it will be extracted.\n output_file: this is a path where the zip_internal_filename will be saved to.\n output_file_md5: MD5 hash of the internal zip file (the one being extracted). Ignored if a folder is extracted.\n \"\"\"\n from utils import md5_matches\n\n # output_file = conf.MULTIPLIER[\"RECOUNT2_MODEL_FILE\"]\n _internal_file = str(zip_internal_filename)\n\n # do not download file again if it exists and MD5 matches the expected one\n if (\n not _internal_file.endswith(\"/\")\n and output_file.exists()\n and (output_file_md5 is not None or md5_matches(output_file_md5, output_file))\n ):\n logger.info(f\"File already downloaded: {output_file}\")\n return\n\n # download zip file\n parent_dir = output_file.parent\n\n curl(\n zip_file_url,\n zip_file_path,\n zip_file_md5,\n logger=logger,\n )\n\n # extract model from zip file\n logger.info(f\"Extracting {_internal_file}\")\n import zipfile\n\n with zipfile.ZipFile(zip_file_path, \"r\") as z:\n\n if _internal_file.endswith(\"/\"):\n # it's a folder\n # in this case, output_file points to the output folder\n output_folder = output_file\n if output_folder.exists():\n logger.warning(\n f\"Output folder exists, skipping: '{str(output_folder)}'\"\n )\n return\n\n for i in z.namelist():\n if i.startswith(_internal_file):\n z.extract(i, parent_dir)\n else:\n # it's a file\n z.extract(_internal_file, path=parent_dir)\n\n # TODO: check output_file_md5 ?\n\n # rename file\n Path(parent_dir, zip_internal_filename).rename(output_file)\n if Path(zip_internal_filename).parent != Path(\".\"):\n Path(parent_dir, zip_internal_filename.parent).rmdir()\n\n # TODO: add optional parameter to delete the downloaded zip file?\n # delete zip file\n # zip_file_path.unlink()\n\n\ndef download_multiplier_recount2_model(**kwargs):\n \"\"\"\n This method downloads the MultiPLIER model on recount2.\n \"\"\"\n _get_file_from_zip(\n zip_file_url=\"https://ndownloader.figshare.com/files/10881866\",\n zip_file_path=Path(\n conf.MULTIPLIER[\"RECOUNT2_MODEL_FILE\"].parent, \"recount2_PLIER_data.zip\"\n ).resolve(),\n zip_file_md5=\"f084992c5d91817820a2782c9441b9f6\",\n zip_internal_filename=Path(\"recount2_PLIER_data\", \"recount_PLIER_model.RDS\"),\n output_file=conf.MULTIPLIER[\"RECOUNT2_MODEL_FILE\"],\n output_file_md5=\"fc7446ff989d0bd0f1aae1851d192dc6\",\n )\n\n\ndef download_1000g_genotype_data_from_plink(**kwargs):\n output_file = conf.A1000G[\"GENOTYPES_DIR\"] / \"all_phase3.pgen.zst\"\n curl(\n \"https://www.dropbox.com/s/y6ytfoybz48dc0u/all_phase3.pgen.zst?dl=1\",\n output_file,\n \"a52dcc2ad7ee09b29895df8fa044012e\",\n logger=logger,\n )\n\n output_file = conf.A1000G[\"GENOTYPES_DIR\"] / \"all_phase3.pvar.zst\"\n curl(\n \"https://www.dropbox.com/s/odlexvo8fummcvt/all_phase3.pvar.zst?dl=1\",\n output_file,\n \"aa99c03e3fd9c5fe702239d07161a153\",\n logger=logger,\n )\n\n output_file = conf.A1000G[\"GENOTYPES_DIR\"] / \"all_phase3.psam\"\n curl(\n \"https://www.dropbox.com/s/6ppo144ikdzery5/phase3_corrected.psam?dl=1\",\n output_file,\n \"b9a6d22dbf794f335ed122e465faef1d\",\n logger=logger,\n )\n\n output_file = conf.A1000G[\"GENOTYPES_DIR\"] / \"deg2_phase3.king.cutoff.out.id\"\n curl(\n \"https://www.dropbox.com/s/zj8d14vv9mp6x3c/deg2_phase3.king.cutoff.out.id?dl=1\",\n output_file,\n \"9b047ac7ffb14a5e2be2ee7a68a95f8a\",\n logger=logger,\n )\n\n\ndef _download_plink_generic(\n plink_zip_file: str,\n plink_executable_filename,\n output_file,\n platform_parameters: dict,\n):\n \"\"\"\n 
Generic function that downloads a specific PLINK version.\n\n Args:\n plink_zip_file: zip_file_path argument of function _get_file_from_zip\n plink_executable_filename: zip_internal_filename argument of function _get_file_from_zip\n output_file: output_file argument of function _get_file_from_zip\n platform_parameters: platform-specific (Linux, macOS, etc) parameters to download a plink version. They keys\n must be strings returned by the platform.system() function (such as \"Linux\" or \"Darwin\"). Values are dictionaries\n with strings as keys and values, and mandatory keys are \"zip_file_url\", \"zip_file_md5\" and \"output_file_md5\",\n which are all given to function _get_file_from_zip\n \"\"\"\n import platform\n\n current_system = platform.system()\n if current_system not in platform_parameters:\n raise ValueError(\"plink download for your platform was not added\")\n\n platform_parameters = platform_parameters[current_system]\n zip_file_url = platform_parameters[\"zip_file_url\"]\n zip_file_md5 = platform_parameters[\"zip_file_md5\"]\n output_file_md5 = platform_parameters[\"output_file_md5\"]\n\n # generic parameters\n zip_file_path = plink_zip_file\n zip_internal_filename = plink_executable_filename\n output_file = output_file\n\n _get_file_from_zip(\n zip_file_url=zip_file_url,\n zip_file_path=zip_file_path,\n zip_file_md5=zip_file_md5,\n zip_internal_filename=zip_internal_filename,\n output_file=output_file,\n output_file_md5=output_file_md5,\n )\n\n # make plink executable\n import os\n import stat\n\n st = os.stat(output_file)\n os.chmod(output_file, st.st_mode | stat.S_IEXEC)\n\n\ndef download_plink19(**kwargs):\n _download_plink_generic(\n plink_zip_file=Path(conf.PLINK[\"BASE_DIR\"], \"plink.zip\").resolve(),\n plink_executable_filename=Path(\"plink\"),\n output_file=conf.PLINK[\"EXECUTABLE_VERSION_1_9\"],\n platform_parameters={\n \"Linux\": {\n \"zip_file_url\": \"https://upenn.box.com/shared/static/6egljvof0gfug40rnr1q7ikdekly45ho.zip\",\n \"zip_file_md5\": \"446600c3930997a031476b5961ed372f\",\n \"output_file_md5\": \"f285ab12811ab3063952a2e20adf9860\",\n },\n \"Darwin\": {\n \"zip_file_url\": \"https://s3.amazonaws.com/plink1-assets/plink_mac_20220402.zip\",\n \"zip_file_md5\": \"f5e78f0f4f8da2b60cfa77dc60d5847f\",\n \"output_file_md5\": \"626fb1c3452de35d2365715e16c03034\",\n },\n },\n )\n\n\ndef download_plink2(**kwargs):\n _download_plink_generic(\n plink_zip_file=Path(conf.PLINK[\"BASE_DIR\"], \"plink2.zip\").resolve(),\n plink_executable_filename=Path(\"plink2\"),\n output_file=conf.PLINK[\"EXECUTABLE_VERSION_2\"],\n platform_parameters={\n \"Linux\": {\n \"zip_file_url\": \"https://upenn.box.com/shared/static/gr8b2qyg2hoo2lnlvhgcje77gc6un68h.zip\",\n \"zip_file_md5\": \"2e8e5d134a583f9f869a94fb11477208\",\n \"output_file_md5\": \"064529cc22083c44e4c6beeff33c206d\",\n },\n \"Darwin\": {\n \"zip_file_url\": \"https://s3.amazonaws.com/plink2-assets/plink2_mac_20220426.zip\",\n \"zip_file_md5\": \"51729ba53ccba1fb0de10158df289e45\",\n \"output_file_md5\": \"b62cbb4841d1bf062952f279f167fb2b\",\n },\n },\n )\n\n\ndef _create_conda_environment(\n environment_folder: Path, environment_spec: Path, channel_priority: str = \"flexible\"\n):\n \"\"\"\n It runs the commands to create a conda environment, given an environment specification (YAML file) and folder.\n\n Args:\n environment_folder: the output folder where the conda environment will be created.\n environment_spec: YAML file with conda specification.\n channel_priority: the conda channel priority for this 
environment.\n \"\"\"\n # make sure parent folder exists\n environment_folder.parent.mkdir(parents=True, exist_ok=True)\n\n if environment_folder.exists():\n logger.warning(\n f\"Environment directory already exists ({str(environment_folder)}). Skipping.\"\n )\n return\n\n logger.info(\n f\"Creating conda environment in '{environment_folder}' using specification in '{environment_spec}' and channel priority '{channel_priority}'\"\n )\n\n # create empty environment\n cmd = subprocess.check_call(\n [\n \"conda\",\n \"create\",\n \"-y\",\n \"-p\",\n str(environment_folder),\n ],\n stdout=sys.stdout,\n stderr=subprocess.STDOUT,\n )\n\n # set channel priority\n cmd = subprocess.check_call(\n [\n \"conda\",\n \"run\",\n \"-p\",\n str(environment_folder),\n \"--no-capture-output\",\n \"conda\",\n \"config\",\n \"--env\",\n \"--set\",\n \"channel_priority\",\n channel_priority,\n ],\n stdout=sys.stdout,\n stderr=subprocess.STDOUT,\n )\n\n # install packages\n cmd = subprocess.check_call(\n [\n \"conda\",\n \"run\",\n \"-p\",\n str(environment_folder),\n \"--no-capture-output\",\n \"conda\",\n \"env\",\n \"update\",\n \"-p\",\n str(environment_folder),\n \"--file\",\n str(environment_spec),\n ],\n stdout=sys.stdout,\n stderr=subprocess.STDOUT,\n )\n\n\ndef download_setup_summary_gwas_imputation(**kwargs):\n _get_file_from_zip(\n zip_file_url=\"https://github.com/hakyimlab/summary-gwas-imputation/archive/206dac587824a6f207e137ce8c2d7b15d81d5869.zip\",\n zip_file_path=Path(conf.SOFTWARE_DIR, \"summary-gwas-imputation.zip\").resolve(),\n zip_file_md5=\"b2e9ea5587c7cf35d42e7e16411efeb5\",\n zip_internal_filename=\"summary-gwas-imputation-206dac587824a6f207e137ce8c2d7b15d81d5869/\",\n output_file=conf.GWAS_IMPUTATION[\"BASE_DIR\"],\n )\n\n _create_conda_environment(\n environment_folder=conf.GWAS_IMPUTATION[\"CONDA_ENV\"],\n environment_spec=conf.GWAS_IMPUTATION[\"BASE_DIR\"] / \"src/conda_env.yaml\",\n )\n\n\ndef download_setup_metaxcan(**kwargs):\n _get_file_from_zip(\n zip_file_url=\"https://github.com/hakyimlab/MetaXcan/archive/cfc9e369bbf5630e0c9488993cd877f231c5d02e.zip\",\n zip_file_path=Path(conf.SOFTWARE_DIR, \"metaxcan.zip\").resolve(),\n zip_file_md5=\"ba377831c279002ea8dbb260b0f20880\",\n zip_internal_filename=\"MetaXcan-cfc9e369bbf5630e0c9488993cd877f231c5d02e/\",\n output_file=conf.METAXCAN[\"BASE_DIR\"],\n )\n\n _create_conda_environment(\n environment_folder=conf.METAXCAN[\"CONDA_ENV\"],\n environment_spec=conf.METAXCAN[\"BASE_DIR\"] / \"software/conda_env.yaml\",\n )\n\n\ndef download_liftover_hg19tohg38_chain(**kwargs):\n output_file = conf.GENERAL[\"LIFTOVER\"][\"HG19_TO_HG38\"]\n curl(\n \"http://hgdownload.cse.ucsc.edu/goldenpath/hg19/liftOver/hg19ToHg38.over.chain.gz\",\n output_file,\n \"35887f73fe5e2231656504d1f6430900\",\n logger=logger,\n )\n\n\ndef download_eur_ld_regions(**kwargs):\n output_file = conf.GENERAL[\"EUR_LD_REGIONS_FILE\"]\n curl(\n \"https://upenn.box.com/shared/static/yo25wpoct6vl8fk2s1rgxrs2wym7k1yb.gz\",\n output_file,\n \"900e4a7d3a14ae87de25ee48f7083dba\",\n logger=logger,\n )\n\n\nif __name__ == \"__main__\":\n import argparse\n from collections import defaultdict\n\n # create a list of available options. For example:\n # --mode=full: it downloads all the data.\n # --mode=testing: it downloads a smaller set of the data. 
This is useful for\n # GitHub Actions workflows.\n # --mode=demo: it downloads the data needed for the demo\n # (other modes might be specified in MODES_ACTIONS)\n AVAILABLE_ACTIONS = defaultdict(dict)\n\n # Obtain all local attributes of this module and run functions to download files\n local_items = list(locals().items())\n for key, value in local_items:\n # iterate only on download_* methods\n if not (\n callable(value)\n and value.__module__ == __name__\n and key.startswith(\"download_\")\n ):\n continue\n\n for mode, mode_actions in MODES_ACTIONS.items():\n if len(mode_actions) == 0:\n # if mode_actions is empty, it means all actions should be\n # added to that mode (e.g. \"full\" mode)\n AVAILABLE_ACTIONS[mode][key] = value\n elif key in mode_actions:\n AVAILABLE_ACTIONS[mode][key] = value\n\n parser = argparse.ArgumentParser(description=\"PhenoPLIER data setup.\")\n parser.add_argument(\n \"--mode\",\n choices=list(AVAILABLE_ACTIONS.keys()),\n default=\"full\",\n help=\"Specifies which kind of data should be downloaded. For example, \"\n \"it could be all the data (full) or a small subset (testing, which is \"\n \"used by unit tests).\",\n )\n parser.add_argument(\n \"--actions\",\n nargs=\"+\",\n help=\"Specifies a list of actions to be executed. It could be any of \"\n \"the following: \" + \" \".join(AVAILABLE_ACTIONS[\"full\"].keys()),\n )\n args = parser.parse_args()\n\n method_args = vars(args)\n\n methods_to_run = {}\n\n if args.actions is not None:\n for a in args.actions:\n if a not in AVAILABLE_ACTIONS[\"full\"]:\n logger.error(f\"The action does not exist: {a}\")\n sys.exit(1)\n\n methods_to_run[a] = AVAILABLE_ACTIONS[\"full\"][a]\n else:\n methods_to_run = AVAILABLE_ACTIONS[args.mode]\n\n for method_name, method in methods_to_run.items():\n method(**method_args)\n","sub_path":"environment/scripts/setup_data.py","file_name":"setup_data.py","file_ext":"py","file_size_in_byte":35350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"432971710","text":"# import the necessary packages\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\nimport argparse\nimport pickle\nimport h5py\n\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--db\", required=True,\n help=\"path to HDF5 database\")\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to output model\")\nap.add_argument(\"-j\", \"--jobs\", type=int, default=-1,\n help=\"# of jobs to run when tuning hyperparameters\")\nargs = vars(ap.parse_args())\n\ndb = h5py.File(args[\"db\"], \"r\")  # \"r\" is read-only mode\ni = int(db[\"labels\"].shape[0] * 0.75)\n\ndata = db[\"features\"][:]\ndata_labels = db[\"labels\"][:]\ntrain_X = data[:i]\ntest_X = data[i:]\nprint(\"trainX shape:\", train_X.shape)\nprint(\"testX shape:\", test_X.shape)\ntrain_Y = data_labels[:i]\ntest_Y = data_labels[i:]\n\nprint(\"trainY shape:\", train_Y.shape)\nprint(\"testY shape:\", test_Y.shape)\n\nprint(\"testY\", test_Y[:20])\n\n\n# define the set of parameters that we want to tune then start a\n# grid search where we evaluate our model for each value of C\nprint(\"[INFO] tuning hyperparameters...\")\nparams = {\"C\": [0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0]}\nmodel = GridSearchCV(LogisticRegression(), params, cv=3, n_jobs=args[\"jobs\"])\n\n\nmodel.fit(train_X, train_Y)\nprint(\"[INFO] best hyperparameters: 
{}\".format(model.best_params_))\n# evaluate the model\n\n'''\nprint(\"n_classes:\\n\",forest.n_classes_) # 3\nprint(\"classes:\\n\",forest.classes_) # [0,1,2]\nprint(\"n_features:\\n\",forest.n_features_) # 25088\nprint(\"N-outputs:\",forest.n_outputs_) # 1\n'''\n\nprint(\"[INFO] evaluating...\") \n\npreds=model.predict(test_X)\n#print(preds.shape)\n\nprint(\"# of labels:\",db[\"labels\"][i:].shape)\nprint(classification_report(test_Y,preds,target_names=db[\"label_names\"]))\n\n# serialize the model to disk\nprint(\"[INFO] saving the model...\")\nf=open(args[\"model\"],\"wb\")\nf.write(pickle.dumps(model.best_estimator_))\nf.close()\n\n#close the database\ndb.close()\n\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"82398197","text":"from ..misc import swap\nfrom ..structures.heap import Heap\n\ndef heap_sort(arr):\n heap = Heap(arr)\n Heap.build_max_heap(heap)\n \n while heap.size:\n heap.extract_max()\n\ndef merge_sort(arr):\n if len(arr) == 1:\n return arr\n\n left=merge_sort(arr[:len(arr)//2])\n right=merge_sort(arr[len(arr)//2:])\n \n return merge(left,right)\n\ndef merge(left,right):\n merged = []\n lt = rt = 0\n\n while lt < len(left) and rt < len(right):\n if left[lt] <= right[rt]:\n merged.append(left[lt])\n lt+=1\n else:\n merged.append(right[rt])\n rt+=1\n \n merged.extend(right[rt:]) if rt < len(right) else merged.extend(left[lt:])\n\n return merged\n \n\ndef insertion_sort(arr):\n for i in range(1, len(arr)):\n j=i\n while j > 0 and arr[j] < arr[j-1]:\n swap(arr, j-1, j)\n j-=1\n\n\ndef insertion_sort_bin(arr):\n for i in range(1, len(arr)):\n \n # on average 20 % faster than regular see bench.py\n e=i-1\n s=0\n \n while(s <= e):\n m = (e+s) // 2\n if arr[i] <= arr[m]:\n e = m - 1\n else:\n s = m + 1\n \n j=i\n while j > s:\n swap(arr, j-1, j)\n j-=1\n \n\n\n\n","sub_path":"tools/algorithms/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"544231462","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 6 20:41:20 2021\r\n\r\n@author: DELL\r\n\"\"\"\r\n\r\ndevices = []\r\nfile = open('devices.txt','r')\r\nfor item in file:\r\n item = item.strip()\r\n devices.append(item)\r\n\r\nfile.close()\r\nprint(devices)","sub_path":"17_lecturafile_conlista_GuerrCleopatra.py","file_name":"17_lecturafile_conlista_GuerrCleopatra.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"380713874","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport shutil\n\nfrom util.onehot.OneHot import *\n\nclass KUtil:\n def __init__(mine):\n pass\n\n def calc_feature_len(feats,selected_column_idx):\n nfeats = len(feats)\n #selected_column_idx=[5,6,7,8,17,20,22,25,29,30,31,32,35,36,37,39,40,41,42,44]\n for id in selected_column_idx:\n klass = globals()[\"OneHot%02d\"%id]\n #print(\"static:\",klass, klass.N)\n nfeats += klass.N-1\n\n return nfeats\n\n def get_one_hot(targets, nb_classes):\n res = np.eye(nb_classes)[np.array(targets).reshape(-1)]\n return res.reshape(list(targets.shape)+[nb_classes])\n\n def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return 
tf.Variable(initial)\n\n def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\n def variable_summaries(var): \n #Attach a lot of summaries to a Tensor (for TensorBoard visualization).\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\n def export_model(session, export_dir_name, inputs, outputs):\n cwd = os.getcwd()\n path = os.path.join(cwd, export_dir_name)\n shutil.rmtree(path, ignore_errors=True)\n print(\"Exporting trained model to: \", path)\n tf.saved_model.simple_save(session, path, inputs, outputs)\n return\n\n builder = tf.saved_model.builder.SavedModelBuilder(export_path)\n\n classification_inputs = tf.saved_model.utils.build_tensor_info(inputs)\n classification_outputs_classes = tf.saved_model.utils.build_tensor_info(outputs)\n classification_outputs_scores = tf.saved_model.utils.build_tensor_info(scores)\n\n classification_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\n tf.saved_model.signature_constants.CLASSIFY_INPUTS:\n classification_inputs\n },\n outputs={\n tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:\n classification_outputs_classes,\n tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:\n classification_outputs_scores\n },\n method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))\n \n tensor_info_x = tf.saved_model.utils.build_tensor_info(x)\n tensor_info_y = tf.saved_model.utils.build_tensor_info(y)\n\n prediction_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'features': tensor_info_x},\n outputs={'scores': tensor_info_y},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))\n\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n 'predict_images':\n prediction_signature,\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n classification_signature,\n },\n main_op=tf.tables_initializer(),\n strip_default_attrs=True)\n\n builder.save()\n print('Done exporting!')\n\n\ndef predict(path_to_model, features, labels):\n graph = tf.Graph()\n with graph.as_default():\n with tf.Session(graph=graph) as sess:\n # Restore saved values\n print('\\nRestoring...')\n tf.saved_model.loader.load(\n sess,\n [tag_constants.SERVING],\n path_to_model\n )\n print('Ok')\n # Get restored placeholders\n labels_data_ph = graph2.get_tensor_by_name('labels_data_ph:0')\n features_data_ph = graph2.get_tensor_by_name('features_data_ph:0')\n #batch_size_ph = graph2.get_tensor_by_name('batch_size_ph:0')\n # Get restored model output\n restored_logits = graph2.get_tensor_by_name('dense/BiasAdd:0')\n # Get dataset initializing operation\n dataset_init_op = graph2.get_operation_by_name('dataset_init')\n\n # Initialize restored dataset\n sess.run(\n dataset_init_op,\n feed_dict={\n features_data_ph: features\n ,labels_data_ph: labels\n #,batch_size_ph: 32\n }\n )\n # Compute inference for both batches in dataset\n sess.run(restored_logits)\n '''\n restored_values = []\n for i in range(2):\n restored_values.append(sess.run(restored_logits))\n 
print('Restored values: ', restored_values[i][0])\n '''\n\n # Check if original inference and restored inference are equal\n #valid = all((v == rv).all() for v, rv in zip(values, restored_values))\n #print('\\nInferences match: ', valid) \n\ndef main(argv):\n print(\"OK\")\n\nif __name__ == \"__main__\":\n import sys\n\n main(sys.argv)\n","sub_path":"KUtil.py","file_name":"KUtil.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"340010036","text":"import websockets\nimport asyncio\nfrom datetime import datetime\nfrom contextlib import suppress\nimport socket\nimport json\nimport re\nimport zlib\n\nbinance_btc_usdt_book_ticker_websocket_uri = \"wss://stream.binance.com:9443/ws/btcusdt@bookTicker\"\nokex_btc_usdt_book_ticker_websocket_uri = \"wss://real.okex.com:8443/ws/v3\"\n\ndef average(bid_price, ask_price):\n return (bid_price + ask_price) / 2\n\nasync def binance_socket_handler(websocket):\n with suppress(websockets.exceptions.ConnectionClosed):\n async for message in websocket:\n message = json.loads(message)\n bid_ask_average = average(*map(float, (message[\"b\"], message[\"a\"])))\n print(f\" {datetime.now().time()} binance.com {bid_ask_average}\")\n\n print(\"### binance.com connection closed ###\")\n\nasync def okex_socket_handler(websocket):\n def inflate(data):\n decompress = zlib.decompressobj(-zlib.MAX_WBITS)\n inflated = decompress.decompress(data)\n inflated += decompress.flush()\n\n return inflated\n\n request_params = {\"op\": \"subscribe\", \"args\": [\"spot/ticker:BTC-USDT\"]}\n request_str = json.dumps(request_params)\n\n with suppress(websockets.exceptions.ConnectionClosed):\n while True:\n await websocket.send(request_str)\n\n await websocket.recv() # header raw response\n\n body_raw_response = await websocket.recv()\n body_json_str_response = inflate(body_raw_response).decode(\"utf8\")\n body_json_response = json.loads(body_json_str_response)\n\n data_object = body_json_response[\"data\"][0]\n\n bid_ask_average = average(*map(float, (data_object[\"best_bid\"], data_object[\"best_ask\"])))\n\n print(f\" {datetime.now().time()} okex.com {bid_ask_average}\")\n\n print(\"### okex.com connection closed ###\")\n\nclass CryptoExchangeWebSocket:\n\n def __init__(self):\n self.__connections = set()\n self.__socket_handlers = dict()\n\n async def add_connection(self, uri, socket_handler, ping_interval=20):\n '''\n Add a cryptocurrency exchange connection\n '''\n\n connection = None\n exchange_name = re.search(r\"\\w+\\.\\w+(?=\\:)\", uri).group(0)\n\n try:\n connection = await websockets.client.connect(uri, ping_timeout=10, ping_interval=ping_interval)\n except websockets.exceptions.InvalidURI:\n print(\"Error: URI is invalid\")\n return connection\n except (websockets.exceptions.InvalidHandshake, socket.gaierror):\n print(\"### connection error on\", exchange_name, \"###\")\n return connection\n\n print(\"### connected to\", exchange_name, \"###\")\n\n self.__connections.add(connection)\n self.__socket_handlers[connection] = socket_handler\n\n return connection\n\n async def run_loop(self):\n if not self.__connections:\n return\n\n await asyncio.wait([self.__socket_handlers[connection](connection) for connection in self.__connections])\n\ndef main():\n cryptoExchangeWebSocket = CryptoExchangeWebSocket()\n\n connections = set()\n connections.add(cryptoExchangeWebSocket.add_connection(binance_btc_usdt_book_ticker_websocket_uri, 
binance_socket_handler, 1))\n connections.add(cryptoExchangeWebSocket.add_connection(okex_btc_usdt_book_ticker_websocket_uri, okex_socket_handler, 1))\n\n event_loop = asyncio.get_event_loop()\n connection_tasks, _ = event_loop.run_until_complete(asyncio.wait(connections))\n\n if all(connection_task.result() is None for connection_task in connection_tasks):\n return\n\n print(\"\\tTimestamp\\tExchange\\tAverage\")\n\n with suppress(KeyboardInterrupt):\n event_loop.run_until_complete(cryptoExchangeWebSocket.run_loop())\n\n event_loop.stop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"CryptoExchangeWebSocket/crypto_exchange_module.py","file_name":"crypto_exchange_module.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"450452875","text":"import collections\nimport inspect\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow.python.util import nest\n\nCombinerPreprocessingLayer = inspect.getmro(preprocessing.Normalization)[1]\nCombiner = inspect.getmro(preprocessing.Normalization()._combiner.__class__)[1]\n\nINT = 'int'\nNONE = 'none'\nONE_HOT = 'one-hot'\n\n\nclass CategoricalEncoding(CombinerPreprocessingLayer):\n \"\"\"Encode the categorical features to numerical features.\n\n # Arguments\n encoding: A list of strings, which has the same number of elements as the\n columns in the structured data. Each of the strings specifies the\n encoding method used for the corresponding column. Use 'int' for\n categorical columns and 'none' for numerical columns.\n \"\"\"\n\n # TODO: Support one-hot encoding.\n\n def __init__(self, encoding, **kwargs):\n super().__init__(\n combiner=CategoricalEncodingCombiner(encoding),\n **kwargs)\n self.encoding = encoding\n self.tables = {\n str(index): tf.lookup.experimental.DenseHashTable(\n key_dtype=tf.string,\n value_dtype=tf.int64,\n default_value=-3,\n empty_key='-2',\n deleted_key='-1'\n )\n for index, method in enumerate(self.encoding)\n if method in [INT, ONE_HOT]\n }\n\n for key, table in self.tables.items():\n tracked_table = self._add_trackable(table, trainable=False)\n tracked_table.shape = tf.TensorShape((0,))\n\n def _set_state_variables(self, updates):\n for key, vocab in updates.items():\n self.tables[key].insert(\n np.array(vocab, dtype=np.str),\n np.arange(len(vocab))\n )\n\n def call(self, inputs):\n inputs = nest.flatten(inputs)[0]\n outputs = []\n for index in range(len(self.encoding)):\n col = tf.slice(inputs, [0, index], [-1, 1])\n if self.encoding[index] in [INT, ONE_HOT]:\n col = self.tables[str(index)].lookup(col)\n col = tf.cast(col, tf.float32)\n else:\n col = tf.strings.to_number(col, tf.float32)\n outputs.append(col)\n outputs = tf.concat(outputs, axis=-1)\n outputs.set_shape(inputs.shape)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def compute_output_signature(self, input_spec):\n return input_spec\n\n def get_config(self):\n config = {'encoding': self.encoding}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass CategoricalEncodingCombiner(Combiner):\n\n def __init__(self, encoding):\n self.encoding = encoding\n\n def compute(self, values, accumulator=None):\n if accumulator is None:\n accumulator = collections.defaultdict(set)\n for line in K.get_value(values):\n for index, value in 
enumerate(line):\n if self.encoding[index] in [INT, ONE_HOT]:\n accumulator[str(index)].add(value)\n return accumulator\n\n def merge(self, accumulators):\n base_accumulator = collections.defaultdict(set)\n for accumulator in accumulators:\n for index, method in enumerate(self.encoding):\n if method in [INT, ONE_HOT]:\n # keys must be str(index) to match compute() and the tables\n base_accumulator[str(index)] = base_accumulator[str(index)].union(\n accumulator[str(index)])\n return base_accumulator\n\n def extract(self, accumulator):\n return {\n key: list(value)\n for key, value in accumulator.items()\n }\n\n def restore(self, output):\n return {\n key: set(value)\n for key, value in output.items()\n }\n\n def serialize(self, accumulator):\n pass\n\n def deserialize(self, encoded_accumulator):\n pass\n\n\nclass Sigmoid(tf.keras.layers.Layer):\n \"\"\"Sigmoid activation function.\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def call(self, inputs):\n return tf.keras.activations.sigmoid(inputs)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nCUSTOM_OBJECTS = {\n 'CategoricalEncoding': CategoricalEncoding,\n 'Sigmoid': Sigmoid,\n 'Normalization': preprocessing.Normalization\n}\n","sub_path":"autokeras/keras_layers.py","file_name":"keras_layers.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"23581848","text":"import config as my_cfg\nimport json\nimport numpy as np\nimport os\nfrom PIL import Image\nimport preproccessing as pre\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nimport re\nfrom torchvision.models import resnet152\n\nclass VQAv2(Dataset):\n def __init__(self):\n self.img_path_list = []\n for entry in os.listdir(my_cfg.VAL['Feats']):\n if os.path.isfile(os.path.join(my_cfg.VAL['Feats'], entry)):\n self.img_path_list.append(os.path.join(my_cfg.VAL['Feats'], entry))\n for entry in os.listdir(my_cfg.TRAIN['Feats']):\n if os.path.isfile(os.path.join(my_cfg.TRAIN['Feats'], entry)):\n self.img_path_list.append(os.path.join(my_cfg.TRAIN['Feats'], entry))\n self.qns_path_list = []\n for entry in os.listdir(my_cfg.VAL['Questions_feat']):\n if os.path.isfile(os.path.join(my_cfg.VAL['Questions_feat'], entry)):\n self.qns_path_list.append(os.path.join(my_cfg.VAL['Questions_feat'], entry))\n for entry in os.listdir(my_cfg.TRAIN['Questions_feat']):\n if os.path.isfile(os.path.join(my_cfg.TRAIN['Questions_feat'], entry)):\n self.qns_path_list.append(os.path.join(my_cfg.TRAIN['Questions_feat'], entry))\n with open(my_cfg.VAL['Questions']) as json_file:\n self.qns_list = json.load(json_file)['questions']\n with open(my_cfg.TRAIN['Questions']) as json_file:\n self.qns_list += json.load(json_file)['questions']\n\n with open(my_cfg.VAL['Answers']) as json_file:\n self.ans_list = json.load(json_file)['annotations'] # +=\n with open(my_cfg.TRAIN['Answers']) as json_file:\n self.ans_list += json.load(json_file)['annotations'] # +=\n\n self.id_to_ques = {}\n for qn in self.qns_list:\n self.id_to_ques[int(qn['question_id'])] = qn\n \n self.id_to_ques_path = {}\n for qn in self.qns_path_list:\n self.id_to_ques_path[int(qn.split('/')[-1].split('.')[0])] = qn\n\n self.id_to_img_path = {}\n for im in self.img_path_list:\n self.id_to_img_path[int(im.split('/')[-1].split('_')[-1].split('.')[0])] = im\n\n self.ans_to_ix, self.ix_to_ans = json.load(open(my_cfg.answer_dict, 'r'))\n\n self.token_to_ix, self.pretrained_emb = pre.tokenize(self.qns_list)\n\n self.ans_size = len(self.ans_to_ix)\n 
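 # ans_to_ix / ix_to_ans map answer strings to class indices; __getitem__
 # below turns the annotators' answers into a soft score vector via
 # pre.get_score. A sketch of that kind of soft scoring, assuming the usual
 # VQA accuracy rule min(#agreeing annotators / 3, 1) -- the exact rule
 # inside pre.get_score is an assumption here:
 #
 #   from collections import Counter
 #   counts = Counter(pre.prep_ans(a['answer']) for a in ans['answers'])
 #   soft = {a_: min(n / 3.0, 1.0) for a_, n in counts.items()}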
self.token_size = len(self.token_to_ix)\n self.data_size = len(self.ans_list)\n\n def __len__(self):\n return self.data_size\n\n def __getitem__(self, idx):\n img_feat_iter = np.zeros(1)\n ques_ix_iter = np.zeros(1)\n ans_iter = np.zeros(1)\n \n ans = self.ans_list[idx]\n qid = int(ans['question_id'])\n ques_path = self.id_to_ques_path[qid]\n ques_ix = np.load(ques_path)\n\n id = int(ans['image_id'])\n img_path = self.id_to_img_path[id]\n img_feat = np.load(img_path, allow_pickle=True)['arr_0'][()]\n\n img_feat_x = img_feat['x']\n boxes = img_feat['boxes']\n\n if img_feat_x.shape[0] > my_cfg.img_feat_pad_size:\n img_feat_x = img_feat_x[:my_cfg.img_feat_pad_size]\n \n img_feat_x = np.pad(\n img_feat_x,\n ((0, my_cfg.img_feat_pad_size - img_feat_x.shape[0]), (0, 0)),\n mode='constant',\n constant_values=0\n )\n # Process answer\n ans_score = np.zeros(self.ans_to_ix.__len__(), np.float32)\n ans_prob_dict = {}\n\n for ans_ in ans['answers']:\n ans_proc = pre.prep_ans(ans_['answer'])\n if ans_proc not in ans_prob_dict:\n ans_prob_dict[ans_proc] = 1\n else:\n ans_prob_dict[ans_proc] += 1\n\n for ans_ in ans_prob_dict:\n if ans_ in self.ans_to_ix:\n ans_score[self.ans_to_ix[ans_]] = pre.get_score(ans_prob_dict[ans_])\n \n # np.save(my_cfg.TRAIN['ProcessedA'] + str(qid) + '.npy', ques_ix)\n return torch.from_numpy(img_feat_x), \\\n torch.from_numpy(ques_ix), \\\n torch.from_numpy(ans_score), idx","sub_path":"experiments/CASCADE+BERT/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"336779676","text":"from django.urls import path\nfrom . import views\n\napp_name = 'funcionarios'\n\nurlpatterns = [\n path('edit/<int:pk>/', views.EditarFuncionario.as_view(), name='edit_funcionario'),\n path('delete/<int:pk>/', views.DeletaFuncionario.as_view(), name='delete_funcionario'),\n path('', views.FuncionariosList.as_view(), name='funcionarios_list'),\n path('novo/', views.FuncionariosNovo.as_view(), name='funcionarios_novo'),\n path('pdf-reportlab/', views.some_view, name='pdf-reportlab'),\n]","sub_path":"apps/funcionarios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"164068270","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun April 30 20:16:19 2017\r\n\r\n@author: Shahriyar\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport string\r\nimport matplotlib.pyplot as plt\r\nfrom nltk.corpus import stopwords\r\n\r\n\r\n\r\n# used for cleaning textual data; removes all special characters\r\ndef cleaning(s):\r\n s = str(s)\r\n s = s.lower()\r\n s = re.sub('\\s\\W',' ',s)\r\n s = re.sub('\\W,\\s',' ',s)\r\n s = re.sub(r'[^\\w]', ' ', s)\r\n s = re.sub(\"\\d+\", \"\", s)\r\n s = re.sub('\\s+',' ',s)\r\n s = re.sub('[!@#$_]', '', s)\r\n s = s.replace(\"co\",\"\")\r\n s = s.replace(\"https\",\"\")\r\n s = s.replace(\",\",\"\")\r\n s = s.replace(\"[\\w*\",\" \")\r\n return s\r\n\r\n\r\n\r\nprint(\"Histograms based on text\")\r\ndf = pd.read_csv(\"GenderClassification.csv\",encoding='ISO-8859-1')\r\ndf = df[df.gender != 'unknown']\r\n#print(df)\r\n\r\ndf['Tweets'] = [cleaning(s) for s in df['text']]\r\ndf['Description'] = [cleaning(s) for s in df['description']]\r\n\r\n\r\n\r\n#We get a set of English stop words using the line:\r\nstop = set(stopwords.words('english'))\r\n\r\nprint(stop)\r\n\r\n\r\ndf['Tweets'] = 
df['Tweets'].str.lower().str.split()\r\n# Gives us all the items which are not in the listed stop words\r\ndf['Tweets'] = df['Tweets'].apply(lambda x : [item for item in x if item not in stop])\r\n\r\n\r\n\r\n\r\n\r\nMale = df[df['gender'] == 'male']\r\nMaleWords = pd.Series(' '.join(Male['Tweets'].astype(str)).lower().split(\" \")).value_counts()[:20]\r\n\r\nFemale = df[df['gender'] == 'female']\r\nFemaleWords = pd.Series(' '.join(Female['Tweets'].astype(str)).lower().split(\" \")).value_counts()[:20]\r\n\r\nBrand = df[df['gender'] == 'brand']\r\nBrandWords = pd.Series(' '.join(Brand['Tweets'].astype(str)).lower().split(\" \")).value_counts()[:10]\r\n\r\n\r\n#BrandWords.plot(stacked=True)\r\n#FemaleWords.plot(color='red',stacked=True)\r\n#MaleWords.plot(color='green',stacked=True)\r\nprint(stop)\r\nMaleWords.plot(kind='bar',stacked=True, colormap='Paired')\r\n\r\n#BrandWords.plot(kind='bar',stacked=True, colormap='Paired')\r\n\r\n\r\n#FemaleWords.plot(kind='bar',stacked=True, colormap='Paired')","sub_path":"Codes/HistogramOfWords.py","file_name":"HistogramOfWords.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"590290329","text":"'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport numpy as np\n\nimport torchvision\n\nfrom data_provider import data_provider\n# from CIFARmodel.vgg import VGG\nfrom CIFARmodel.preact_resnet import PreActResNet, PreActBottleneck, PreActBlock\n# from CIFARmodel.preact_resnet_shake import PreActResNet, PreActBottleneck, PreActBlock\n# from CIFARmodel.mobilenetv2 import MobileNetV2\nfrom utils import progress_bar\n\nimport os\nimport argparse\nimport logging\n\n# parser = argparse.ArgumentParser(\n# description='PyTorch CIFAR10 Training VGG19')\n# parser.add_argument('--savefile', type=str, default='./savefile/cifar100/withoutbias/vgg19')\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training ResNet')\nparser.add_argument('--savefile',\n type=str,\n default='./savefile/cifar10/steam/test/ResnetWithourBottleneck_new/resnet110_prune_and_train_2x_densityp0.3_eachp0.05')\n# parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training MobileNetV2')\n# parser.add_argument('--savefile', type=str, default='./savefile/cifar100/withourbias/MobileNet')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--resume',\n '-r',\n default=False,\n action='store_true',\n help='resume from checkpoint')\nparser.add_argument(\n '--data_path',\n type=str,\n default=\n '/data/home/chenzhiqiang/tensorflow/KnowledgeDistill/data/cifar/all-cifar')\nparser.add_argument('-gpu', type=str, default=\"0\")\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\nlogger = logging.getLogger(__name__)\n\n\nclass Cifar_VGG:\n def __init__(self):\n logger.info('\\n' + '*' * 100 + '\\n' + '******init******\\n' + '*' * 100)\n self.dataset = 'cifar100' if 'cifar100' in args.savefile else 'cifar10'\n if self.dataset == 'cifar10':\n self.num_classes = 10\n else:\n self.num_classes = 100\n self.batchsize = 128\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n logger.info('device:'+self.device)\n self.savefile_checkpoint = args.savefile + '/checkpoint'\n self.max_epoch = 200\n 
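 # max_epoch is the total training budget; test_every_k_epoch below sets the
 # evaluation cadence (with k = 1 the test set is evaluated after every
 # training epoch, plus once more at the final epoch).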
self.test_every_k_epoch = 1\n\n self.choose_best_acc = False\n self.best_acc = 0 # best test accuracy\n self.start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n self.train_acc = 0\n self.test_acc = 0\n self.train_loss = 0\n rate = 1\n arch = np.array([[16*rate, 16, 18], [32*rate, 32, 18], [64*rate, 64, 18]])\n arch[:, 0:2] *= 2\n\n self.train_data, self.test_data = data_provider(\n self.dataset, args.data_path, self.batchsize)\n # self.net = VGG('VGG19', self.num_classes)\n # self.block = PreActBottleneck\n self.block = PreActBlock\n # self.net = PreActResNet(self.block, arch, self.num_classes, dp=0.0)\n self.net = PreActResNet(self.block, arch, self.num_classes)\n # self.net = MobileNetV2(num_classes=self.num_classes)\n self.criterion = nn.CrossEntropyLoss()\n self.warmup = 0\n self.weight_decay = 1e-4\n self.lr = 1.\n self.lr_drop = [0, 120, 160, 180]\n self.lr_weight = 10.\n # self.lr = 0.2\n # self.lr_drop = [0, 160, 180]\n # self.lr_weight = 2.\n self.para, self.flop = self.net.cost()\n logger.info('Para:' + str(self.para) + ', Flops:' + str(self.flop))\n logger.info('weight decay:' + str(self.weight_decay) + ', lr drop:' +\n str(self.lr_drop))\n\n self.stream_epoch = 80\n self.prune_times = 96\n self.base_prune = 0\n self.dimension1 = [0, 1, 2]\n self.dimension2 = [0, 1]\n self.densityprune = 0.3\n self.eachprune = 0.05\n self.w_para = 0.5\n self.w_flop = 1. - self.w_para\n logger.info('stream epoch:' + str(self.stream_epoch) +\n ', prune times:' + str(self.prune_times) +\n ', prune base:' + str(self.base_prune) +\n ', prune dimensions:' + str(self.dimension1) +\n str(self.dimension2))\n\n self.stream_arch = {\n 'arch': [arch],\n 'para': [self.para],\n 'flop': [self.flop],\n 'cost': [0]\n }\n self.prenet = {\n 'net': PreActResNet(self.block, arch, self.num_classes),\n 'acc': 0,\n 'cost': 0,\n 'gate_set': np.array(self.net.gate_set),\n 'para': self.para,\n 'flop': self.flop\n }\n\n self.bestnet = {\n 'net': PreActResNet(self.block, arch, self.num_classes),\n 'acc': 0,\n 'cost': 0,\n 'gate_set': np.array(self.net.gate_set),\n 'para': self.para,\n 'flop': self.flop\n }\n\n self.current = {\n 'net': self.net,\n 'acc': 0,\n 'cost': 0,\n 'gate_set': self.net.gate_set,\n 'para': self.para,\n 'flop': self.flop\n }\n\n def run(self):\n\n def resume():\n # Load checkpoint.\n logger.info('==> Resuming from checkpoint..')\n assert os.path.exists(\n self.savefile_checkpoint), 'Error: no checkpoint directory found!'\n checkpoint = torch.load(self.savefile_checkpoint + '/ckpt_best.pth')\n self.net.load_state_dict(checkpoint['net'])\n self.best_acc = checkpoint['acc']\n self.start_epoch = checkpoint['epoch']\n # self.net.gate_set[:, :] = checkpoint['gate_set'][:, :]\n # self.net.gate_set[:, :] = 0\n # print(self.net.gate_set)\n # self.net._set_gate()\n\n def train(epoch):\n # logger.info('\\nEpoch: %d' % epoch)\n def apply_dropout(m):\n if type(m) == nn.Dropout:\n m.eval()\n self.net.train()\n # if epoch > self.lr_drop[1]:\n # self.net.apply(apply_dropout)\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(self.train_data):\n inputs, targets = inputs.to(self.device), targets.to(\n self.device)\n self.optimizer.zero_grad()\n outputs = self.net(inputs)\n loss = self.criterion(outputs, targets)\n loss.backward()\n self.optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n if batch_idx % 10 == 0 or batch_idx == len(self.train_data)-1:\n 
progress_bar(\n batch_idx, len(self.train_data),\n 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %\n (train_loss / (batch_idx + 1), 100. * correct / total,\n correct, total))\n\n self.train_acc = correct / total\n self.train_loss = train_loss / len(self.train_data)\n pass\n\n def test(epoch):\n global best_acc\n self.net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(self.test_data):\n inputs, targets = inputs.to(self.device), targets.to(\n self.device)\n outputs = self.net(inputs)\n loss = self.criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n if batch_idx % 10 == 0 or batch_idx == len(\n self.test_data)-1:\n progress_bar(\n batch_idx, len(self.test_data),\n 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %\n (test_loss / (batch_idx + 1),\n 100. * correct / total, correct, total))\n\n # Save checkpoint.\n self.test_acc = correct / total\n logger.info(\n 'epoch: %d, loss: %f; accuracy: train: %f, test: %f' %\n (epoch, self.train_loss, self.train_acc, self.test_acc))\n if self.test_acc > self.best_acc:\n logger.info('Save best model')\n self.best_acc = self.test_acc\n savemodel(epoch, 'best')\n if epoch == self.max_epoch:\n logger.info('Save final model')\n savemodel(epoch, 'final')\n\n def stream(epoch):\n def saveatob(a, b):\n b['acc'] = a['acc']\n b['cost'] = a['cost']\n b['gate_set'][:, :] = a['gate_set'][:, :]\n b['net'].load_state_dict(a['net'].state_dict())\n b['para'] = a['para']\n b['flop'] = a['flop']\n\n def densityofa(a, b):\n # return (b['acc'] - a['acc']) / (\n # ((b['para'] - a['para'])**self.w_para) *\n # ((b['flop'] - a['flop'])**self.w_flop))\n return (b['acc'] - a['acc']) / (\n ((b['para']**self.w_para) * (b['flop']**self.w_flop)) -\n ((a['para']**self.w_para) * (a['flop']**self.w_flop)))\n\n def calstream(current, prenet):\n def cost(p0, f0, p1, f1):\n return ((p0**self.w_para) *\n (f0**self.w_flop)) - ((p1**self.w_para) *\n (f1**self.w_flop))\n\n st = np.array(current['gate_set'])\n st[:] = -1\n for i in self.dimension1:\n for j in self.dimension2:\n st[i, j] = current['gate_set'][i, j]\n self.base_prune = np.min(st[st > 0]) * self.densityprune + 1\n st[:] = 0\n s1, s2 = st.shape\n p0, f0 = current['net'].cost()\n for i in self.dimension1:\n for j in self.dimension2:\n current['gate_set'][i, j] -= self.base_prune\n if current['gate_set'][i, j] < 0:\n current['gate_set'][i, j] = 0\n p1, f1 = current['net'].cost()\n current['gate_set'][:, :] = prenet['gate_set'][:, :]\n st[i, j] = cost(p0, f0, p1, f1)\n\n m = np.max(st)\n\n cur, pre = 0, 0\n for i in self.dimension1:\n for j in self.dimension2:\n k = 1\n while True:\n current['gate_set'][i, j] -= k\n t = current['gate_set'][i, j] == 0\n p1, f1 = current['net'].cost()\n current['gate_set'][:, :] = prenet[\n 'gate_set'][:, :]\n if t:\n st[i, j] = k\n break\n\n cur = cost(p0, f0, p1, f1)\n if cur == m:\n st[i, j] = k\n break\n if cur > m:\n if cur - m < m - pre:\n st[i, j] = k\n else:\n st[i, j] = k - 1\n break\n if cur < m:\n pre = cur\n k += 1\n logger.info('prune of each part:' + str(st))\n return st\n\n prenet = self.prenet\n bestnet = self.bestnet\n current = self.current\n train(epoch)\n train(epoch)\n # test(epoch)\n current['para'], current['flop'] = self.net.cost()\n current['acc'] = self.train_acc\n saveatob(current, prenet)\n s1, s2 = current['gate_set'].shape\n st = calstream(current, prenet)\n print(s1, s2)\n for i in self.dimension1:\n for j in 
self.dimension2:\n saveatob(prenet, current)\n current['gate_set'][i, j] -= st[i, j]\n if current['gate_set'][i, j] == 0:\n continue\n logger.info(self.net.gate_set)\n self.net._set_gate()\n for k in range(2):\n train(epoch)\n para, flop = self.net.cost()\n current['para'], current['flop'] = para, flop\n current['acc'] = self.train_acc\n current['cost'] = densityofa(current, prenet)\n logger.info(current['cost'])\n # test(epoch)\n # current['acc'] = self.test_acc\n if bestnet['cost'] == 0 or current['cost'] < bestnet['cost']:\n current['gate_set'][i, j] = int(prenet['gate_set'][\n i, j] * (1.-self.eachprune) - 1)\n current['para'], current['flop'] = current['net'].cost()\n saveatob(current, bestnet)\n logger.info('para:' + str(para) + ', flops:' +\n str(flop) + ', current acc:' +\n str(bestnet['acc']) + ', prenet acc:' +\n str(prenet['acc']))\n\n saveatob(bestnet, current)\n savemodel(epoch, 'stream')\n self.net._set_gate()\n self.stream_arch['arch'].append(np.array(bestnet['gate_set']))\n self.stream_arch['para'].append(bestnet['para'])\n self.stream_arch['flop'].append(bestnet['flop'])\n self.stream_arch['cost'].append(bestnet['cost'])\n bestnet['cost'] = 0\n logger.info(current['gate_set'])\n\n def show_arch():\n arch = np.array(self.stream_arch['arch'])\n s0, s1, s2 = arch.shape\n logger.info('arch:')\n for i in range(s1):\n for j in range(s2):\n logger.info(arch[:, i, j])\n logger.info('parameter and flop:')\n logger.info(self.stream_arch['para'])\n logger.info(self.stream_arch['flop'])\n logger.info(self.stream_arch['cost'])\n\n def savemodel(epoch, name='final'):\n logger.info('Saving...')\n state = {\n 'net': self.net.state_dict(),\n 'acc': self.test_acc,\n 'epoch': epoch,\n 'gate_set': self.net.gate_set\n }\n if not os.path.exists(self.savefile_checkpoint):\n os.mkdir(self.savefile_checkpoint)\n torch.save(state,\n self.savefile_checkpoint + '/ckpt_' + name + '.pth')\n\n def init_params(net=self.net):\n logger.info('Init layer parameters.')\n self.bias = []\n self.conv_weight = []\n self.bn_weight = []\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n # print(m.weight, m.bias)\n init.kaiming_normal(m.weight, mode='fan_out')\n self.conv_weight += [m.weight]\n # self.bias += [m.bias]\n # init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant(m.weight, 0.5)\n init.constant(m.bias, 0)\n self.bn_weight += [m.weight]\n self.bias += [m.bias]\n elif isinstance(m, nn.Linear):\n init.normal(m.weight, std=1e-3)\n self.conv_weight += [m.weight]\n self.bias += [m.bias]\n init.constant(m.bias, 0)\n\n init_params()\n if args.resume:\n resume()\n\n logger.info('\\n' + '*' * 100 + '\\n' + '******Start training******\\n' +\n '*' * 100)\n self.net = self.net.to(self.device)\n for i in range(self.warmup):\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=0.01,\n momentum=0.9,\n weight_decay=self.weight_decay)\n train(0)\n\n for i in range(self.max_epoch + 1):\n if i in self.lr_drop:\n self.lr /= self.lr_weight\n logger.info('learning rate:' + str(self.lr))\n # self.optimizer = optim.SGD([{\n # 'params': self.conv_weight + self.bn_weight,\n # 'weight_decay': self.weight_decay\n # }],\n # lr=self.lr,\n # momentum=0.9,\n # weight_decay=self.weight_decay)\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.lr,\n momentum=0.9,\n weight_decay=self.weight_decay)\n\n if i >= self.start_epoch:\n train(i)\n if i % self.test_every_k_epoch == 0 or i == self.max_epoch:\n logger.info('test')\n test(i)\n if i == self.stream_epoch:\n para, flop = self.net.cost()\n 
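 # snapshot the pre-pruning training accuracy and parameter cost; the while
 # loop below keeps calling stream(i) until the weighted geometric cost
 # para**w_para * flop**w_flop falls under the hard-coded target budget.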
self.current['acc'] = self.train_acc\n self.current['cost'] = para\n while para**self.w_para * flop**self.w_flop > (\n 1722416.0**self.w_para) * (261014144.0**self.w_flop):\n # while para/self.para > 0.36/1.8:\n # for k in range(self.prune_times):\n logger.info('para rate:' + str(para / self.para) +\n ',flop rate: ' + str(flop / self.flop))\n stream(i)\n para, flop = self.net.cost()\n show_arch()\n # for j in range(30):\n # while flop/self.flop > 0.5**((j+1.)/30.):\n # stream(i)\n # para, flop = self.net.cost()\n # train(i)\n # train(i)\n\n pass\n\n\ndef logset():\n logger.debug('Logger set')\n logger.setLevel(level=logging.INFO)\n\n path = os.path.dirname(args.savefile)\n print('dirname: ' + args.savefile)\n if not os.path.exists(path):\n os.makedirs(path)\n if not os.path.exists(args.savefile):\n os.makedirs(args.savefile)\n\n handler = logging.FileHandler(args.savefile + '_logger.txt')\n\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logger.addHandler(console)\n\n return\n\n\nif __name__ == '__main__':\n logset()\n a = Cifar_VGG()\n a.run()","sub_path":"mymodel.py","file_name":"mymodel.py","file_ext":"py","file_size_in_byte":19748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"209105576","text":"import abjad\nimport baca\n\n\ndef H(leaf):\n return baca.sequence(leaf).helianthate(1, -1)\n\n\n# positionToPitchNumber = {\n# 'tasto': 4, 'normale': 7,\n# 'sp': 12, 'xsp': 14, 'xxsp': 16\n# }\n\n\npositionToPitchNumber = {\n \"tasto\": 4,\n \"normale\": 7,\n \"sp\": 14,\n \"xsp\": 17,\n \"xxsp\": 21,\n}\n\nPOSITION = {\n \"first alternation\": [\"normale\", \"sp\"] * 12,\n \"second alternation\": [\"sp\", \"xsp\"] * 12,\n \"third alternation\": [\"xsp\", \"xxsp\"] * 12,\n \"carved\": H(\n [[\"normale\", \"normale\", \"sp\"], [\"normale\", \"xsp\", \"xsp\"], [\"xxsp\", \"xxsp\"]]\n ),\n \"jagged ponticello\": H(\n [[\"sp\", \"xsp\", \"sp\", \"xsp\", \"xxsp\"], [\"xsp\", \"xxsp\", \"xsp\", \"sp\", \"xsp\", \"sp\"]]\n ),\n \"lifted tasto\": [\"tasto\", \"tasto\", \"normale\", \"tasto\", \"tasto\", \"tasto\", \"normale\"]\n * 8,\n \"glow\": [\"tasto\", \"normale\"] * 8\n + [\"normale\", \"xsp\"] * 12\n + H([[\"normale\", \"normale\", \"sp\", \"xsp\"], [\"xxsp\", \"xxsp\"]]),\n}\n\n\ndef _materialIndexToPosition(material, i):\n return POSITION[material][i % len(POSITION[material])]\n\n\ndef treatOneLeaf(cur, treatments, i):\n\n if hasattr(cur, \"notehead\"):\n cur.notehead.clear()\n cur.tremolo.clear()\n cur.glissando.clear()\n\n for treatment in treatments:\n if treatment == \"note\":\n if not cur.kind(\"Note\"):\n cur = abjad.Note(cur)\n cur.pitch = 4\n elif treatment == \"rest\":\n if not cur.kind(\"Rest\"):\n cur = abjad.Rest(cur)\n elif treatment == \"skip\":\n if not cur.kind(\"Skip\"):\n cur = abjad.Skip(cur)\n elif treatment == \"transparent\":\n cur.dots.transparent = True\n cur.stem.transparent = True\n if hasattr(cur, \"notehead\"):\n cur.notehead.transparent = True\n cur.notehead.no_ledgers = True\n # TODO - implement as a managed _Spanner attribute\n if cur.kind(\"Note\"):\n cur.beam._refuse = True\n elif treatment == \"harmonic-black\":\n cur.notehead.style = treatment\n elif treatment == \"headless\":\n cur.notehead.transparent = True\n cur.notehead.no_ledgers = True\n elif treatment == \"diamond\":\n cur.notehead.style = \"harmonic\"\n elif 
treatment == \"tremolo\":\n cur.stem.tremolo = 128\n elif treatment == \"glissando\":\n cur.glissando = True\n elif treatment in positionToPitchNumber.keys():\n cur.pitch = positionToPitchNumber[treatment]\n elif treatment in POSITION.keys():\n position = _materialIndexToPosition(treatment, i)\n cur.pitch = positionToPitchNumber[position]\n else:\n raise ValueError(\"unknown %s treatment.\" % treatment)\n\n\ndef BowTreatment():\n return {\n \"lone\": None,\n \"first\": None,\n \"last\": None,\n \"flanked\": None,\n \"left\": None,\n \"right\": None,\n \"middle\": None,\n \"default\": None,\n }\n\n\ndef iterate_over_structure(voice, start, stop, notes=BowTreatment, rests=BowTreatment):\n leaves = voice.leaves[start : stop + 1]\n for i, cur in enumerate(leaves):\n if 0 < i:\n prev = cur.prev\n else:\n prev = None\n if i < len(leaves) - 1:\n next = cur.next\n else:\n next = None\n positions = []\n if len(leaves) == 1:\n positions.append(\"lone\")\n if i == 1:\n positions.append(\"first\")\n if i == len(leaves) - 1:\n positions.append(\"last\")\n if (\n prev\n and next\n and prev.history[\"class\"] != cur.history[\"class\"] != next.history[\"class\"]\n ):\n positions.append(\"flanked\")\n if not prev or (prev and prev.history[\"class\"] != cur.history[\"class\"]):\n positions.append(\"left\")\n if not next or (next and cur.history[\"class\"] != next.history[\"class\"]):\n positions.append(\"right\")\n if (\n prev\n and next\n and prev.history[\"class\"] != cur.history[\"class\"] != next.history[\"class\"]\n ):\n positions.append(\"middle\")\n positions.append(\"default\")\n if cur.history[\"class\"] == \"Note\":\n for position in positions:\n if notes[position]:\n # print(i, positions, position)\n treatOneLeaf(cur, notes[position], i)\n break\n else:\n for position in positions:\n if rests[position]:\n # print(i, positions, position)\n treatOneLeaf(cur, rests[position], i)\n break\n","sub_path":"lidercfeny/etc/violin/bowtreatments.py","file_name":"bowtreatments.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"249415940","text":"def solution(n, k):\n dp = [[0] * (k + 1) for _ in range(n + 1)]\n\n for i in range(1, n + 1):\n weight, value = map(int, input().split())\n for j in range(1, k + 1):\n if j < weight:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - weight] + value)\n\n return dp[n][k]\n\n\nn, k = map(int, input().split())\nprint(solution(n, k))\n","sub_path":"BaekJoon/12865.py","file_name":"12865.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"151874822","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport timeit\n\n\nstart = timeit.default_timer()\n\nN = 100\ndx = 2*np.pi/N\ndt = 0.49\ngamma = dt/(dx**2)\n\n\nx = np.linspace(0, 2*np.pi-dx, 100)\nt = np.arange(0, 2, dt)\n\n\nT = np.zeros((len(t), N))\n\nfor i in range(N):\n T[0][i] = np.cos(x[i])**3\n \n\n\nfor i in range(1, len(t)):\n for j in range(N):\n \n T[i][j] = T[i-1][j]\n \n for z in range(N): \n for k in range(N):\n \n if k == 0:\n T[i][k] = (T[i-1][k]+ gamma*(T[i][k+1]+T[i][N-1]))/(1+2*gamma)\n \n elif k == N-1:\n T[i][k] = (T[i-1][k]+ gamma*(T[i][0]+T[i][k-1]))/(1+2*gamma)\n \n else:\n T[i][k] = (T[i-1][k]+ gamma*(T[i][k+1]+T[i][k-1]))/(1+2*gamma)\n\n\nstop = timeit.default_timer()\n\nprint('Time: ', stop - start) 
\n\n\n\n\n","sub_path":"implicit.py","file_name":"implicit.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"342358791","text":"#!/usr/bin/python3\n\"\"\"\nRoot file of our application\n\"\"\"\nfrom models import storage\nfrom api.v1.views import app_views\nfrom os import getenv\nfrom flask import Flask, make_response, jsonify\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.register_blueprint(app_views)\napp.config.update(JSONIFY_PRETTYPRINT_REGULAR=True)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"0.0.0.0\"}})\n\n\n@app.teardown_appcontext\ndef teardown(error):\n \"\"\"Tear down method to close storage\"\"\"\n storage.close()\n\n\n@app.errorhandler(404)\ndef not_found(error):\n \"\"\"Return custom message for 404 errors\"\"\"\n return make_response(jsonify({'error': 'Not found'}), 404)\n\nif __name__ == \"__main__\":\n host_ip = '0.0.0.0'\n port_num = 5000\n if getenv('HBNB_API_HOST'):\n host_ip = getenv('HBNB_API_HOST')\n if getenv('HBNB_API_PORT'):\n port_num = int(getenv('HBNB_API_PORT'))\n app.run(host=host_ip, port=port_num, threaded=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"286949384","text":"import numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport plotly\r\nimport plotly.graph_objs as go\r\nimport scipy.stats as stats\r\nimport plotly.express as px\r\n\r\ndf = pd.read_csv('../googlePlayStore/googleplaystore.csv')\r\n\r\n\r\ndf.drop_duplicates('App', 'first', inplace=True)\r\ndf = df[df['Installs'] != 'Free']\r\ndf = df[df['Installs'] != 'Paid']\r\ndf = df[df['Android Ver'] != np.nan]\r\ndf = df[df['Android Ver'] != 'NaN']\r\n# df.to_csv('../googlePlayStore/googleplaystoreFilted.csv')\r\n# \r\n# print('the number of apps in dataset:', len(df))\r\n# print(df.sample(7))\r\n\r\n# 规范Installs的数据,去掉 '+' 和 ','\r\ndf['Installs'] = df['Installs'].apply(lambda x: str(x).replace('+', '') if '+' in str(x) else x)\r\ndf['Installs'] = df['Installs'].apply(lambda x: str(x).replace(',', '') if ',' in str(x) else x)\r\ndf['Installs'] = df['Installs'].apply(lambda x: float(x))\r\n# print(df['Installs'].values)\r\n\r\n# 统一size的单位,去除varies with device等噪音\r\ndf['Size'] = df['Size'].apply(lambda x: str(x).replace('Varies with device', 'NaN') if 'Varies with device' in str(x) else x)\r\ndf['Size'] = df['Size'].apply(lambda x: str(x).replace(',', '') if ',' in str(x) else x)\r\ndf['Size'] = df['Size'].apply(lambda x: str(x).replace('M', '') if 'M' in str(x) else x)\r\ndf['Size'] = df['Size'].apply(lambda x: float(str(x).replace('k', '')) / 1000 if 'k' in str(x) else x)\r\ndf['Size'] = df['Size'].apply(lambda x: float(x))\r\n\r\n# 去掉price的单位\r\ndf['Price'] = df['Price'].apply(lambda x: str(x).replace('$', '') if '$' in str(x) else x)\r\ndf['Price'] = df['Price'].apply(lambda x: float(x))\r\n\r\ndf['Reviews'] = df['Reviews'].apply(lambda x: float(x))\r\n# df.to_csv('../googlePlayStore/googleplaystoreFilted.csv')\r\n\r\n\r\n# # basic EDA\r\n# x = df['Rating'].dropna()\r\n# y = df['Size'].dropna()\r\n# z = df['Installs'][df.Installs != 0].dropna()\r\n# p = df['Reviews'][df.Reviews != 0].dropna()\r\n# t = df['Type'].dropna()\r\n# price = df['Price']\r\n# p = sns.pairplot(pd.DataFrame(list(zip(x, y, np.log(z), np.log10(p), t, price)), columns=['Rating', 'Size', 'Installs', 'Reviews', 
'Type', 'Price']), hue='Type', palette='Set2', diag_kind=\"hist\")\r\n# plt.savefig('EDA.png')\r\n\r\n# # Explore which app categories are most popular\r\n# numberOfCategory = df['Category'].value_counts().sort_values(ascending=False)\r\n# # print(numberOfCategory)\r\n# data = [go.Pie(\r\n# labels = numberOfCategory.index,\r\n# values = numberOfCategory.values,\r\n# hoverinfo = 'label+value')]\r\n# plotly.offline.plot(data, filename='populationOfCategory.html')\r\n\r\n# # Rating distribution and average rating\r\n# data = [go.Histogram(\r\n# x = df.Rating,\r\n# xbins = {'start': 1, 'size': 0.1, 'end': 5}\r\n# )]\r\n# print('Average app rating = ', np.mean(df['Rating']))\r\n# plotly.offline.plot(data, filename='ratingDistribution.html')\r\n\r\n# # One-way ANOVA to verify that rating distributions differ across app categories\r\n# # The data are not normally distributed, so one-way ANOVA cannot be used\r\n# f = stats.f_oneway(df.loc[df.Category == 'FAMILY']['Rating'].dropna(),\r\n# df.loc[df.Category == 'GAME']['Rating'].dropna(),\r\n# df.loc[df.Category == 'TOOLS']['Rating'].dropna(),\r\n# df.loc[df.Category == 'BUSINESS']['Rating'].dropna(),\r\n# df.loc[df.Category == 'MEDICAL']['Rating'].dropna(),\r\n# df.loc[df.Category == 'PERSONALIZATION']['Rating'].dropna(),\r\n# df.loc[df.Category == 'PRODUCTIVITY']['Rating'].dropna(),\r\n# df.loc[df.Category == 'LIFESTYLE']['Rating'].dropna(),\r\n# df.loc[df.Category == 'FINANCE']['Rating'].dropna()\r\n# )\r\n# # print(f)\r\n# groups = df.groupby('Category').filter(lambda x: len(x) > 286).reset_index()\r\n# array = groups['Rating'].hist(by = groups['Category'], sharex=True, figsize=(20,20))\r\n# plt.savefig('OneWayAnova.png')\r\n\r\n# # App categories with better ratings\r\n# groups = df.groupby('Category').filter(lambda x: len(x) > 170).reset_index()\r\n# print('Average Rating = ', np.nanmean(list(groups['Rating'])))\r\n# layout = {\r\n# 'title': 'App ratings across major categories',\r\n# 'xaxis': {'tickangle': -40},\r\n# 'yaxis': {'title': 'Rating'},\r\n# 'plot_bgcolor': 'rgb(250,250,250)',\r\n# 'shapes': [{\r\n# 'type': 'line',\r\n# 'x0': -.5,\r\n# 'y0': np.nanmean(list(groups.Rating)),\r\n# 'x1': 19,\r\n# 'y1': np.nanmean(list(groups.Rating)),\r\n# 'line': {'dash': 'dash'}\r\n# }]\r\n# }\r\n# data = [{\r\n# 'y': df.loc[df.Category == category]['Rating'],\r\n# 'type': 'violin',\r\n# 'name': category,\r\n# 'showlegend': False\r\n# } for i, category in enumerate(list(set(groups.Category)))]\r\n# plotly.offline.plot({'data': data, 'layout': layout}, filename='BestPerformingCategory.html')\r\n\r\n\r\n# # Effect of app size\r\n# groups = df.groupby('Category').filter(lambda x: len(x) > 50).reset_index()\r\n# sns.set_style('darkgrid')\r\n# ax = sns.jointplot(df.Size, df.Rating, kind='hex')\r\n# plt.savefig('sizeVSrating.png')\r\n\r\n# subdf = df[df.Size > 40]\r\n# tmpGroups = subdf.groupby('Category').filter(lambda x: len(x) > 20)\r\n# layout = {\r\n# 'title': 'Rating vs Size',\r\n# 'xaxis': {'title': 'Rating'},\r\n# 'yaxis': {'title': 'Size(in MB)'},\r\n# 'plot_bgcolor': 'black'\r\n# }\r\n# data = [{\r\n# 'x': tmpGroups.loc[subdf.Category == category]['Rating'],\r\n# 'type': 'scatter',\r\n# 'y': subdf['Size'],\r\n# 'name': category,\r\n# 'mode': 'markers',\r\n# 'showlegend': True\r\n# } for i, category in enumerate(['GAME', 'FAMILY'])]\r\n# plotly.offline.plot({'data': data, 'layout': layout}, filename='ratingVSsize.html')\r\n\r\n\r\n# # Effect of app price\r\n# paidApps = df[df.Price > 0]\r\n# p = sns.jointplot('Price', 'Rating', paidApps)\r\n# plt.savefig('priceVSrating.png')\r\n\r\n# subdf = df[df.Category.isin(['GAME', 'FAMILY', 'PHOTOGRAPHY', 'MEDICAL', 'TOOLS', 'FINANCE', 'LIFESTYLE', 'BUSINESS'])]\r\n# sns.set_style('darkgrid')\r\n# fig, ax = 
plt.subplots()\r\n# fig.set_size_inches(15, 8)\r\n# p = sns.stripplot(x = 'Price', y = 'Category', data = subdf, jitter=True, linewidth=1)\r\n# title = ax.set_title('App price trend across categories')\r\n# plt.savefig('priceVScategory.png')\r\n\r\n# filtedSubdf = subdf[subdf.Price < 100]\r\n# fig, ax = plt.subplots()\r\n# fig.set_size_inches(15, 8)\r\n# p = sns.stripplot(x = 'Price', y = 'Category', data = filtedSubdf, jitter=True, linewidth=1)\r\n# title = ax.set_title('App price trend across categories - after filtering for junk apps')\r\n# plt.savefig('priceVScategory-filted.png')\r\n\r\n# newDF = df.groupby(['Category', 'Type']).agg({'App': 'count'}).reset_index()\r\n# # print(newDF)\r\n# outerGroupNames = ['GAME', 'FAMILY', 'MEDICAL', 'TOOLS']\r\n# outerGroupValues = [len(df.App[df.Category == category]) for category in outerGroupNames]\r\n# # print(majorCateNames, majorCateValues)\r\n# a, b, c, d = [plt.cm.Blues, plt.cm.Reds, plt.cm.Greens, plt.cm.Purples]\r\n# innerGroupNames = ['Paid', 'Free'] * 4\r\n# innerGroupValues = []\r\n# for category in outerGroupNames:\r\n# for t in ['Paid', 'Free']:\r\n# x = newDF[newDF.Category == category]\r\n# try:\r\n# innerGroupValues.append(int(x.App[x.Type == t].values[0]))\r\n# except:\r\n# innerGroupValues.append(0)\r\n# explode = (0.025, 0.025, 0.025, 0.025)\r\n# fig, ax = plt.subplots(figsize = (10, 10))\r\n# ax.axis(option='equal')\r\n# mypie, texts, _ = ax.pie(outerGroupValues, radius=1.2, labels=outerGroupNames, autopct='%1.1f%%',\r\n# pctdistance=1.1, labeldistance=0.75, explode=explode, colors=[a(0.6), b(0.6), c(0.6), d(0.6)],\r\n# textprops={'fontsize': 16})\r\n# plt.setp(mypie, width=0.5, edgecolor='black')\r\n# mypie2, _ = ax.pie(innerGroupValues, radius=1.2-0.5, labels=innerGroupNames, labeldistance=0.7,\r\n# textprops={'fontsize': 12}, colors=[a(0.4), a(0.2), b(0.4), b(0.2), c(0.4), c(0.2), d(0.4), d(0.2)])\r\n# plt.setp(mypie2, width=0.5, edgecolor='black')\r\n# plt.margins(0,0)\r\n# plt.tight_layout()\r\n# plt.savefig('freePaidPie.png')\r\n\r\n# newdf = df.copy()\r\n# newdf['PriceBand'] = None\r\n# newdf.loc[df.Price == 0, 'PriceBand'] = '0 free'\r\n# newdf.loc[(df.Price > 0) & (df.Price <= 0.99), 'PriceBand'] = '1 cheap'\r\n# newdf.loc[(df.Price > 0.99) & (df.Price <= 2.99), 'PriceBand'] = '2 not cheap'\r\n# newdf.loc[(df.Price > 2.99) & (df.Price <= 4.99), 'PriceBand'] = '3 normal'\r\n# newdf.loc[(df.Price > 4.99) & (df.Price <= 14.99), 'PriceBand'] = '4 expensive'\r\n# newdf.loc[(df.Price > 14.99) & (df.Price <= 29.99), 'PriceBand'] = '5 too expensive'\r\n# newdf.loc[df.Price > 29.99, 'PriceBand'] = '6 astronomical figures'\r\n\r\n# newdf[['PriceBand', 'Rating']].groupby('PriceBand', as_index=False).mean()\r\n# p = sns.catplot(x='PriceBand', y='Rating', data=newdf, kind='boxen', height=10, palette='Pastel1')\r\n# p.set_xticklabels(rotation = 90)\r\n# p.set_ylabels('Rating')\r\n# ax = plt.gca()\r\n# fig = plt.gcf()\r\n# ax.set_title('Rating VS PriceBand')\r\n# fig.set_size_inches(8, 15)\r\n# fig.subplots_adjust(top=0.95, bottom=0.2)\r\n# plt.savefig('pricebandVSrating.png')\r\n\r\n\r\n\r\n# # Effect of price on the number of downloads\r\n# trace0 = go.Box(\r\n# y=np.log10(df['Installs'][df.Type == 'Paid']),\r\n# name='Paid',\r\n# marker=dict(color='rgb(214,12,140)')\r\n# )\r\n# trace1 = go.Box(\r\n# y=np.log10(df['Installs'][df.Type == 'Free']),\r\n# name='Free',\r\n# marker=dict(color='rgb(0, 128, 128)')\r\n# )\r\n# layout = go.Layout(\r\n# title='number of downloads of paid apps Vs free apps',\r\n# yaxis={'title': 'number of downloads (log-scaled)'}\r\n# 
)\r\n# data = [trace0, trace1]\r\n# plotly.offline.plot({'data': data, 'layout': layout}, filename='paidVSfree.html')\r\n\r\n# # Correlation analysis between attributes\r\n# corrMat = df.corr()\r\n# p = sns.heatmap(corrMat, annot=True, cmap=sns.diverging_palette(220, 20, as_cmap=True))\r\n# plt.savefig('correlations.png')\r\n# newdf = df.copy()\r\n# newdf = newdf[newdf.Reviews > 10]\r\n# newdf = newdf[newdf.Installs > 0]\r\n# newdf['Installs'] = np.log10(df['Installs'])\r\n# newdf['Reviews'] = np.log10(df['Reviews'])\r\n# p = sns.lmplot('Reviews', 'Installs', data=newdf)\r\n# fig = plt.gcf()\r\n# fig.set_size_inches(8, 8)\r\n# ax = plt.gca()\r\n# ax.set_title('Number of Reviews Vs Number of Downloads (Log Scaled)')\r\n# plt.savefig('reviewsVSdownloads.png')\r\n\r\n# KMeans-cluster\r\n\r\ndef distEclud(vecA, vecB):\r\n    return np.sqrt(np.sum(np.power(vecA - vecB, 2)))\r\n\r\ndef randCent(dataMat, k):\r\n    # print('ddddd=', dataMat)\r\n    n = np.shape(dataMat)[1]\r\n    centroIds = np.mat(np.zeros((k, n)))\r\n    for j in range(n):\r\n        minJ = min(dataMat[:, j])\r\n        rangeJ = float(max(dataMat[:, j]) - minJ)\r\n        centroIds[:, j] = np.mat(minJ + rangeJ * np.random.rand(k, 1))\r\n    return centroIds\r\n\r\ndef getDistance(dataMat, centList):\r\n    m,n = np.shape(centList)\r\n    distance = np.zeros((m, np.shape(dataMat)[0]))\r\n    for i in range(m):\r\n        for j in range(np.shape(dataMat)[0]):\r\n            distance[i, j] = distEclud(centList[i], dataMat[j])\r\n    distance = np.min(distance, axis=0)\r\n    return distance\r\n\r\ndef rollSelect(distance):\r\n    n = len(distance)\r\n    cnt = np.zeros(n)\r\n    p = distance / np.sum(distance)\r\n    cumP = np.cumsum(p)\r\n    # print(cumP)\r\n    for i in range(int(0.15 * n)):\r\n        randNum = np.random.random()\r\n        for j in range(n):\r\n            if cumP[j] >= randNum:\r\n                cnt[j] += 1\r\n                break\r\n    selectIndex = sorted(cnt)[-1]\r\n    return selectIndex\r\n\r\ndef plusCent(dataMat, k):\r\n    centList = dataMat[np.random.randint(np.shape(dataMat)[0])]\r\n    # print(type(centList))\r\n    for i in range(k-1):\r\n        distance = getDistance(dataMat, centList)\r\n        selectIndex = rollSelect(distance)\r\n        # print('select:', selectIndex)\r\n        centList = np.row_stack((centList, dataMat[int(selectIndex)]))\r\n    return centList\r\n\r\ndef kMeans(dataMat, k, distMeans=distEclud, createCent=plusCent):\r\n    m = np.shape(dataMat)[0]\r\n    clusterAssment = np.mat(np.zeros((m,2)))\r\n    centroIds = createCent(dataMat, k)\r\n    # print('123124124=',centroIds)\r\n    clusterChanged = True\r\n    while clusterChanged:\r\n        clusterChanged = False\r\n        for i in range(m):\r\n            minDist = np.inf\r\n            minIndex = -1\r\n            for j in range(k):\r\n                distJI = distMeans(centroIds[j, :], dataMat[i, :])\r\n                if distJI < minDist:\r\n                    minDist = distJI\r\n                    minIndex = j\r\n            if clusterAssment[i, 0] != minIndex: # cluster 0 starts with no recorded distance\r\n                clusterChanged = True\r\n            clusterAssment[i, :] = minIndex, minDist ** 2\r\n        # print(centroIds)\r\n        for cent in range(k):\r\n            ptsInClust = dataMat[np.nonzero(clusterAssment[:, 0].A == cent)[0]]\r\n            centroIds[cent, :] = np.mean(ptsInClust, axis=0)\r\n    return centroIds, clusterAssment\r\n\r\ndef biKmeans(dataMat, k ,distMeans=distEclud):\r\n    # print(type(dataMat))\r\n    m = np.shape(dataMat)[0]\r\n    clusterAssment = np.mat(np.zeros((m, 2)))\r\n    centroId0 = np.mean(dataMat, axis=0).tolist()[0]\r\n    # print(centroId0)\r\n    centList = [centroId0]\r\n    # print(centList)\r\n    for j in range(m):\r\n        # print(np.mat(centroId0))\r\n        # print(dataMat.iloc[j, :])\r\n        clusterAssment[j, 1] = distMeans(np.mat(centroId0), dataMat[j, :]) ** 2\r\n\r\n    while len(centList) < k:\r\n        lowestSSE = np.inf\r\n
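        # Bisecting step: try a 2-means split of every current cluster and keep the\r\n        # split whose total SSE (split points + unsplit points) is lowest.\r\n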
        for i in range(len(centList)):\r\n            ptsInCurrCluster = dataMat[np.nonzero(clusterAssment[:, 0].A == i)[0], :]\r\n            # print('pppppp=',ptsInCurrCluster)\r\n            centroIdMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeans)\r\n            sseSplit = np.sum(splitClustAss[:, 1])\r\n            sseNotSplit = np.sum(clusterAssment[np.nonzero(clusterAssment[:, 0].A != i)[0], 1])\r\n            print('sseSplit, sseNotSplit', sseSplit, sseNotSplit)\r\n            if (sseSplit + sseNotSplit) < lowestSSE:\r\n                bestCentToSplit = i\r\n                bestNewCents = centroIdMat\r\n                bestClustAss = splitClustAss.copy()\r\n                lowestSSE = sseSplit + sseNotSplit\r\n        \r\n        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList)\r\n        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit\r\n        print('the bestCentToSplit is :', bestCentToSplit)\r\n        print('the len of bestClustAss is :', len(bestClustAss))\r\n\r\n        centList[bestCentToSplit] = bestNewCents[0, :].tolist()[0]\r\n        centList.append(bestNewCents[1, :].tolist()[0])\r\n        clusterAssment[np.nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss\r\n    return np.mat(centList), clusterAssment\r\n\r\ndef normalization(x):\r\n    Range = max(x) - min(x)\r\n    m = min(x)\r\n    return [(float(i) - m) / Range for i in x]\r\n\r\nnewdf = df[['Rating', 'Size', 'Price']]\r\nnewdf = newdf.dropna()\r\nnewdf = newdf.loc[(newdf.Price > 0) & (newdf.Price < 5)]\r\nnewdf_copy = newdf.copy()\r\n# print(newdf)\r\n# print(newdf.reset_index())\r\n\r\n\r\n# print(newdf.describe())\r\n# newdf.to_csv('../googlePlayStore/tmpdf.csv')\r\n\r\nnewdf['Rating'] = normalization(newdf['Rating']) \r\n# newdf['Reviews'] = normalization(newdf['Reviews']) \r\nnewdf['Size'] = normalization(newdf['Size'])\r\nnewdf['Price'] = normalization(newdf['Price'])\r\n# newdf['Installs'] = normalization(newdf['Installs'])\r\n\r\n# centList, clusterAssment = kMeans(np.mat(newdf), 3)\r\n# # print(clusterAssment)\r\n# clusterAssment = pd.DataFrame(clusterAssment)\r\n# clusterAssment.to_csv('../googlePlayStore/tmp.csv')\r\n# # print('centList=', centList)\r\n# # print('clusterAssment=', clusterAssment)\r\n\r\n# newdf_copy = newdf_copy.reset_index()\r\n# newdf_copy['Cluster'] = clusterAssment[0]\r\n# # print(newdf)\r\n# f = px.scatter_3d(newdf_copy, x='Rating', y='Size', z='Price', color='Cluster')\r\n# plotly.offline.plot(f, filename='clusterKmeans++.html')\r\n\r\n## Choosing the number of clusters k\r\n# sse = [0]\r\n# dfMat = np.mat(newdf)\r\n# for k in range(1,6):\r\n# centList, clusterAssment = biKmeans(dfMat, k)\r\n# sum = 0\r\n# for i in range(np.shape(dfMat)[0]):\r\n# sum += distEclud(centList[int(clusterAssment[i, 0])], dfMat[i])\r\n# sse.append(sum)\r\n# print(sse)\r\n# plt.plot(sse)\r\n# # plt.xticks(range(1,6))\r\n# plt.xlim(1, 5)\r\n# plt.ylim(80, 180)\r\n# plt.xlabel('K')\r\n# plt.ylabel('SSE')\r\n# plt.savefig('selectK.png')\r\n# plt.show()","sub_path":"edaCode/EdaOfPlaystore.py","file_name":"EdaOfPlaystore.py","file_ext":"py","file_size_in_byte":16239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"281120488","text":"import os, re, numpy as np, pandas as pd, tqdm, glob, cv2, matplotlib.pyplot as plt\n\n\"\"\"\nCOMMON_DATASET:\n+ __init__(self, root_path = \".\", cache_name = 'cache.hdf5')\n+ \n\"\"\"\nclass COMMON_DATASET(object):\n    def __init__(self, root_path = \".\", cache_name = 'cache.hdf5', \n                 video_pattern_re = r'gesture_(?P<gesture>\\d+)[/\\\\]finger_(?P<finger>\\d+)[/\\\\]subject_(?P<subject>\\d+)[/\\\\]essai_(?P<essai>\\d+)',\n                 path_pattern_re = r'(?P<path>gesture_\\d+[/\\\\]finger_\\d+[/\\\\]subject_\\d+[/\\\\]essai_\\d+)',\n                 
image_pattern_re = r'depth_(?P<id>\\d+).png',\n                 video_pattern = 'gesture_*/finger_*/subject_*/essai_*',\n                 general_info = 'general_information.txt', skeleton_info = 'skeleton_image.txt', skeleton_world_info = 'skeleton_world.txt'):\n        \"\"\"\n        Load dataset from directory or cache file\n        \"\"\"\n        self.root_path = os.path.abspath(root_path)\n        self.cach_file = os.path.join(root_path, cache_name)\n        self.order_join = np.array(['wrist', 'palm', \n            'thumb_base', 'thumb_first_joint', 'thumb_second_joint', 'thumb_tip', \n            'index_base', 'index_first_joint', 'index_second_joint', 'index_tip', \n            'middle_base', 'middle_first_joint', 'middle_second_joint', 'middle_tip', \n            'ring_base', 'ring_first_joint', 'ring_second_joint', 'ring_tip', \n            'pinky_base', 'pinky_first_joint', 'pinky_second_joint', 'pinky_tip'])\n        if os.path.exists(self.cach_file) == False:\n            self.db_video = COMMON_DATASET.get_db_video(self.root_path, video_pattern_re = video_pattern_re, \n                path_pattern_re = path_pattern_re, video_pattern = video_pattern)\n            self.db_video.to_hdf(self.cach_file, key='db_video', table=True, mode='w')\n            print('Load [db_video] table: Finished!')\n\n            self.db_image = COMMON_DATASET.get_db_image(self.root_path, video_pattern_re = video_pattern_re, path_pattern_re = path_pattern_re, \n                video_pattern = video_pattern, image_pattern_re = image_pattern_re,\n                general_info = general_info, skeleton_info = skeleton_info, skeleton_world_info = skeleton_world_info)\n            self.db_image.to_hdf(self.cach_file, key='db_image', table=True, mode='a')\n            print('Load [db_image] table: Finished!')\n\n            self.load_extra_info()\n            self.db_video.to_hdf(self.cach_file, key='db_video', table=True, mode='a')\n            print('Save [db_video] table extra info: Finished!')\n        else:\n            db_file = pd.HDFStore(path=os.path.join(self.cach_file))\n            is_video_loaded = '/db_video' in db_file.keys()\n            is_image_loaded = '/db_image' in db_file.keys()\n            is_extra_loaded = False\n            if is_video_loaded == True:\n                is_extra_loaded = 'train_test' in db_file['db_video'].keys()\n            db_file.close()\n\n            if is_video_loaded == True:\n                db_file = pd.HDFStore(path=os.path.join(self.cach_file))\n                self.db_video = db_file['db_video']\n                db_file.close()\n                print('Load [db_video] table: Finished!')\n            else:\n                self.db_video = COMMON_DATASET.get_db_video(self.root_path, video_pattern_re = video_pattern_re, \n                    path_pattern_re = path_pattern_re, video_pattern = video_pattern)\n                self.db_video.to_hdf(self.cach_file, key='db_video', table=True, mode='a')\n                print('Save [db_video] table: Finished!')\n                is_video_loaded = True\n            # is_video_loaded\n\n            if is_image_loaded == True:\n                db_file = pd.HDFStore(path=os.path.join(self.cach_file))\n                self.db_image = db_file['db_image']\n                db_file.close()\n                print('Load [db_image] table: Finished!')\n            else:\n                self.db_image = COMMON_DATASET.get_db_image(self.root_path, video_pattern_re = video_pattern_re, path_pattern_re = path_pattern_re, \n                    video_pattern = video_pattern, image_pattern_re = image_pattern_re,\n                    general_info = general_info, skeleton_info = skeleton_info, skeleton_world_info = skeleton_world_info)\n                self.db_image.to_hdf(self.cach_file, key='db_image', table=True, mode='a')\n                print('Save [db_image] table: Finished!')\n                is_image_loaded = True\n            # is_image_loaded\n\n            if is_extra_loaded == True :\n                print('Load [db_video] table extra info: Finished!')\n            elif is_video_loaded == True:\n                self.load_extra_info()\n                self.db_video.to_hdf(self.cach_file, key='db_video', table=True, mode='a')\n                print('Save [db_video] table extra info: Finished!')\n        pass\n    # __init__\n\n
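    # --- Static helpers below parse dataset paths shaped like ---\n    # --- gesture_*/finger_*/subject_*/essai_* and build the video/image tables ---\n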
    @staticmethod\n    def parse_path(path, video_pattern_re = r'gesture_(?P<gesture>\\d+)[/\\\\]finger_(?P<finger>\\d+)[/\\\\]subject_(?P<subject>\\d+)[/\\\\]essai_(?P<essai>\\d+)',\n                   path_pattern_re = r'(?P<path>gesture_\\d+[/\\\\]finger_\\d+[/\\\\]subject_\\d+[/\\\\]essai_\\d+)'):\n        \"\"\"\n        Parse a dataset path with template: gesture_*/finger_*/subject_*/essai_*\n        output: dictionary {gesture, finger, subject, essai, path}\n        \"\"\"\n        output = {}\n        _pattern_re = re.compile(video_pattern_re)\n        output.update(_pattern_re.search(path).groupdict())\n        for key in output.keys():\n            output[key] = int(output[key])\n        _pattern_re = re.compile(path_pattern_re)\n        output['path'] = _pattern_re.search(path).groupdict()['path']\n        return output\n    # def parse_path\n    \n    @staticmethod\n    def get_video_list(root_path, video_pattern_re = r'gesture_(?P<gesture>\\d+)[/\\\\]finger_(?P<finger>\\d+)[/\\\\]subject_(?P<subject>\\d+)[/\\\\]essai_(?P<essai>\\d+)',\n                       path_pattern_re = r'(?P<path>gesture_\\d+[/\\\\]finger_\\d+[/\\\\]subject_\\d+[/\\\\]essai_\\d+)',\n                       video_pattern = 'gesture_*/finger_*/subject_*/essai_*'):\n        \"\"\"\n        Get the list of gesture video sequences from root_path\n        Output: dictionary {gesture, finger, subject, essai, path, size}\n        \"\"\"\n        template_path = os.path.join(root_path, video_pattern)\n        list_video_path = glob.glob(template_path)\n        video_info_arr = []\n        for video_path in list_video_path:\n            video_info = COMMON_DATASET.parse_path(video_path, video_pattern_re = video_pattern_re, path_pattern_re = path_pattern_re)\n            video_images = glob.glob(os.path.join(video_path, '*.png'))\n            video_info['size'] = len(video_images)\n            video_info_arr.append(video_info)\n        video_info_arr.sort(key = lambda x: (x['gesture'],x['finger'],x['subject'],x['essai']))\n        return video_info_arr\n    \n    @staticmethod\n    def get_db_video(root_path, video_pattern_re = r'gesture_(?P<gesture>\\d+)[/\\\\]finger_(?P<finger>\\d+)[/\\\\]subject_(?P<subject>\\d+)[/\\\\]essai_(?P<essai>\\d+)',\n                     path_pattern_re = r'(?P<path>gesture_\\d+[/\\\\]finger_\\d+[/\\\\]subject_\\d+[/\\\\]essai_\\d+)',\n                     video_pattern = 'gesture_*/finger_*/subject_*/essai_*'):\n        \"\"\"\n        Analyze the dataset root_path with template: gesture_*/finger_*/subject_*/essai_*\n        Parse and load the dataset and output a pandas table \n        + Columns: Gesture, Finger, Subject, Essai, Size (Number of Sequence), Path (Path of gesture)\n        \"\"\"\n        video_info_arr = COMMON_DATASET.get_video_list(root_path, video_pattern_re = video_pattern_re, path_pattern_re = path_pattern_re, video_pattern = video_pattern)\n        a_db = {'gesture':[], 'finger': [], 'subject':[], 'essai':[], 'size':[], 'path' : []}\n        for row in tqdm.tqdm(video_info_arr, desc='db_video'):\n            for key in a_db.keys():\n                a_db[key].append(row[key])\n        df_data = None\n        mask = ['gesture','finger','subject','essai', 'size', 'path']\n        for key in mask:\n            df_data = pd.concat([df_data, pd.Series(np.array(a_db[key]), name=key)], axis=1)\n        return df_data\n    # def list_videos\n\n    @staticmethod\n    def get_db_image(root_path, video_pattern_re = r'gesture_(?P<gesture>\\d+)[/\\\\]finger_(?P<finger>\\d+)[/\\\\]subject_(?P<subject>\\d+)[/\\\\]essai_(?P<essai>\\d+)',\n                     path_pattern_re = r'(?P<path>gesture_\\d+[/\\\\]finger_\\d+[/\\\\]subject_\\d+[/\\\\]essai_\\d+)',\n                     video_pattern = 'gesture_*/finger_*/subject_*/essai_*',\n                     image_pattern_re = r'depth_(?P<id>\\d+).png',\n                     general_info = 'general_information.txt', skeleton_info = 'skeleton_image.txt', skeleton_world_info = 'skeleton_world.txt'):\n        \"\"\"\n        Analyze frames and information in a gesture sequence:\n        + get_video_image: db_image with columns: path (use to link to db_video), image_id (name of frame: only id), image_name (full name: depth_id.???)\n        + get_video_info : time, 
x1, y1, x2, y2, (dx01, dy01, ..., dx21, dy21), (wx01, wy01,wz01, ..., wx21, wy21, wz21)\n        \"\"\"\n        video_info_arr = COMMON_DATASET.get_video_list(root_path, video_pattern = video_pattern)\n        df_data = None\n        for video_info in tqdm.tqdm(video_info_arr, desc='db_image'):\n            video_path= video_info[\"path\"]\n            df_row_1 = COMMON_DATASET.get_video_image(root_path, video_path, image_pattern_re = image_pattern_re)\n            df_row_2 = COMMON_DATASET.get_video_info(root_path, video_path, general_info = general_info, skeleton_info = skeleton_info, skeleton_world_info = skeleton_world_info)\n            df_row = pd.concat([df_row_1, df_row_2], axis=1)\n            df_data = pd.concat([df_data, df_row], axis=0)\n        return df_data\n    # def get_video_images\n    \n    @staticmethod\n    def get_video_image(root_path, video_path, image_pattern_re = r'depth_(?P<id>\\d+).png'):\n        \"\"\"\n        Load the frames of a gesture video sequence from root_path\n        Output db_image with columns: path (use to link to db_video), image_id (name of frame: only id), image_name (full name: depth_id.???)\n        \"\"\"\n        # image_id, image_path\n        path = os.path.join(root_path, video_path)\n        video_images = glob.glob(os.path.join(path, '*.png'))\n        video_images_arr = []\n        for image_path in video_images:\n            file_name = os.path.basename(image_path)\n            _pattern_re = re.compile(image_pattern_re)\n            image_id = int(_pattern_re.search(file_name).groupdict()['id'])\n            video_images_arr.append({'path':video_path, 'image_id':image_id, 'image_name':file_name})\n        video_images_arr.sort(key = lambda x : x['image_id'])\n        video_images_dict = {'path':[],'image_id':[],'image_name':[]}\n        for row in video_images_arr:\n            video_images_dict['path'].append(row['path'])\n            video_images_dict['image_id'].append(row['image_id'])\n            video_images_dict['image_name'].append(row['image_name'])\n        df_data = None\n        mask = ['path','image_id','image_name']\n        for key in mask:\n            df_data = pd.concat([df_data, pd.Series(np.array(video_images_dict[key]), name=key)], axis=1)\n        return df_data\n    # def get_video_images\n    \n    @staticmethod\n    def get_video_info(root_path, video_path, general_info = 'general_information.txt', skeleton_info = 'skeleton_image.txt', skeleton_world_info = 'skeleton_world.txt'):\n        \"\"\"\n        Load information in a gesture sequence from 3 files: general_information.txt, skeleton_image.txt, skeleton_world.txt\n        + general_information.txt: time, x1, y1, x2, y2 --> time, hand bounding box in depth images\n        + skeleton_image.txt: (dx01, dy01, ..., dx21, dy21), positions of the 22 hand joints in depth space\n        + skeleton_world.txt: (wx01, wy01,wz01, ..., wx21, wy21, wz21), positions of the 22 hand joints in world space\n        \"\"\"\n        path = os.path.join(root_path, video_path)\n\n        hand_bbox_txt = os.path.join(path, general_info)\n        # time, x1, y1, x2, y2\n        hand_bbox_arr = np.genfromtxt(hand_bbox_txt, delimiter=' ').astype(dtype=np.int64)\n\n        skeleton_image_txt = os.path.join(path, skeleton_info)\n        skeleton_image_arr = np.genfromtxt(skeleton_image_txt, delimiter=' ').astype(dtype=np.int32)\n\n        skeleton_world_txt = os.path.join(path, skeleton_world_info)\n        skeleton_world_arr = np.genfromtxt(skeleton_world_txt, delimiter=' ').astype(dtype=np.float)\n\n        mask = ['time','x','y','width','height']\n        df_data = None\n        for cnt in range(0, 5):\n            key = mask[cnt]\n            df_data = pd.concat([df_data, pd.Series(np.array(hand_bbox_arr[:,cnt]), name=key)], axis=1)\n\n
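        # Each frame stores 22 joints: (dx, dy) columns in depth-image coordinates,\n        # then (wx, wy, wz) columns in world coordinates, flattened joint by joint.\n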
        for cnt in range(0, 22):\n            key = 'dx%d'%(cnt)\n            df_data = pd.concat([df_data, pd.Series(np.array(skeleton_image_arr[:,cnt*2]), name=key)], axis=1)\n            key = 'dy%d'%(cnt)\n            df_data = pd.concat([df_data, pd.Series(np.array(skeleton_image_arr[:,cnt*2 + 1]), name=key)], axis=1)\n\n        for cnt in range(0, 22):\n            key = 'wx%d'%(cnt)\n            df_data = pd.concat([df_data, pd.Series(np.array(skeleton_world_arr[:,cnt*3]), name=key)], axis=1)\n            key = 'wy%d'%(cnt)\n            df_data = pd.concat([df_data, pd.Series(np.array(skeleton_world_arr[:,cnt*3 + 1]), name=key)], axis=1)\n            key = 'wz%d'%(cnt)\n            df_data = pd.concat([df_data, pd.Series(np.array(skeleton_world_arr[:,cnt*3 + 2]), name=key)], axis=1)\n        return df_data\n    # def get_video_info\n    \n    def save_db(self):\n        \"\"\"\n        Save two tables into hdf5 dataset, db_image, db_video\n        \"\"\"\n        self.db_image.to_hdf(self.cach_file, key='db_image', table=True, mode='w')\n        self.db_video.to_hdf(self.cach_file, key='db_video', table=True, mode='a')\n    \n    def cache_info(self):\n        \"\"\"\n        Print information tables of a hdf5 file (only test)\n        \"\"\"\n        db_file = pd.HDFStore(path=os.path.join(self.cach_file))\n        print(db_file)\n        db_file.close()\n    # cache_info\n\n    # INHERIT FUNCTION\n    def load_extra_info(self): # call after loading db_video, db_image\n        \"\"\"\n        Called when loading a specific dataset such as dhg or shrec\n        + dhg\n        --> need to manually modify db_video --> train_test (randomly 80%,20%)\n        --> modify db_video --> from/to has gesture\n        + shrec\n        --> db_video\n        \"\"\"\n        pass\n    # load_extra_info\n    \n    def read_view_video(self, video_path, delay = 15, verbose = 1, save_path = None, show = False):\n        \"\"\"\n        View a gesture sequence with the 5 fingertips\n        \"\"\"\n        df_video = self.db_video.loc[self.db_video['path'] == video_path]\n        df_video = df_video.iloc[0] if len(df_video)>0 else None\n        \n        if verbose == 1 and df_video is not None:\n            videoWriter = None \n            video_title = \"Gesture[%s]\"%(self.gesture_names[df_video[\"gesture\"]])\n\n            if save_path is not None:\n                save_dir = os.path.dirname(save_path)\n                if save_dir!=\"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n            # if\n\n            for idx in range(df_video[\"size\"]):\n                result = self.read_view_image(video_path = video_path, image_order = idx, show = False, verbose = 1)\n                image = result[\"depth_view\"]\n                # cv2.putText(image,'%d'%(idx + 1),(10,50), cv2.FONT_HERSHEY_SIMPLEX, 2,(255,255,255), 2,cv2.LINE_AA)\n                if show==True: cv2.imshow(video_title, image)\n                if save_path is not None:\n                    if videoWriter is None:\n                        write_path = os.path.join(save_path)\n                        videoWriter = cv2.VideoWriter(write_path, cv2.VideoWriter_fourcc(*'DIVX'), 20, (image.shape[1],image.shape[0])) # MJPG, DIVX\n                    videoWriter.write(image)\n                if show==True: cv2.waitKey(delay)\n            if show==True: cv2.destroyAllWindows()\n        # if\n        \n        return df_video\n    # read_view_video\n    \n    def read_view_image(self, video_path, image_order = 0, figsize=(18,12), verbose = 1, show = True):\n        df_images = self.db_image.loc[self.db_image['path'] == video_path]\n        bbox_columns = [\"x\",\"y\",\"width\",\"height\"]\n        joint_columns = [1, 5, 9, 13, 17, 21] # Palm, Thumb, Index, Middle, Ring, Pinky\n        if image_order>=0 and image_order < len(df_images):\n            image_name = df_images[\"image_name\"].values[image_order]\n            image_dir = df_images[\"path\"].values[image_order]\n            image_path = os.path.join(self.root_path, image_dir, image_name)\n            bbox_hand = df_images[bbox_columns].values[image_order]\n            \n            \"\"\" \n            'wrist', 'palm', \n            'thumb_base', 'thumb_first_joint', 'thumb_second_joint', 'thumb_tip', \n            'index_base', 'index_first_joint', 'index_second_joint', 'index_tip', \n            'middle_base', 'middle_first_joint', 'middle_second_joint', 'middle_tip', \n            'ring_base', 'ring_first_joint', 'ring_second_joint', 'ring_tip', \n            'pinky_base', 
'pinky_first_joint', 'pinky_second_joint', 'pinky_tip'\n            \"\"\"\n            joints = [0, 1,\n                      2, 3, 4, 5, \n                      6, 7, 8, 9,\n                      10, 11, 12, 13, \n                      14, 15, 16, 17,\n                      18, 19, 20, 21] \n            columns = []\n            for cnt in range(len(joints)):\n                px = \"dx{}\".format(joints[cnt])\n                py = \"dy{}\".format(joints[cnt])\n                columns.append(px)\n                columns.append(py)\n            skeleton_pos_arr = df_images[columns].values[image_order]\n\n            world_columns = []\n            for cnt in range(len(joints)):\n                px = \"wx{}\".format(joints[cnt])\n                py = \"wy{}\".format(joints[cnt])\n                pz = \"wz{}\".format(joints[cnt])\n                world_columns.append(px)\n                world_columns.append(py)\n                world_columns.append(pz)\n            world_skeleton_pos_arr = df_images[world_columns].values[image_order]\n\n            depth_data = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)\n            depth_image = np.multiply(depth_data, 255.0 / 2000).astype(np.ubyte)\n            depth_view = np.dstack([depth_image,depth_image,depth_image])\n\n            if verbose == 1:\n                # Draw Bounding Box\n                cv2.rectangle(depth_view, (bbox_hand[0],bbox_hand[1]),(bbox_hand[0] + bbox_hand[2],bbox_hand[1] + bbox_hand[3]), (255, 0, 0), 5, 1)\n\n                # Draw hand joints\n                line_color = (255, 255, 0)\n                joint_colors = [(128, 0, 128), (0, 0, 255), \n                    (255, 0, 0), (255, 0, 0), (255, 0, 0), (255, 0, 0),\n                    (0, 255, 0), (0, 255, 0), (0, 255, 0), (0, 255, 0),\n                    (255, 255, 0), (255, 255, 0), (255, 255, 0), (255, 255, 0), \n                    (255, 0, 255), (255, 0, 255), (255, 0, 255), (255, 0, 255), \n                    (0, 255, 255), (0, 255, 255), (0, 255, 255), (0, 255, 255)]\n                connect = [(0,1),\n                    (1,2),(2,3),(3,4),(4,5),\n                    (1,6),(6,7),(7,8),(8,9),\n                    (1,10),(10,11),(11,12),(12,13),\n                    (1,14),(14,15),(15,16),(16,17),\n                    (1,18),(18,19),(19,20),(20,21)]\n                for pair in connect:\n                    x1 = skeleton_pos_arr[pair[0]*2]\n                    y1 = skeleton_pos_arr[pair[0]*2 + 1]\n                    x2 = skeleton_pos_arr[pair[1]*2]\n                    y2 = skeleton_pos_arr[pair[1]*2 + 1]\n                    cv2.line(depth_view,(x1,y1), (x2,y2), line_color, 2)\n                    cv2.circle(depth_view, (x1,y1), 5, joint_colors[pair[0]], -1)\n                    cv2.circle(depth_view, (x2,y2), 5, joint_colors[pair[1]], -1)\n                # for\n            \n            if show == True:\n                plt.figure(figsize=figsize)\n                plt.imshow(depth_view[...,::-1])\n                plt.axis('off')\n                plt.show()\n            # if\n            # if\n\n            return {\"depth_data\": depth_data,\n                    \"bbox_hand\":bbox_hand,\n                    \"depth_pos\": skeleton_pos_arr, \n                    \"world_pos\": world_skeleton_pos_arr, \n                    \"depth_image\": depth_image, \n                    \"depth_view\": depth_view}\n        # if\n        pass\n    # read_view_image\n# class COMMON_DATASET \n \nclass DHG_DATASET(COMMON_DATASET):\n    def __init__(self, root_path = \".\", cache_name = 'cache.hdf5', \n                 video_pattern_re = r'gesture_(?P<gesture>\\d+)[/\\\\]finger_(?P<finger>\\d+)[/\\\\]subject_(?P<subject>\\d+)[/\\\\]essai_(?P<essai>\\d+)',\n                 path_pattern_re = r'(?P<path>gesture_\\d+[/\\\\]finger_\\d+[/\\\\]subject_\\d+[/\\\\]essai_\\d+)',\n                 image_pattern_re = r'depth_(?P<id>\\d+).png',\n                 video_pattern = 'gesture_*/finger_*/subject_*/essai_*',\n                 general_info = 'general_information.txt', skeleton_info = 'skeleton_image.txt', skeleton_world_info = 'skeleton_world.txt'):\n        \"\"\"\n        Load or analyze the dataset into two tables: \n        + self.db_video : Gesture, Finger, Subject, Essai, Size (Number of Sequence), Path (Path of gesture)\n        + self.db_images: \n            * path (use to link to db_video), image_id (name of frame: only id), image_name (full name: depth_id.???)\n            * time, x1, y1, x2, y2, (dx01, dy01, ..., dx21, dy21), (wx01, wy01,wz01, ..., wx21, wy21, wz21)\n        Load extra information only for dhg dataset: \n        + self.db_video : ==> 'from', 'to': at sequence idx, frame from --> to focus sequence\n        \"\"\"\n        self.gesture_names = 
np.array(['','Grab','Tap','Expand','Pinch','Rotation CW','Rotation CCW','Swipe Right','Swipe Left','Swipe Up','Swipe Down','Swipe X','Swipe V','Swipe +','Shake'])\n        self.gesture_types = np.array(['','Fine','Coarse','Fine','Fine','Fine','Fine','Coarse','Coarse','Coarse','Coarse','Coarse','Coarse','Coarse','Coarse'])\n\n        self.gesture_names_28 = np.array(['','Grab (1)','Grab (2)', 'Tap (1)', 'Tap (2)', \n            'Expand (1)', 'Expand (2)', 'Pinch (1)', 'Pinch (2)', \n            'Rotation CW (1)', 'Rotation CW (2)', 'Rotation CCW (1)', 'Rotation CCW (2)', \n            'Swipe Right (1)', 'Swipe Right (2)','Swipe Left (1)', 'Swipe Left (2)', \n            'Swipe Up (1)', 'Swipe Up (2)', 'Swipe Down (1)', 'Swipe Down (2)',\n            'Swipe X (1)', 'Swipe X (2)', 'Swipe V (1)', 'Swipe V (2)',\n            'Swipe + (1)', 'Swipe + (2)', 'Shake (1)', 'Shake (2)'])\n        self.gesture_types_28 = np.array(['','Fine', 'Fine', 'Coarse', 'Coarse',\n            'Fine','Fine', 'Fine','Fine',\n            'Fine','Fine', 'Fine','Fine',\n            'Coarse','Coarse', 'Coarse','Coarse',\n            'Coarse','Coarse', 'Coarse','Coarse',\n            'Coarse','Coarse', 'Coarse','Coarse',\n            'Coarse','Coarse', 'Coarse','Coarse'])\n\n        print(\"+ Load DHG Dataset\")\n        super().__init__(root_path, cache_name, video_pattern_re = video_pattern_re, path_pattern_re = path_pattern_re, \n            image_pattern_re = image_pattern_re, video_pattern = video_pattern,\n            general_info = general_info, skeleton_info = skeleton_info, skeleton_world_info = skeleton_world_info)\n    # __init__ \n    \n    def load_extra_info(self): # DHG\n        if self.db_video is not None and 'train_test' not in self.db_video.keys():\n            self.update_extra_info()\n            self.update_train_test()\n        pass\n    # load\n    \n    def update_train_test(self, percent = 0.8): # modify with column train_test\n        train_num = int(len(self.db_video) * percent)\n        test_num = len(self.db_video) - train_num\n        train_test_col = np.hstack([np.zeros(train_num, dtype=np.int32),np.ones(test_num, dtype=np.int32)])\n        np.random.shuffle(train_test_col)\n        df_data = pd.Series(train_test_col, name=\"train_test\")\n        self.db_video = pd.concat([self.db_video, df_data], axis=1)\n    # def update_train_test\n    \n    @staticmethod\n    def get_dhg_extra_info(root_path):\n        info_gesture_txt = os.path.join(root_path, 'informations_troncage_sequences.txt')\n        info_gesture_arr = np.genfromtxt(info_gesture_txt, delimiter=' ').astype(dtype=np.int32)\n        idx = list(range(len(info_gesture_arr)))\n        idx.sort(key = lambda x : (info_gesture_arr[x,0], info_gesture_arr[x,1], info_gesture_arr[x,2], info_gesture_arr[x,3]))\n        info_gesture_arr = info_gesture_arr[idx, :]\n        mask = ['gesture','finger','subject','essai','from', 'to']\n        df_data = None\n        for cnt in range(len(mask)):\n            key = mask[cnt]\n            df_data = pd.concat([df_data, pd.Series(info_gesture_arr[:, cnt], name=key)], axis=1)\n        return df_data\n    \n    def update_extra_info(self):\n        df_gesture = DHG_DATASET.get_dhg_extra_info(self.root_path)\n        self.db_video = pd.concat([self.db_video, df_gesture[\"from\"], df_gesture[\"to\"]], axis=1)\n# class DHG_DATASET\n\nclass SHREC_DATASET(COMMON_DATASET):\n    def __init__(self, root_path = \".\", cache_name = 'cache.hdf5', \n                 video_pattern_re = r'gesture_(?P<gesture>\\d+)[/\\\\]finger_(?P<finger>\\d+)[/\\\\]subject_(?P<subject>\\d+)[/\\\\]essai_(?P<essai>\\d+)',\n                 path_pattern_re = r'(?P<path>gesture_\\d+[/\\\\]finger_\\d+[/\\\\]subject_\\d+[/\\\\]essai_\\d+)',\n                 image_pattern_re = r'(?P<id>\\d+)_depth.png', video_pattern = 'gesture_*/finger_*/subject_*/essai_*',\n                 general_info = 'general_informations.txt', skeleton_info = 'skeletons_image.txt', skeleton_world_info = 'skeletons_world.txt'):\n        self.gesture_names = np.array(['', 
'Grab','Tap','Expand','Pinch','Rotation CW','Rotation CCW','Swipe Right','Swipe Left','Swipe Up','Swipe Down','Swipe X','Swipe +','Swipe V','Shake'])\n self.gesture_types = np.array(['', 'Fine','Coarse','Fine','Fine','Fine','Fine','Coarse','Coarse','Coarse','Coarse','Coarse','Coarse','Coarse','Coarse'])\n\n\n self.gesture_names_28 = np.array(['', 'Grab (1)', 'Grab (2)', 'Tap (1)', 'Tap (2)', \n 'Expand (1)', 'Expand (2)', 'Pinch (1)', 'Pinch (2)', \n 'Rotation CW (1)', 'Rotation CW (2)', 'Rotation CCW (1)', 'Rotation CCW (2)', \n 'Swipe Right (1)', 'Swipe Right (2)', 'Swipe Left (1)', 'Swipe Left (2)', \n 'Swipe Up (1)', 'Swipe Up (2)', 'Swipe Down (1)', 'Swipe Down (2)', \n 'Swipe X (1)', 'Swipe X (2)', 'Swipe + (1)', 'Swipe + (2)', \n 'Swipe V (1)', 'Swipe V (2)', 'Shake (1)', 'Shake (2)'])\n self.gesture_types_28 = np.array(['', 'Fine','Fine','Coarse','Coarse',\n 'Fine','Fine', 'Fine','Fine',\n 'Fine','Fine', 'Fine','Fine',\n 'Coarse','Coarse', 'Coarse','Coarse',\n 'Coarse','Coarse', 'Coarse','Coarse',\n 'Coarse','Coarse', 'Coarse','Coarse',\n 'Coarse','Coarse', 'Coarse','Coarse'])\n\n print(\"+ Load SHREC Dataset\")\n super().__init__(root_path, cache_name, \n video_pattern_re = video_pattern_re, path_pattern_re = path_pattern_re, \n image_pattern_re = image_pattern_re, video_pattern = video_pattern,\n general_info = general_info, skeleton_info = skeleton_info, skeleton_world_info = skeleton_world_info)\n # __init__\n\n def load_extra_info(self): # SHREC\n if self.db_video is not None and 'train_test' not in self.db_video.keys():\n self.update_extra_info()\n pass\n pass\n # load\n \n @staticmethod\n def get_shrec_extra_info(root_path):\n train_gesture_txt = os.path.join(root_path, 'train_gestures.txt')\n train_gesture_arr = np.genfromtxt(train_gesture_txt, delimiter=' ').astype(dtype=np.int32)\n idx = list(range(len(train_gesture_arr)))\n idx.sort(key = lambda x : (train_gesture_arr[x,0], train_gesture_arr[x,1], train_gesture_arr[x,2], train_gesture_arr[x,3]))\n train_gesture_arr = train_gesture_arr[idx, :]\n train_gesture_lbl = np.array([0 for i in range(len(train_gesture_arr))])\n train_gesture_lbl = train_gesture_lbl.reshape(-1, 1)\n train_gesture_arr = np.hstack([train_gesture_arr, train_gesture_lbl])\n\n test_gesture_txt = os.path.join(root_path, 'test_gestures.txt')\n test_gesture_arr = np.genfromtxt(test_gesture_txt, delimiter=' ').astype(dtype=np.int32)\n idx = list(range(len(test_gesture_arr)))\n idx.sort(key = lambda x : (test_gesture_arr[x,0], test_gesture_arr[x,1], test_gesture_arr[x,2], test_gesture_arr[x,3]))\n test_gesture_arr = test_gesture_arr[idx, :]\n test_gesture_lbl = np.array([1 for i in range(len(test_gesture_arr))])\n test_gesture_lbl = test_gesture_lbl.reshape(-1, 1)\n test_gesture_arr = np.hstack([test_gesture_arr, test_gesture_lbl])\n\n mask = ['gesture','finger','subject','essai', 'label14', 'label28', 'size', 'train_test']\n idx = [4, 5, 7]\n data_gesture = np.vstack([train_gesture_arr[:,idx], test_gesture_arr[:,idx]])\n \n df_data = None\n for cnt in range(len(idx)):\n df_data = pd.concat([df_data, pd.Series(np.array(data_gesture[:,cnt]), name=mask[idx[cnt]])], axis=1)\n return df_data\n # def get_shrec_extra_info\n \n def update_extra_info(self):\n df_gesture = SHREC_DATASET.get_shrec_extra_info(self.root_path)\n self.db_video = pd.concat([self.db_video, df_gesture[\"label14\"], df_gesture[\"label28\"], df_gesture[\"train_test\"]], axis=1)\n # def 
update_extra_info","sub_path":"dhg_data.py","file_name":"dhg_data.py","file_ext":"py","file_size_in_byte":31194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"30022546","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('msgs', '0011_auto_20160222_1422'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='message',\n            name='labels',\n            field=models.ManyToManyField(help_text='Labels assigned to this message', to='msgs.Label'),\n        ),\n    ]\n","sub_path":"casepro/msgs/migrations/0012_message_labels.py","file_name":"0012_message_labels.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"581406814","text":"import numpy as np\n\nfrom dataset import UDDataSet\nfrom gibbs import Gibbs, BetaSqSchedule\nfrom hmm import HMM\n\ntrain_ds = UDDataSet('data/en-ud-train.conllu')\ndev_ds = UDDataSet('data/en-ud-dev.conllu', train_ds)\n\nhmm = HMM(train_ds)\ngibbs = Gibbs(hmm)\n\n\ndef predict(iteration):\n    num_total_tag = 0\n    num_correct_tag = 0\n\n    for sentence in dev_ds.sentences():\n        predict_tag = gibbs.sample(sentence, iteration)\n        num_total_tag += len(sentence)\n        gt_tag = np.array([word[1] for word in sentence.words])\n        num_correct_tag += (predict_tag == gt_tag).sum()\n\n    # print(\"Tag accuracy: %.4f\" % (num_correct_tag / num_total_tag))\n    return num_correct_tag / num_total_tag\n\n\nk = [1, 2, 5, 10, 100, 500, 1000, 2000]\n\ngibbs.beta_schedule = BetaSqSchedule()\n\nfor ite in k:\n    print(\"%d & %.4f\\\\\\\\\\\\hline\" % (ite, predict(ite)))\n","sub_path":"hw3/gibbs_run_annealing.py","file_name":"gibbs_run_annealing.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"156415753","text":"# Test your code little by little.\n# Don't test everything only at the end, as that makes errors harder to identify.\n# Use the error messages to fix your code.\nval= float(input(\"purchase amount:\"))\n\ndesc = val - val*5/100\n\nif (val>=200):\n\tprint(round(desc, 2))\nelse:\n\tprint(round(val, 2))","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4365/codes/1649_869.py","file_name":"1649_869.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"385008842","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom .basic import BasicHandler, login\n\n\nclass Case1Handler(BasicHandler):\n\n    @login\n    def get(self):\n        page = self.get_argument('page', 1)\n        extra_info = self.user_ins.fetch_user_info_by_nickname(self.current_user)\n        data_info_list = self.case_ins.fetch_case3_list(page)\n        total_page = (self.case_ins.count_case3_num() + self.case_ins.count - 1)/self.case_ins.count\n        self.render('case1.html', extra_info=extra_info, total_page=int(total_page), cur_page=int(page),\n                    data_info_list=data_info_list)\n\n\nclass Case1CreateHandler(BasicHandler):\n    @login\n    def get(self):\n        import xlrd\n        xlsx = xlrd.open_workbook('./sourcedata/covdataadddate.xlsx')\n        sheet = xlsx.sheets()[2]\n        for i in range(1, sheet.nrows):\n            swap = []\n
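            # column 1 holds a numeric value: cast it to int, keep other cells as xlrd returns them\n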
            for j in range(len(sheet.row_values(i))):\n                if j == 1:\n                    swap.append(int(sheet.row_values(i)[j]))\n                else:\n                    swap.append(sheet.row_values(i)[j])\n            self.case_ins.insert_case3_data(tuple(swap))\n        return self.write(\"New data added successfully!!\")\n\n\nclass SearchCase1Handler(BasicHandler):\n\n    @login\n    def get(self):\n        p = self.get_argument(\"p\", None)\n        if not p:\n            return\n        extra_info = self.user_ins.fetch_user_info_by_nickname(self.current_user)\n        tmp_list = self.case_ins.search_case3_info_by_p(p)\n        data_info_list = list()\n        for item in tmp_list:\n            data_info_list.append(item)\n        self.render('case1.html', extra_info=extra_info, total_page=1, cur_page=1,\n                    data_info_list=data_info_list)\n\n\nclass DelCase1Handler(BasicHandler):\n    @login\n    def post(self):\n        id = self.get_argument(\"data_id\", \"\")\n        if not id:\n            return self.write(\"The data to be deleted does not exist!!\")\n        self.case_ins.del_case3_data_by_id(id)\n        return self.write(\"Data deleted successfully!!\")\n\n\nclass EditCase1Handler(BasicHandler):\n    @login\n    def get(self):\n        id = self.get_argument(\"data_id\", \"\")\n        extra_info = self.user_ins.fetch_user_info_by_nickname(self.current_user)\n        data_info_list = self.case_ins.search_case3_info_by_id(id)\n        self.render('case2.html', extra_info=extra_info, total_page=1, cur_page=1,\n                    data_info_list=data_info_list)\n\n    def post(self):\n        id = self.get_argument(\"data_id\", \"\")\n        province = self.get_argument(\"province\")\n        confirm = self.get_argument(\"confirm\")\n        dead = self.get_argument(\"dead\")\n        heal = self.get_argument(\"heal\")\n        newconfirm = self.get_argument(\"newconfirm\")\n        newheal = self.get_argument(\"newheal\")\n        newdead = self.get_argument(\"newdead\", '')\n        desp = self.get_argument(\"desp\", '')\n        self.case_ins.update_case3_by_id(id, (province,confirm, dead, heal, newconfirm, newheal, newdead, desp))\n        return self.write(\"Data updated successfully; please manually return to the data management page!!\")\n\n","sub_path":"0608/handlers/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"229903683","text":"import json\n\n\ndef open_osm_json(file_name, output_fn=None):\n    \"\"\" Open OSM while where all objects represented as\n    JSON objects.\n\n    >> open_osm_json('holland.json', tag_keys_to_text)\n    \"\"\"\n\n    with open(file_name) as f:\n        for line in f:\n            obj = json.loads(line)\n\n            if output_fn:\n                yield output_fn(obj)\n            else:\n                yield obj\n\ndef tag_keys_to_text(osm_obj):\n    return ' '.join(osm_obj.iterkeys())\n\n","sub_path":"first_attempts/misc/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"594436136","text":"__author__ = 'yunbo'\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nclass SpatioTemporalLSTMCell(nn.Module):\r\n    def __init__(self, in_channel, num_hidden, width, filter_size, stride, layer_norm):\r\n        super(SpatioTemporalLSTMCell, self).__init__()\r\n        print(\"in_channel=\",in_channel,\"num_hidden=\",num_hidden,\"width=\",width,\"layer_norm=\",layer_norm)\r\n        self.num_hidden = num_hidden\r\n        self.padding = filter_size // 2\r\n        self._forget_bias = 1.0\r\n# in_channel= 16 num_hidden= 64 width= 16 layer_norm= 1\r\n# in_channel= 64 num_hidden= 64 width= 16 layer_norm= 1\r\n# in_channel= 64 num_hidden= 64 width= 16 layer_norm= 1\r\n# in_channel= 64 num_hidden= 64 width= 16 layer_norm= 1\r\n# cell_list= (4,)\r\n        self.conv_x = nn.Sequential(\r\n            nn.Conv2d(in_channel, num_hidden * 7, kernel_size=filter_size, stride=stride, padding=self.padding),\r\n            nn.LayerNorm([num_hidden * 7, width, width])\r\n        )\r\n
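        # Gate convolutions: x produces 7 gate maps (i, f, g for both the temporal\r\n        # C path and the spatial M path, plus o); h produces 4 and m produces 3.\r\n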
        self.conv_h = nn.Sequential(\r\n            nn.Conv2d(num_hidden, num_hidden * 4, kernel_size=filter_size, stride=stride, padding=self.padding),\r\n            nn.LayerNorm([num_hidden * 4, width, width])\r\n        )\r\n        self.conv_m = nn.Sequential(\r\n            nn.Conv2d(num_hidden, num_hidden * 3, kernel_size=filter_size, stride=stride, padding=self.padding),\r\n            nn.LayerNorm([num_hidden * 3, width, width])\r\n        )\r\n        self.conv_o = nn.Sequential(\r\n            nn.Conv2d(num_hidden * 2, num_hidden, kernel_size=filter_size, stride=stride, padding=self.padding),\r\n            nn.LayerNorm([num_hidden, width, width])\r\n        )\r\n        self.conv_last = nn.Conv2d(num_hidden * 2, num_hidden, kernel_size=1, stride=1, padding=0)\r\n\r\n# LayerNorm normalizes along the channel direction (mean over C,H,W); it mainly helps RNNs; https://blog.csdn.net/shanglianlm/article/details/85075706\r\n    def forward(self, x_t, h_t, c_t, m_t):\r\n# print(\"forward SpatioTemporalLSTMCell\")\r\n# print(\"x_t\",x_t.size())\r\n# print(\"h_t\",h_t.size())\r\n# print(\"c_t\",c_t.size())\r\n# print(\"m_t\",m_t.size())\r\n# x_t (8, 16, 16, 16)\r\n# h_t (8, 64, 16, 16)\r\n# c_t (8, 64, 16, 16)\r\n# m_t (8, 64, 16, 16)\r\n# print(\"conv over h and m from the previous step; conv over x as well---\")\r\n        x_concat = self.conv_x(x_t)\r\n        h_concat = self.conv_h(h_t)\r\n        m_concat = self.conv_m(m_t)\r\n        \r\n# print(\"x_concat\",x_concat.size())\r\n# print(\"h_concat\",h_concat.size())\r\n# print(\"m_concat\",m_concat.size())\r\n# x_concat torch.Size([8, 448, 16, 16])\r\n# h_concat torch.Size([8, 256, 16, 16])\r\n# m_concat torch.Size([8, 192, 16, 16])\r\n# print(\"split x---\")\r\n        i_x, f_x, g_x, i_x_prime, f_x_prime, g_x_prime, o_x = torch.split(x_concat, self.num_hidden, dim=1)\r\n# print(\"i_x\",i_x.size())\r\n# print(\"f_x\",f_x.size())\r\n# print(\"g_x\",g_x.size())\r\n# print(\"i_x_prime\",i_x_prime.size())\r\n# print(\"f_x_prime\",f_x_prime.size())\r\n# print(\"g_x_prime\",g_x_prime.size()) \r\n# print(\"o_x\",o_x.size()) \r\n# print(\"split h---\")\r\n# i_x torch.Size([8, 64, 16, 16])\r\n# f_x torch.Size([8, 64, 16, 16])\r\n# g_x torch.Size([8, 64, 16, 16])\r\n# i_x_prime torch.Size([8, 64, 16, 16])\r\n# f_x_prime torch.Size([8, 64, 16, 16])\r\n# g_x_prime torch.Size([8, 64, 16, 16])\r\n# o_x torch.Size([8, 64, 16, 16])\r\n        \r\n        i_h, f_h, g_h, o_h = torch.split(h_concat, self.num_hidden, dim=1) # from timestep T-1\r\n        i_m, f_m, g_m = torch.split(m_concat, self.num_hidden, dim=1)\r\n# print(\"i_h\",i_h.size(), \"f_h\",f_h.size(),\"g_h\",g_h.size(),\"o_h\",o_h.size())\r\n# i_h torch.Size([8, 64, 16, 16]) f_h torch.Size([8, 64, 16, 16]) g_h torch.Size([8, 64, 16, 16]) o_h torch.Size([8, 64, 16, 16])\r\n# print(\"split m---\")\r\n# print(\"i_m\",i_m.size(), \"f_m\",f_m.size(),\"g_m\",g_m.size())\r\n# i_m torch.Size([8, 64, 16, 16]) f_m torch.Size([8, 64, 16, 16]) g_m torch.Size([8, 64, 16, 16])\r\n        i_t = torch.sigmoid(i_x + i_h)\r\n        f_t = torch.sigmoid(f_x + f_h + self._forget_bias)\r\n        g_t = torch.tanh(g_x + g_h)\r\n\r\n        c_new = f_t * c_t + i_t * g_t\r\n        \r\n# print(\"temporal (upper) branch---\")\r\n# print(\"i_t\",i_t.size())\r\n# print(\"f_t\",f_t.size())\r\n# print(\"g_t\",g_t.size()) \r\n# print(\"c_new\",c_new.size()) \r\n        i_t_prime = torch.sigmoid(i_x_prime + i_m)\r\n        f_t_prime = torch.sigmoid(f_x_prime + f_m + self._forget_bias)\r\n        g_t_prime = torch.tanh(g_x_prime + g_m)\r\n# i_t torch.Size([8, 64, 16, 16])\r\n# f_t torch.Size([8, 64, 16, 16])\r\n# g_t torch.Size([8, 64, 16, 16])\r\n# c_new torch.Size([8, 64, 16, 16])\r\n\r\n\r\n        m_new = f_t_prime * m_t + i_t_prime * g_t_prime\r\n# print(\"spatial (lower) branch---\")\r\n# print(\"i_t_prime\",i_t_prime.size())\r\n# print(\"f_t_prime\",f_t_prime.size())\r\n# print(\"g_t_prime\",g_t_prime.size()) \r\n# print(\"m_new\",m_new.size()) \r\n# i_t_prime torch.Size([8, 64, 16, 16])\r\n# f_t_prime torch.Size([8, 64, 16, 16])\r\n# g_t_prime torch.Size([8, 64, 16, 16])\r\n# m_new torch.Size([8, 64, 16, 16])\r\n        \r\n        mem = torch.cat((c_new, m_new), 1)\r\n
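        # The output gate mixes x, h, and the fused [C; M] memory; the new hidden\r\n        # state reads the fused memory through a 1x1 convolution (conv_last).\r\n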
        o_t = torch.sigmoid(o_x + o_h + self.conv_o(mem))\r\n        h_new = o_t * torch.tanh(self.conv_last(mem))\r\n# print(\"output---\")\r\n# print(\"mem\",mem.size())\r\n# print(\"o_t\",o_t.size()) \r\n# print(\"h_new\",h_new.size()) \r\n# mem torch.Size([8, 128, 16, 16])\r\n# o_t torch.Size([8, 64, 16, 16])\r\n# h_new torch.Size([8, 64, 16, 16])\r\n# h_t_0= (8, 64, 16, 16) c_t_0= (8, 64, 16, 16) memory= (8, 64, 16, 16)\r\n        return h_new, c_new, m_new\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"core/layers/SpatioTemporalLSTMCell_LayerNorm.py","file_name":"SpatioTemporalLSTMCell_LayerNorm.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"632002176","text":"# Name: Jacob Wren\n# Course: CPE 101\n# Instructor: Irene Humer\n# Assignment: Moonlander Project\n# Term: Winter 2020\n\n\ndef main():\n    show_welcome()\n    get_altitude_ = get_altitude()\n    get_fuel_ = get_fuel()\n    display_state(0, get_altitude_, 0, get_fuel_, 0)\n    f_r = get_fuel_rate(get_fuel_)\n    time = 0\n    z = update_fuel(get_fuel_, 0)\n    p = update_velocity(0, 0)\n    q = update_acceleration(1.62, f_r)\n    k = get_altitude_\n    \n\n    while k > 0:\n        #n = n + 1\n        z = update_fuel(z, f_r)\n        k = update_altitude(k, p, update_acceleration(1.62, f_r))\n        p = update_velocity(p, update_acceleration(1.62, f_r))\n        \n        time = time + 1\n        if z < 0:\n            z = 0\n        display_state(time, k, p, z, f_r)\n        if k == 0:\n            display_landing_status(p)\n        if z > 0 and k > 0:\n            f_r = get_fuel_rate(z)\n        else:\n            f_r = 0\n    \n    \ndef show_welcome() -> None:\n    print(\"\\nWelcome aboard the Lunar Module (LM) Flight Simulator!\\n\")\n    print(\"  To begin, provide an initial altitude and fuel amount.\")\n    print(\"  To simulate the actual LM, use 1300 meters and 500 liters.\\n\")\n    \n    \ndef get_fuel() -> int:\n    fuel = int(input(\"Enter initial fuel amount (in liters): \"))\n    while fuel < 1:\n        print(\"[ERROR] Fuel amount must be positive\")\n        fuel = int(input(\"Enter initial fuel amount (in liters): \"))\n    return fuel\n    \n\ndef get_altitude() -> int:\n    altitude = int(input(\"Enter initial altitude (1 to 9999 meters): \"))\n    while altitude < 1 or altitude > 9999:\n        print(\"[ERROR] Altitude out of range\")\n        altitude = int(input(\"Enter initial altitude (1 to 9999 meters): \"))\n    return altitude\n    \n    \ndef get_fuel_rate(fuel_amount: int) -> int:\n    fuel_rate = int(input(\"Enter fuel rate (0 to 9 liters per second): \"))\n    while fuel_rate < 0 or fuel_rate > 9:\n        print(\"[ERROR] Fuel rate out of range\")\n        fuel_rate = int(input(\"Enter fuel rate (0 to 9 liters per second): \"))\n    return min(fuel_amount, fuel_rate)\n\n    \ndef update_fuel(fuel_amount: int, fuel_rate: int) -> int:\n    fuel_Now = fuel_amount - fuel_rate\n    if fuel_Now < 0:\n        fuel_Now = 0\n    return fuel_Now\n    \n    \ndef update_acceleration(gravity: float, fuel_rate: int) -> float:\n    acceleration = gravity * ((fuel_rate / 5) - 1)\n    return acceleration\n    \n\ndef update_velocity(velocity: float, acceleration: float) -> float:\n    velocity = velocity + acceleration \n    return velocity\n    \n\ndef update_altitude(altitude: float,\n                    velocity: float,\n                    acceleration: float) -> float:\n    altitude = altitude + velocity + (acceleration / 2)\n    if altitude < 0:\n        altitude = 0\n    return altitude\n\n\n
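# update_altitude above applies s = v*t + (a*t**2)/2 with a one-second time step;\n# display_state below prints the telemetry block (negative velocity = descending).\n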
def display_state(time: int,\n                  altitude: float,\n                  velocity: float,\n                  fuel_amount: int,\n                  fuel_rate: int) -> None:\n    if altitude <= 0:\n        print()\n        print(\"LM state at landing/impact\")\n        print(\"    Time: {0:4} s\\n    Fuel: {1:4} l\\n    Rate: {2:4} l/s\\nAltitude: {3:7.2f} m\\nVelocity: {4:7.2f} m/s\".format(time, fuel_amount, fuel_rate, altitude, velocity))\n        print()\n    elif time == 0:\n        print()\n        print(\"LM state at retrorocket cutoff\")\n        print(\"    Time: {0:4} s\\n    Fuel: {1:4} l\\n    Rate: {2:4} l/s\\nAltitude: {3:7.2f} m\\nVelocity: {4:7.2f} m/s\".format(time, fuel_amount, fuel_rate, altitude, velocity))\n        print()\n    elif fuel_amount <= 0:\n        print(\"[OUT OF FUEL]\", f\"Time: {time: 4}\", f\"Altitude: {altitude:7.2f}\", f\"Velocity: {velocity: 7.2f}\")\n    else:\n        print(\"    Time: {0:4} s\\n    Fuel: {1:4} l\\n    Rate: {2:4} l/s\\nAltitude: {3:7.2f} m\\nVelocity: {4:7.2f} m/s\".format(time, fuel_amount, fuel_rate, altitude, velocity))\n        print()\n\n    \ndef display_landing_status(velocity: float) -> None:\n    if -1 <= velocity <= 0:\n        print(\"[LANDING STATUS] The Eagle has landed!\")\n    elif -10 < velocity < -1:\n        print(\"[LANDING STATUS] Enjoy your oxygen while it lasts!\")\n    elif velocity <= -10:\n        print(\"[LANDING STATUS] Ouch, that hurt!\")\n    \n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Projects/Project1/moonlander.py","file_name":"moonlander.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"235376851","text":"\"\"\"teamproject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\nimport blog.views\n\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', blog.views.loginpage, name=\"login\"),\n    path('accounts/',include('allauth.urls')),\n    path('home',blog.views.home, name=\"home\"),\n    path('profile',blog.views.profile, name=\"profile\"),\n    path('team/', blog.views.team, name=\"team\"),\n    path('timetable', blog.views.timetable, name=\"timetable\"),\n    path('timetable_edit', blog.views.timetable_edit, name=\"timetable_edit\"),\n    path('team/new/',blog.views.new, name=\"new\"),\n    path('team/create/',blog.views.create,name=\"create\"),\n    path('/dday/new', blog.views.dday_new, name=\"dday_new\"),\n    path('signup/', blog.views.signup, name='signup'),\n    path('login/', blog.views.login, name='login'),\n    path('logout/', blog.views.logout, name='logout'),\n    path('table/', blog.views.table, name='table'),\n    path('loginpage/',blog.views.loginpage, name='loginpage'),\n\n]\n","sub_path":"teamproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"632384001","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov  5 21:35:44 2019\n\n@author: Administrator\n\"\"\"\nfrom sma_strategy import *\n\nif __name__ == '__main__':\n    short_lst = [7, 20, 50]\n    long_lst = [100, 200, 300]\n    return_result = []\n    for i in short_lst:\n        for j in long_lst:\n            return_result.append(runDouble(path='./data/rb.csv', prod='rb', plot=False, sma_short=i, sma_long=j))\n    print(return_result)\n\n
# The best result is the 7-300 SMA-Crossover strategy","sub_path":"parameter_tune.py","file_name":"parameter_tune.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"294767163","text":"import socket, json, pyautogui\r\n\r\nip = \"192.168.0.3\"\r\nport = 4444\r\n\r\nlistener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nlistener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\nlistener.bind((ip, port))\r\nlistener.listen(0)\r\nprint(\"[+] Waiting for incoming connections\")\r\nconnection, address = listener.accept()\r\nprint(\"[+] Got a connection from \" + str(address))\r\n\r\nprev_button = \"\"\r\n\r\ndef reliable_receive():\r\n    json_data = \"\"\r\n    while True:\r\n        try:\r\n            json_data += connection.recv(1024).decode()\r\n            return json.loads(json_data)\r\n        except ValueError:\r\n            continue\r\nwhile True:\r\n    try:\r\n        data = reliable_receive().split(\":\")\r\n        pyautogui.moveTo(int(float(data[0])), int(float(data[1])))\r\n        if prev_button == data[2] == \"left\":\r\n            pyautogui.doubleClick()\r\n        elif data[2] == \"left\":\r\n            pyautogui.click()\r\n        elif data[2] == \"right\":\r\n            pyautogui.click(button=\"right\")\r\n        prev_button = data[2]\r\n        print(data)\r\n    except:\r\n        connection.close()\r\n        print(\"[-] Closing...\")\r\n        break","sub_path":"gyromouse_listener.py","file_name":"gyromouse_listener.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"74284065","text":"\"\"\"ROSARIO VALERO MIRANDA - 1º DAW - PRACTICA5 - EJERCICIO 2\r\nWrite a program that asks for numbers and stores them in a list.\r\nTo finish entering numbers, simply press Enter.\r\nThe program ends by printing the list of numbers\"\"\"\r\n\r\nprint(\"Enter a number\")\r\nnum = input()\r\n\r\nlista = []\r\n\r\nwhile num != \"\":\r\n    lista.append(int(num))\r\n    print(\"Enter another number\")\r\n    num = input()\r\nprint(lista)\r\n","sub_path":"Ejercicios-Pr5/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"166270350","text":"from urllib.request import urlopen\nfrom io import StringIO\nimport csv\n\ndata = urlopen(\"http://pythonscraping.com/files/MontyPythonAlbums.csv\")\\\n    .read().decode(\"utf-8\", 'ignore')\ndataFile = StringIO(data)\ndictReader = csv.DictReader(dataFile)\n\nprint(dictReader.fieldnames)\nprint(\"----------------\")\n\nfor row in dictReader:\n    print(row['Name'] + \"--date written--:\" + row['Year'])\n","sub_path":"python网络数据采集/chapter6-1.py","file_name":"chapter6-1.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"261161649","text":"import random\nfrom datetime import datetime, date, time\n\nstartDate = date(date.today().year, 9, 14)\n\ndef get_teams(teams, n_of_lists = 4):\n    n = len(teams)-1\n    \n    return_teams = [[],[],[],[]]\n    for i in range(n+1):\n        m = random.randint(0,n-i)\n        return_teams[i%n_of_lists].append(teams[m])\n        del teams[m]\n    return return_teams\n\nte = [\"A\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\"]\nprint(get_teams(te))\n\n","sub_path":"task_sixteen.py","file_name":"task_sixteen.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"430378562","text":"import re\n\nfrom jarmodmoo.config import INTERNAL_SERVICE_PREFIX\n\nSERVICE_HELP = \"\"\"Service calls are entered like: %(prefix)sSERVICE [ARGS]\n\nExample:\n\n %(prefix)sservice [\"echo\"]\n\"\"\" % {\"prefix\": INTERNAL_SERVICE_PREFIX}\n\n\nclass NoServiceCall(Exception):\n pass\n\n_service_call_regex_no_args = re.compile(r\"^mmi\\.(\\w+)$\")\n_service_call_regex_args = re.compile(r\"^mmi\\.(\\w+) (.*)$\")\n\ndef parse_service_call(s):\n \"\"\"Parse service call from string.\n\n If no args are given, they will default to ``[\"\"]``.\n\n :param s: Input string from user.\n :returns: A two-tuple ``(SERVICE, ARGS)``.\n :raises NoServiceCall:\n \"\"\"\n match_no_args = _service_call_regex_no_args.match(s.strip())\n match_args = _service_call_regex_args.match(s.strip())\n if not (match_no_args or match_args):\n raise NoServiceCall(\"not a service call\")\n if match_args:\n call = match_args.group(1)\n args = match_args.group(2)\n try:\n evaled_args = eval(args)\n except Exception:\n raise NoServiceCall(\"invalid args\")\n return (INTERNAL_SERVICE_PREFIX + call, evaled_args)\n else:\n call = match_no_args.group(1)\n args = [\"\"]\n return (INTERNAL_SERVICE_PREFIX + call, args)\n","sub_path":"example/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"156678419","text":"# Tu napíšte svoj kód :-)\nWIDTH = 288\nHEIGHT = 512\nTITLE = 'flap.py'\nGRAVITY = 0.3\nKICK = -6.5\n\nflappy = Actor('flappy')\nflappy.x = WIDTH / 2\nflappy.y = HEIGHT / 2\nflappy.vy = 0\n\nhorna_trubka = Actor('pipe.upper')\nhorna_trubka.left = WIDTH\n\ndef update():\n flappy.vy = flappy.vy + GRAVITY\n flappy.y = flappy.y + flappy.vy\n if flappy.bottom > HEIGHT:\n print('ta si si narazil brusko')\n quit()\n\n if flappy.top < 0:\n print('ta si si narazil hlavicku')\n quit()\n\n horna_trubka.x = horna_trubka.x - 1\n if horna_trubka.right < 0:\n horna_trubka.left = WIDTH\n\n if flappy.colliderect(horna_trubka):\n print('ta hlavou trubku neprerazis')\n quit()\n\ndef draw():\n screen.blit('background', (0, 0))\n flappy.draw()\n horna_trubka.draw()\n\ndef on_key_down():\n flappy.vy = KICK\n","sub_path":"2023/07-detska.univerzita/resources/flappy/group-2.py","file_name":"group-2.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"530866862","text":"__author__ = 'Zilun Shen'\n\n\n# Factor Covariance Matrix Estimation\n\nfrom .factor_cov import FactorCovEstimator\nimport numpy as np\n\ndef get_cov(X):\n \"\"\"Compute the factor covariance matrix from the set of daily factor returns.\n\n :X: N-by-F numpy 2d array, where N is the number of `time units` in the window, and F is the number of factors\n by convention, from top to bottom, factor return vectors are aligned in descending order in time. 
def get_cov(X):\n    \"\"\"Compute the factor covariance matrix from the set of daily factor returns.\n\n    :X: N-by-F numpy 2d array, where N is the number of `time units` in the window, and F is the number of factors\n       by convention, from top to bottom, factor return vectors are aligned in descending order in time. (i.e., top\n       row means the latest factor return)\n    :returns: F-by-F numpy 2d array\n    \"\"\"\n    est = FactorCovEstimator('vanilla_EWMA', 'vanilla_EWMA')\n    # Should call fit\n    n_obs, n_feature = X.shape\n    wgts = est._get_ewma_weights(hl=est._hl, tau=n_obs-1) # `tau` means how many time units from now\n    wgts /= np.sum(wgts) # normalize weights\n    mu = wgts.dot(X)\n    var = wgts.dot(X**2) - mu ** 2\n    D = np.diag(np.sqrt(var))\n    corr = est._ewma(X)\n    cov = D.dot(corr).dot(D)\n    # cov = var.T.dot(corr).dot(var)\n    return var, corr, cov\n","sub_path":"factor_mining/factor_return_cov.py","file_name":"factor_return_cov.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"262034627","text":"import sys\r\nsys.path.append('/var/www/flask_web_server')\r\nfrom chatServer import db\r\n\r\n\r\nclass Message(db.EmbeddedDocument, db.Document):\r\n    meta = {'collection': 'message'}\r\n\r\n    message = db.StringField()\r\n    from_user = db.StringField()\r\n    to_user = db.StringField()\r\n    channel_id = db.IntField()\r\n    message_datetime = db.DateTimeField()","sub_path":"flask_web_server/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"316121185","text":"import tweepy\nfrom tweepy import OAuthHandler,Stream\nfrom tweepy.streaming import StreamListener\nimport json\n\nconsumer_key = '############################'\nconsumer_secret = '###########################'\naccess_token = '###########################'\naccess_secret = '###########################'\n\nauth = OAuthHandler(consumer_key,consumer_secret)\nauth.set_access_token(access_token,access_secret)\n\napi = tweepy.API(auth)\n\nclass MyStreamListener(StreamListener):\n\n\tdef on_data(self,data):\n\t\ttry:\n\t\t\twith open('Maggi.json','a') as f:\n\t\t\t\tf.write(data)\n\t\t\t\treturn True\n\t\texcept BaseException as e:\n\t\t\tprint(\"Error on_data: %s\" % str(e))\n\t\treturn True\n\n\tdef on_error(self,status):\n\t\tprint(status)\n\t\treturn True\n\ntwitter_stream = Stream(auth, MyStreamListener())\ntwitter_stream.filter(track = ['#Maggi'])","sub_path":"Livestreaming.py","file_name":"Livestreaming.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"477740184","text":"from django.http import Http404\nfrom rest_framework.views import APIView\n\nfrom api.models import *\nfrom api.paginations import ManilaCursorPagination\n\n\nclass PaginationAPIView(APIView):\n    \"\"\"\n    APIView with pagination\n    \"\"\"\n    pagination_class = ManilaCursorPagination\n\n    @property\n    def paginator(self):\n        \"\"\"\n        The paginator instance associated with the view, or `None`.\n        \"\"\"\n        if not hasattr(self, '_paginator'):\n            if self.pagination_class is None:\n                self._paginator = None\n            else:\n                self._paginator = self.pagination_class()\n        return self._paginator\n\n    def paginate_queryset(self, queryset):\n        \"\"\"\n        Return a single page of results, or `None` if pagination is disabled.\n        \"\"\"\n        if self.paginator is None:\n            return None\n        return self.paginator.paginate_queryset(queryset, self.request, view=self)\n\n    def get_paginated_response(self, data):\n        \"\"\"\n        Return a paginated style `Response` object for the given output data.\n        \"\"\"\n        assert self.paginator is not None\n        return self.paginator.get_paginated_response(data)\n\n\nclass UsersAPIView(APIView):\n    def 
get_profile(self, username):\n try:\n queryset = UserProfile.objects \\\n .extra(\n select={\n 'reputation': \"SELECT COALESCE(SUM(api_reputation.reputation), 0) FROM api_reputation \"\n \"INNER JOIN api_user ON (api_reputation.target_user_id=api_user.id) \"\n \"WHERE LOWER(api_user.username)=LOWER('{0}')\".format(username),\n 'friends_count': \"SELECT COUNT(*) FROM api_follow \"\n \"INNER JOIN api_user ON (api_follow.user_id=api_user.id) \"\n \"WHERE LOWER(api_user.username)=LOWER('{0}')\".format(username),\n 'followers_count': \"SELECT COUNT(*) FROM api_follow \"\n \"INNER JOIN api_user ON (api_follow.follow_id=api_user.id) \"\n \"WHERE LOWER(api_user.username)=LOWER('{0}')\".format(username)\n })\n\n if self.request.user.id:\n queryset = queryset.extra(select={\n 'is_following': \"SELECT EXISTS (SELECT api_follow.id FROM api_follow \"\n \"INNER JOIN api_user ON (api_follow.follow_id=api_user.id) \"\n \"WHERE api_follow.user_id={0} AND LOWER(api_user.username)=LOWER('{1}'))\".format(self.request.user.id, username)\n })\n\n return queryset.get(user__username__iexact=username)\n except UserProfile.DoesNotExist:\n raise Http404\n\n\nclass PostsAPIView(PaginationAPIView):\n def get_queryset(self):\n if self.request.user.id:\n return Post.objects.authenticated(self.request.user)\n return Post.objects.guest()\n\n\nclass ExplorePostsAPIView(PostsAPIView):\n def get_queryset(self):\n user_primary_languages = UserLanguage.objects\\\n .filter(user=self.request.user, is_primary=True)\\\n .values('language')\n\n user_secondary_languages = UserLanguage.objects\\\n .filter(user=self.request.user, is_primary=False)\\\n .values('language')\n\n # user1 = en : kr\n # user2 = kr : en\n a1 = UserLanguage.objects\\\n .filter(Q(language__in=user_secondary_languages,\n is_primary=True)).values('user')\n\n a2 = UserLanguage.objects\\\n .filter(Q(language__in=user_primary_languages,\n is_primary=False)).values('user')\n\n a3 = a1.intersection(a2)\n\n # user1 = en, kr : any\n # user2 = en : kr\n b1 = UserLanguage.objects\\\n .filter(Q(language__in=user_primary_languages,\n is_primary=True)).values('user')\n\n b2 = UserLanguage.objects\\\n .filter(Q(language__in=user_primary_languages,\n is_primary=False)).values('user')\n\n b3 = b1.intersection(b2)\n\n # user1 = en : kr\n # user2 = en, kr : any\n c1 = UserLanguage.objects\\\n .filter(Q(language__in=user_primary_languages,\n is_primary=True)).values('user')\n\n c2 = UserLanguage.objects\\\n .filter(Q(language__in=user_secondary_languages,\n is_primary=True)).values('user')\n\n c3 = c1.intersection(c2)\n\n users = a3.union(b3).union(c3)\n\n queryset = super().get_queryset()\\\n .filter(Q(user__in=users) | Q(user=self.request.user))\n\n return queryset\n\n\nclass CommentsAPIView(PaginationAPIView):\n def get_queryset(self):\n if self.request.user.id:\n return Comment.objects.authenticated(self.request.user)\n return Comment.objects.guest()\n\n\nclass CorrectionsAPIView(PaginationAPIView):\n def get_queryset(self):\n if self.request.user.id:\n return Correction.objects.authenticated(self.request.user)\n return Correction.objects.guest()\n\n\nclass CorrectionCommentsAPIView(PaginationAPIView):\n def get_queryset(self):\n queryset = CorrectionComment.objects\\\n .select_related('user__userprofile', 'user__userprofile__country') \\\n .prefetch_related(Prefetch(\"user__languages__language\", queryset=Language.objects.all()))\\\n .filter(is_deleted=False)\n\n return 
queryset\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"257699622","text":"from slackbot.bot import respond_to\n# from slackbot.bot import listen_to\nfrom slackbot.bot import default_reply\nimport random\nfrom zikudriver import rider_time\nfrom RiderDriver.ooo import OOO\n\n\n@respond_to('変身!')\ndef random_Driver(message):\n rider = random.choice([2010, 2013, 2015, 2016, 2017, 2018])\n change = ''\n if rider == 2010:\n change = OOO()\n elif rider == 2013:\n change = open('./RiderDriver/Gaim.txt', encoding=\"utf-8\").readlines()\n change = random.choice(change)\n elif rider == 2015:\n change = open('./RiderDriver/Ghost.txt', encoding=\"utf-8\").readlines()\n change = random.choice(change)\n elif rider == 2016:\n change = open('./RiderDriver/Exaid.txt', encoding=\"utf-8\").readlines()\n change = random.choice(change)\n elif rider == 2017:\n change = open('./RiderDriver/Build.txt', encoding=\"utf-8\").readlines()\n change = random.choice(change)\n elif rider == 2018:\n change = rider_time\n\n message.reply(change)\n\n\n@respond_to('アナザータイム!')\ndef another_time(message):\n name = ['君が', 'あなたが', 'お前が']\n reply_message = ''\n riders = ['クウガ', 'アギト', '龍騎', 'ファイズ', '剣',\n '響鬼', 'カブト', '電王', 'キバ', 'ディケイド',\n 'W', 'オーズ', 'フォーゼ', 'ウィザード', '鎧武',\n 'ゴースト', 'エグゼイド', 'ビルド', 'ジオウ', 'ジオウII',\n 'シノビ', 'クイズ', 'キカイ', 'ゼロワン', '1号']\n your = random.choice(name)\n if your == '君が' or your == 'あなたが':\n reply_message += 'おめでとう。'\n\n rider = '仮面ライダー' + random.choice(riders)\n reply_message += '歴史が変わって、今日から' + your + rider\n if your == 'あなたが':\n reply_message += 'よ'\n else:\n reply_message += 'だ'\n message.reply(reply_message)\n\n\n# initialize the reply counter used by default_func (it was previously undefined,\n# which raised a NameError on the first default reply)\ncount = 0\n\n\n@default_reply()\ndef default_func(message):\n global count\n count += 1\n message.reply('%d 回目のデフォルトの返事です。' % count)\n","sub_path":"slackbot/plugins/my_mention.py","file_name":"my_mention.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"19474170","text":"# -*- coding: utf-8 -*-\n# Manually created by dpm on 2016-12-30\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\nimport dateutil.parser\n\n\ndef forwards_func(apps, schema_editor):\n Crontab = apps.get_model(\"django_celery_beat\", \"CrontabSchedule\")\n PeriodicTask = apps.get_model(\"django_celery_beat\", \"PeriodicTask\")\n\n tab = Crontab( minute=\"0\" )\n tab.save()\n task = PeriodicTask( name=\"collect-100-tweets\",\n task=\"source.tasks.collect_tweets\",\n crontab=tab,\n args=\"[100]\" )\n task.save()\n\n\n tab = Crontab( minute=\"30\" )\n tab.save()\n task = PeriodicTask( name=\"classify-tweets\",\n task=\"source.tasks.classify_tweets\",\n crontab=tab,\n args=\"[]\" )\n task.save()\n\n\n\ndef reverse_func(apps, schema_editor):\n pass\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('django_celery_beat', '0001_initial'),\n ('source', '0002_data'),\n ]\n\n operations = [\n migrations.RunPython(forwards_func, reverse_func),\n ]\n","sub_path":"btt/source/migrations/0003_beat.py","file_name":"0003_beat.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"63139319","text":"import os\nimport sys\nimport subprocess\nimport argparse\n\nBENCH = 
'/home/mengcz/fox/fox'\nOUTPUTDIR = '/home/mengcz/fox/output'\nINPUTDIR = '/home/mengcz/fox'\nTRACES = ['macdrp', 'lammps_short']\nENGS = [5, 6, 7]\n\ndef main():\n if not os.path.exists(OUTPUTDIR):\n os.makedirs(OUTPUTDIR)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--default', action='store_true')\n parser.add_argument('--sbsize', action='store_true')\n parser.add_argument('--disksize', action='store_true')\n args = parser.parse_args()\n\n # default set\n if args.default:\n exprespath = os.path.join(OUTPUTDIR, 'default')\n if not os.path.exists(exprespath):\n os.makedirs(exprespath)\n for trace in TRACES:\n if trace == 'macdrp':\n nb, np = 62, 8 # for small IOs\n elif trace == 'lammps_short':\n nb, np = 62, 8 # for large IOs, cut\n for eng in ENGS:\n wd = os.path.join(exprespath, trace, str(eng))\n if not os.path.exists(wd):\n os.makedirs(wd)\n inputtrace = os.path.join(INPUTDIR, 'input_%s.csv' % (trace))\n subp = subprocess.Popen(\"sudo %s run -d /dev/nvme0n1 -j 1 -c 8 -l 4 -b %d -p %d -r 0 -w 100 -v 8 -e %d -o -i %s\" % (BENCH, nb, np, eng, inputtrace), shell=True, cwd=wd)\n subp.wait()\n\n # change sbsize\n if args.sbsize:\n exprespath = os.path.join(OUTPUTDIR, 'sbsize')\n if not os.path.exists(exprespath):\n os.makedirs(exprespath)\n for trace in TRACES:\n nb, np = 62, 8\n for npu, nblk in [(1, 1), (2, 1), (4, 1), (8, 1), (1, 2), (1, 4), (1, 8)]:\n eng = 7\n wd = os.path.join(exprespath, trace, str(eng), '%d_%d' % (npu, nblk))\n if not os.path.exists(wd):\n os.makedirs(wd)\n inputtrace = os.path.join(INPUTDIR, 'input_%s.csv' % (trace))\n subp = subprocess.Popen(\"sudo %s run -d /dev/nvme0n1 -j 1 -c 8 -l 4 -b %d -p %d -r 0 -w 100 -v 8 -e %d -o --sb_pus %d --sb_blks %d -i %s\" % (BENCH, nb, np, eng, npu, nblk, inputtrace), shell=True, cwd=wd)\n subp.wait()\n\n # change disk size\n if args.disksize:\n exprespath = os.path.join(OUTPUTDIR, 'disksize')\n if not os.path.exists(exprespath):\n os.makedirs(exprespath)\n for trace in TRACES:\n if trace == 'macdrp':\n np = 16\n elif trace == 'lammps_short':\n np = 32\n for nb in [16, 32, 62]:\n for eng in ENGS:\n wd = os.path.join(exprespath, trace, str(eng), '%d_%d' % (nb, np))\n if not os.path.exists(wd):\n os.makedirs(wd)\n inputtrace = os.path.join(INPUTDIR, 'input_%s.csv' % (trace))\n subp = subprocess.Popen(\"sudo %s run -d /dev/nvme0n1 -j 1 -c 8 -l 4 -b %d -p %d -r 0 -w 100 -v 8 -e %d -o -i %s\" % (BENCH, nb, np, eng, inputtrace), shell=True, cwd=wd)\n subp.wait()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"replay_trace.py","file_name":"replay_trace.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"353950765","text":"#!/usr/bin/env python3\n\nimport psycopg2\nimport psycopg2.extras\n\nfrom textwrap import dedent as td\n\nfrom openarc.env import *\n\nclass TestOABase(object):\n \"\"\"Mixin class to assist with database testing\"\"\"\n def setUp_db(self):\n \"\"\"Create scratch \"test\" schema in database\"\"\"\n initenv(on_demand_oags=True)\n dbinfo = getenv().dbinfo\n self.dbconn = psycopg2.connect(dbname='openarc',\n user=dbinfo['user'],\n host=dbinfo['host'])\n with self.dbconn.cursor() as cur:\n cur.execute(self.SQL.drop_test_schema)\n cur.execute(self.SQL.create_test_schema)\n self.dbconn.commit()\n\n def tearDown_db(self):\n with self.dbconn.cursor() as cur:\n cur.execute(self.SQL.drop_test_schema)\n self.dbconn.commit()\n self.dbconn.close()\n\n def nuke_database(self):\n self.clear_openarc_schema()\n 
self.dbconn.commit()\n\n def clear_openarc_schema(self):\n with self.dbconn.cursor() as setupcur:\n setupcur.execute(self.SQL.delete_openarc_rpc)\n\n class SQL(object):\n ## Test schema helper SQL\n drop_test_schema = td(\"\"\"\n DROP SCHEMA IF EXISTS test CASCADE\"\"\")\n create_test_schema = td(\"\"\"\n CREATE SCHEMA test\"\"\")\n ## Common schema helper SQL\n delete_openarc_rpc = td(\"\"\"\n DELETE FROM openarc.rpc_registry\"\"\")\n","sub_path":"openarc/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"458812351","text":"#!/usr/bin/env python\n\nimport wa_xbee_comm\nimport wa_config_data\nimport wa_control\nimport wa_iot_comm\nimport wa_mqtt_comm\n\n\n#---------------------------------------------------------------\n# Define data structures\n#---------------------------------------------------------------\nclass irrigation_area:\n def __init__(self, id=0):\n self.area_id=id\n self.total_areas=4\n self.operation_mode='none' #Options: none,manual,sched,auto\n self.config_file='../config/config_area'+str(id)+'.csv'\n self.sched_file='../config/sched_area'+str(id)+'.csv'\n self.manual_control_file='../config/manual_control_area'+str(id)+'.csv'\n self.operation_mode_file='../config/operation_mode_area'+str(id)+'.csv'\n self.limits_file='../config/limits_area'+str(id)+'.csv'\n self.log_file='../www_flask/wa4_log/log_area'+str(id)+'.csv'\n self.data_file='../data/data_area'+str(id)+'.csv'\n self.verbose=1\n\n Q=0.0\n R=0.0\n p1_ant=1.0\n smKalman_ant=0.0\n smC=0.0\n sm1=0.0\n sm2=0.0\n sm3=0.0\n sm_sensors=0\n sm_actuators=0\n sensor_mac_address=bytearray()\n actuator_mac_address=bytearray()\n weather_mac_adress=bytearray()\n ndvi_mac_adress=bytearray()\n valve_status=0\n valve_flow=0.0\n altitude=1440\n w_radiation=0.0\n w_temperature=0.0\n w_humidity=0.0\n w_wind=0.0\n w_eto=0.0\n ndvi_alpha=0.0\n ndvi_value=0.0\n ndvi_red=0.0\n ndvi_nir=0.0\n error_code=0\n comm_status=0\n \n#---------------------------------------------------------------\n# Main program\n#---------------------------------------------------------------\narea1=irrigation_area(1)\nprint(\"IRRIGATION AREA 1\")\n\nif area1.verbose:\n print(\"(1) Read configuration files\")\nwa_config_data.read_config_file(area1.config_file,area1)\nwa_config_data.read_operation_mode_file(area1.operation_mode_file,area1)\nif area1.verbose:\n print(\" OPERATION MODE: \"+area1.operation_mode)\n\nif area1.verbose:\n print(\"(2) Configure serial port\")\nser = wa_xbee_comm.config_xbee_comm('/dev/ttyS0',9600)\n\nif area1.verbose:\n print(\"(3) Transmit message to weather node\")\nfor i in range(3): \n wa_xbee_comm.transmit_weather_message(ser,area1,'WEATHER')\n wa_xbee_comm.receive_weather_message(ser,area1,'WEATHER')\n if area1.comm_status:\n break\n else:\n print(\" Communication retry....\")\n\nif area1.verbose:\n print(\"(4) Transmit message to sensor node\")\nfor i in range(3): \n wa_xbee_comm.transmit_sensor_message(ser,area1,'S1')\n wa_xbee_comm.receive_sensor_message(ser,area1,'S1')\n if area1.comm_status:\n break\n else:\n print(\" Communication retry....\")\n\nif area1.comm_status:\n\n if area1.verbose:\n print(\"(5) Calculate control action\")\n wa_control.data_fusion(area1)\n wa_control.control_algorithm(area1)\n\n if area1.verbose:\n print(\"(6) Transmit control action to actuator\")\n for i in range(3): \n wa_xbee_comm.transmit_actuator_message(ser,area1,'ACTUATOR')\n 
wa_xbee_comm.receive_actuator_message(ser,area1,'ACTUATOR')\n if area1.comm_status:\n break\n else:\n print(\" Communication retry....\")\n\nelse:\n if area1.verbose:\n print(\"(5) No control action due to SM data not available\")\n print(\"(6) No action transmitted to actuator due to SM data not available\")\n \nif area1.verbose:\n print(\"(7) Update configuration file\")\nwa_config_data.update_config_file(area1.config_file,area1)\n\nif area1.verbose:\n print(\"(8) Update data file\")\nwa_config_data.update_data_file(area1.data_file,area1)\n\nif area1.verbose:\n print(\"(9) Update log file\")\nwa_config_data.update_log_file(area1.log_file,area1)\n\t\nif area1.verbose:\n print(\"(10) Upload Data to Node Red via MQTT\")\nwa_mqtt_comm.update_irrigation(area1)\nwa_mqtt_comm.update_weather(area1)\n\n\n\n","sub_path":"01_Software/WA4_Raspberry_NodeRed/bin/control_loop_area1.py","file_name":"control_loop_area1.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"197835341","text":"'''\n LeNet-3D\n For cifar10\n NOTE: INPUT_CHANNELS must be > 1\n Default total parameter count of this model [reference baseline: cifar10]:\n Total params: 1,800,222\n Trainable params: 1,800,222\n Non-trainable params: 0\n'''\n\n\nfrom models.network import NetWork\nfrom models.advanced import AdvNet\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.layers import *\n\n\nclass lenet_3d(NetWork, AdvNet):\n \"\"\"\n LeNet-3D\n \"\"\"\n \n def args(self):\n self.CONV = [20, 50]\n self.CONV3D_SIZE = (3, 3, 1)\n self.STRIDES = (3, 3, 1)\n self.LOCAL_SIZE = 500\n self.DROP_RATE = 0.5\n # for test\n # self.BATCH_SIZE = 128\n # self.EPOCHS = 150\n self.OPT = 'sgd'\n self.OPT_EXIST = True\n\n def build_model(self):\n x_in = self.input(self.INPUT_SHAPE)\n\n # extend the rgb channels\n x = self.rgb_extand(x_in)\n # change 4D Tensor into 5D Tensor\n x = self.reshape(x, (self.INPUT_SHAPE[0], self.INPUT_SHAPE[1], 7, 1))\n \n # 3D Conv\n x = self.conv3d(x, self.CONV[0], self.CONV3D_SIZE)\n x = self.bn(x)\n x = self.relu(x)\n x = self.conv3d(x, self.CONV[0], self.CONV3D_SIZE)\n x = self.bn(x)\n x = self.relu(x)\n x = ZeroPadding3D((1, 1, 0))(x)\n x = MaxPool3D(self.STRIDES)(x)\n x = self.conv3d(x, self.CONV[1], self.CONV3D_SIZE)\n x = self.bn(x)\n x = self.relu(x)\n x = self.conv3d(x, self.CONV[1], self.CONV3D_SIZE)\n x = self.bn(x)\n x = self.relu(x)\n x = ZeroPadding3D((1, 1, 0))(x)\n x = MaxPool3D(self.STRIDES)(x)\n \n # Local\n # x = AvgPool3D((8, 8, 1))(x)\n x = self.flatten(x)\n x = self.local(x, self.LOCAL_SIZE)\n x = self.dropout(x, self.DROP_RATE)\n x = self.local(x, self.LOCAL_SIZE)\n x = self.local(x, self.NUM_CLASSES, activation='softmax')\n\n self.model = Model(inputs=x_in, outputs=x, name='lenet_3d')\n\n\n# test part\nif __name__ == \"__main__\":\n mod = lenet_3d(DATAINFO={'INPUT_SHAPE': (32, 32, 3), 'NUM_CLASSES': 10})\n print(mod.INPUT_SHAPE)\n print(mod.model.summary())\n","sub_path":"models/alpha/lenet_3d.py","file_name":"lenet_3d.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"233406198","text":"# coding=utf-8\n# Distributed under the MIT software license, see the accompanying\n# file LICENSE or http://www.opensource.org/licenses/mit-license.php.\nfrom unittest import TestCase\n\nfrom qrl.core import logger\nfrom qrl.core.VoteTracker import VoteTracker\nfrom qrl.core.Transaction import Vote\nfrom qrl.crypto.xmss import XMSS\nfrom tests.misc.helper import 
get_alice_xmss, get_random_xmss\n\nlogger.initialize_default()\n\n\nclass TestVoteTracker(TestCase):\n def __init__(self, *args, **kwargs):\n super(TestVoteTracker, self).__init__(*args, **kwargs)\n\n def test_add_vote1(self):\n vote_tracker = VoteTracker()\n alice_xmss = get_alice_xmss()\n slave_xmss = XMSS(alice_xmss.height, alice_xmss.get_seed())\n headerhash = b'ffff'\n vote = Vote.create(addr_from=alice_xmss.get_address().encode(),\n blocknumber=0,\n headerhash=headerhash,\n xmss=slave_xmss)\n vote.sign(slave_xmss)\n\n stake_amount = 101.5012\n\n self.assertFalse(vote_tracker.is_already_voted(vote))\n\n self.assertTrue(vote_tracker.add_vote(vote, stake_amount))\n self.assertFalse(vote_tracker.add_vote(vote, stake_amount))\n\n self.assertTrue(vote_tracker.is_already_voted(vote))\n\n vote_metadata = vote_tracker.get_consensus()\n\n self.assertNotEqual(vote_metadata, None)\n self.assertIn(vote.addr_from, vote_metadata.stake_validator_vote)\n self.assertEqual(vote, vote_metadata.stake_validator_vote[vote.addr_from])\n self.assertEqual(stake_amount, vote_metadata.total_stake_amount)\n self.assertEqual(vote_tracker.get_consensus_headerhash(), headerhash)\n\n def test_add_vote2(self):\n vote_tracker = VoteTracker()\n\n validator_xmss1 = get_alice_xmss()\n slave_xmss1 = XMSS(validator_xmss1.height, validator_xmss1.get_seed())\n stake_amount1 = 101.5012\n headerhash1 = b'ffff'\n vote1 = Vote.create(addr_from=validator_xmss1.get_address().encode(),\n blocknumber=0,\n headerhash=headerhash1,\n xmss=slave_xmss1)\n vote1.sign(slave_xmss1)\n\n self.assertTrue(vote_tracker.add_vote(vote1, stake_amount1))\n\n validator_xmss2 = get_random_xmss()\n slave_xmss2 = XMSS(validator_xmss2.height, validator_xmss2.get_seed())\n stake_amount2 = 10000\n headerhash2 = b'ffff'\n vote2 = Vote.create(addr_from=validator_xmss2.get_address().encode(),\n blocknumber=0,\n headerhash=headerhash2,\n xmss=slave_xmss2)\n vote2.sign(slave_xmss2)\n\n self.assertTrue(vote_tracker.add_vote(vote2, stake_amount2))\n\n vote_metadata = vote_tracker.get_consensus()\n\n self.assertNotEqual(vote_metadata, None)\n\n total_stake_amount = stake_amount1 + stake_amount2\n self.assertEqual(total_stake_amount, vote_metadata.total_stake_amount)\n self.assertEqual(vote_tracker.get_consensus_headerhash(), headerhash1)\n\n def test_add_vote3(self):\n vote_tracker = VoteTracker()\n\n validator_xmss1 = get_alice_xmss()\n slave_xmss1 = XMSS(validator_xmss1.height, validator_xmss1.get_seed())\n stake_amount1 = 101.5012\n headerhash1 = b'ffff'\n vote1 = Vote.create(addr_from=validator_xmss1.get_address().encode(),\n blocknumber=0,\n headerhash=headerhash1,\n xmss=slave_xmss1)\n vote1.sign(slave_xmss1)\n\n self.assertTrue(vote_tracker.add_vote(vote1, stake_amount1))\n\n validator_xmss2 = get_random_xmss()\n slave_xmss2 = XMSS(validator_xmss2.height, validator_xmss2.get_seed())\n headerhash2 = b'aaaa'\n stake_amount2 = 10000\n vote2 = Vote.create(addr_from=validator_xmss2.get_address().encode(),\n blocknumber=0,\n headerhash=headerhash2,\n xmss=slave_xmss2)\n vote2.sign(slave_xmss2)\n\n self.assertTrue(vote_tracker.add_vote(vote2, stake_amount2))\n\n vote_metadata = vote_tracker.get_consensus()\n\n self.assertNotEqual(vote_metadata, None)\n\n self.assertEqual(stake_amount2, vote_metadata.total_stake_amount)\n self.assertNotEqual(vote_tracker.get_consensus_headerhash(), headerhash1)\n self.assertEqual(vote_tracker.get_consensus_headerhash(), 
headerhash2)\n","sub_path":"tests/core/test_VoteTracker.py","file_name":"test_VoteTracker.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"653637238","text":"from getData import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nHIGH_LIMIT = 180\nLOW_LIMIT = 70\n\ndata = collectData()\n\n# dict = {monday [high, in-range, low], tuesday [...], ...}\ndailyData = {}\ndailyDataPercent = {}\n\nfor k in data:\n date = datetime.fromtimestamp(k / 1000)\n dayNum = date.weekday()\n if not dayNum in dailyData:\n dailyData[dayNum] = [0, 0, 0]\n else:\n if data[k] > HIGH_LIMIT:\n dailyData[dayNum][0] += 1\n elif data[k] < LOW_LIMIT:\n dailyData[dayNum][2] += 1\n elif LOW_LIMIT < data[k] < HIGH_LIMIT:\n dailyData[dayNum][1] += 1\n\nfor k in list(dailyData):\n totalEntries = sum(dailyData[k])\n if totalEntries == 0:\n del dailyData[k]\n\nfor k in dailyData:\n # totalEntries equals the sum of the list at the key value\n totalEntries = sum(dailyData[k])\n if not k in dailyDataPercent:\n dailyDataPercent[k] = [0, 0, 0]\n for a in range(3):\n dailyDataPercent[k][a] = dailyData[k][a]/totalEntries * 100\n\n# Graphing: (Stacked) bar graph showing the percent high/in-range/low for each day of the week\nfig = plt.figure()\n\nax2 = fig.add_subplot(111)\n\nhighList2 = []\ninRangeList2 = []\nlowList2 = []\n\ndailyDataPercentValues = list(dailyDataPercent.values())\n\nfor i in range(len(dailyDataPercentValues)):\n highList2.append(dailyDataPercentValues[i][0])\n inRangeList2.append(dailyDataPercentValues[i][1])\n lowList2.append(dailyDataPercentValues[i][2])\n\nlow = ax2.bar(range(len(dailyDataPercent)), lowList2, align='center', color='#d60000')\ninRange = ax2.bar(range(len(dailyDataPercent)), inRangeList2, align='center', color='#01c61f', bottom=lowList2)\nhigh = ax2.bar(range(len(dailyDataPercent)), highList2, align='center', color='#ffee02', bottom=inRangeList2)\n\nax2.set(title=\"Time in range by weekday\", ylabel=\"Percent\", xlabel=\"Day\")\n\naxes = plt.gca()\naxes.set_ylim([0, 100])\n\nplt.legend((high[0], inRange[0], low[0]), (\"High\", \"In-range\", \"Low\"), bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\nN = 7\nind = np.arange(N)\n\nplt.xticks(ind, ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'))\nplt.show()\n","sub_path":"graph2.py","file_name":"graph2.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"606787896","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\ui\\shared\\neocom\\neocom\\neocomSvc.py\nimport collections\nimport blue\nimport carbonui.const as uiconst\nimport localization\nimport neocomPanelEntries\nimport service\nimport uicontrols\nimport uiutil\nimport uthread\nimport util\nfrom achievements.client.achievementTreeWindow import AchievementTreeWindow\nfrom eve.client.script.ui.control.browser.eveBrowserWindow import BrowserWindow\nfrom eve.client.script.ui.control.eveWindowStack import WindowStack\nfrom eve.client.script.ui.services.redeemsvc import RedeemWindow\nfrom eve.client.script.ui.services.tutorialWindow import TutorialWindow\nfrom eve.client.script.ui.shared.addressBookWindow import AddressBookWindow\nfrom eve.client.script.ui.shared.agentFinder import AgentFinderWnd\nfrom eve.client.script.ui.shared.assetsWindow import AssetsWindow\nfrom 
eve.client.script.ui.shared.bountyWindow import BountyWindow\nfrom eve.client.script.ui.shared.comtool.lscchannel import Channel\nfrom eve.client.script.ui.shared.eveCalendar import CalendarWnd\nfrom eve.client.script.ui.shared.fitting.fittingWnd import FittingWindow2\nfrom eve.client.script.ui.shared.fittingMgmtWindow import FittingMgmt\nfrom eve.client.script.ui.shared.fleet.fleetwindow import FleetWindow\nfrom eve.client.script.ui.shared.industry.industryWnd import Industry\nfrom eve.client.script.ui.shared.inventory.invWindow import InventoryPrimary, ActiveShipCargo, StationItems, StationShips, StationCorpHangars, StationCorpDeliveries\nfrom eve.client.script.ui.shared.mapView.mapViewPanel import MapViewPanel\nfrom eve.client.script.ui.shared.mapView.mapViewUtil import IsMapBetaEnabled\nfrom eve.client.script.ui.shared.maps.browserwindow import MapBrowserWnd\nfrom eve.client.script.ui.shared.market.marketbase import RegionalMarket\nfrom eve.client.script.ui.shared.neocom.calculator import Calculator\nfrom eve.client.script.ui.shared.neocom.channels import Channels\nfrom eve.client.script.ui.shared.neocom.characterSheetWindow import CharacterSheetWindow\nfrom eve.client.script.ui.shared.neocom.compare import TypeCompare\nfrom eve.client.script.ui.shared.neocom.contracts.contractsWnd import ContractsWindow\nfrom eve.client.script.ui.shared.neocom.corporation.base_corporation_ui import CorporationWindow\nfrom eve.client.script.ui.shared.neocom.evemail import MailWindow\nfrom eve.client.script.ui.shared.neocom.help import HelpWindow\nfrom eve.client.script.ui.shared.neocom.journal import JournalWindow\nfrom eve.client.script.ui.shared.neocom.neocom.neocomCommon import BTNTYPE_WINDOW\nfrom eve.client.script.ui.shared.neocom.notepad import NotepadWindow\nfrom eve.client.script.ui.shared.neocom.wallet import WalletWindow\nfrom eve.client.script.ui.shared.planet.planetWindow import PlanetWindow\nfrom eve.client.script.ui.shared.twitch.twitchStreaming import TwitchStreaming\nfrom eve.client.script.ui.shared.uilog import LoggerWindow\nfrom eve.client.script.ui.station.fw.base_fw import MilitiaWindow\nfrom eve.client.script.ui.station.securityOfficeWindow import SecurityOfficeWindow\nfrom eve.client.script.ui.structure.accessGroups.accesGroupsWnd import AccessGroupsWnd\nfrom eve.client.script.ui.structure.structureBrowser.structureBrowserWnd import StructureBrowserWnd\nfrom projectdiscovery.client.projectdiscoveryClientSvc import PROJECT_DISCOVERY_ID\nfrom projectdiscovery.client.window import ProjectDiscoveryWindow\nfrom seasons.client.const import SCOPE_LOGO_RES_PATH, get_seasons_title_label_path\nfrom seasons.client.seasonwindow import SeasonWindow\nfrom seasons.common.const import SEASONAL_EVENTS_ID\nfrom . 
import neocomCommon\nfrom .neocom import Neocom\nfrom .neocomPanels import PanelEveMenu, PanelBase\nDEBUG_ALWAYSLOADRAW = False\nNOTPERSISTED_BTNTYPES = (\n neocomCommon.BTNTYPE_WINDOW,)\nRAWDATA_NEOCOMDEFAULT = [\n (\n neocomCommon.BTNTYPE_CHAT, 'chat', None),\n (\n neocomCommon.BTNTYPE_CMD, 'inventory', None),\n (\n neocomCommon.BTNTYPE_CMD, 'addressbook', None),\n (\n neocomCommon.BTNTYPE_CMD, 'mail', None),\n (\n neocomCommon.BTNTYPE_CMD, 'fitting', None),\n (\n neocomCommon.BTNTYPE_CMD, 'market', None),\n (\n neocomCommon.BTNTYPE_CMD, 'industry', None),\n (\n neocomCommon.BTNTYPE_CMD, 'corporation', None),\n (\n neocomCommon.BTNTYPE_CMD, 'fleet', None),\n (\n neocomCommon.BTNTYPE_CMD, 'map', None),\n (\n neocomCommon.BTNTYPE_CMD, 'map_beta', None),\n (\n neocomCommon.BTNTYPE_CMD, 'shipTree', None),\n (\n neocomCommon.BTNTYPE_CMD, 'assets', None),\n (\n neocomCommon.BTNTYPE_CMD, 'wallet', None),\n (\n neocomCommon.BTNTYPE_CMD, 'journal', None),\n (\n neocomCommon.BTNTYPE_CMD, 'aurumStore', None),\n (\n neocomCommon.BTNTYPE_CMD, 'opportunities', None),\n (\n neocomCommon.BTNTYPE_CMD, 'tutorial', None),\n (\n neocomCommon.BTNTYPE_CMD, 'help', None)]\nRAWDATA_EVEMENU = [\n (\n neocomCommon.BTNTYPE_GROUP, 'groupInventory',\n [\n (\n neocomCommon.BTNTYPE_CMD, 'inventory', None),\n (\n neocomCommon.BTNTYPE_CMD, 'activeShipCargo', None),\n (\n neocomCommon.BTNTYPE_CMD, 'itemHangar', None),\n (\n neocomCommon.BTNTYPE_CMD, 'shipHangar', None),\n (\n neocomCommon.BTNTYPE_CMD, 'corpHangar', None),\n (\n neocomCommon.BTNTYPE_CMD, 'corpDeliveriesHangar', None),\n (\n neocomCommon.BTNTYPE_CMD, 'assets', None),\n (\n neocomCommon.BTNTYPE_CMD, 'redeemItems', None)]),\n (\n neocomCommon.BTNTYPE_GROUP, 'groupAccessories',\n [\n (\n neocomCommon.BTNTYPE_BOOKMARKS, 'bookmarkedsites', None),\n (\n neocomCommon.BTNTYPE_CMD, 'browser', None),\n (\n neocomCommon.BTNTYPE_CMD, 'calculator', None),\n (\n neocomCommon.BTNTYPE_CMD, 'notepad', None),\n (\n neocomCommon.BTNTYPE_CMD, 'log', None)]),\n (\n neocomCommon.BTNTYPE_GROUP, 'groupBusiness',\n [\n (\n neocomCommon.BTNTYPE_CMD, 'market', None),\n (\n neocomCommon.BTNTYPE_CMD, 'contracts', None),\n (\n neocomCommon.BTNTYPE_CMD, 'wallet', None),\n (\n neocomCommon.BTNTYPE_CMD, 'industry', None),\n (\n neocomCommon.BTNTYPE_CMD, 'planets', None),\n (\n neocomCommon.BTNTYPE_CMD, 'agentfinder', None),\n (\n neocomCommon.BTNTYPE_CMD, 'militia', None),\n (\n neocomCommon.BTNTYPE_CMD, 'bountyoffice', None),\n (\n neocomCommon.BTNTYPE_CMD, 'structurebrowser', None),\n (\n neocomCommon.BTNTYPE_CMD, 'accessgroups', None)]),\n (\n neocomCommon.BTNTYPE_GROUP, 'groupSocial',\n [\n (\n neocomCommon.BTNTYPE_CMD, 'mail', None),\n (\n neocomCommon.BTNTYPE_CMD, 'calendar', None),\n (\n neocomCommon.BTNTYPE_CMD, 'corporation', None),\n (\n neocomCommon.BTNTYPE_CMD, 'sovdashboard', None),\n (\n neocomCommon.BTNTYPE_CMD, 'fleet', None),\n (\n neocomCommon.BTNTYPE_TWITCH, 'twitch', None),\n (\n neocomCommon.BTNTYPE_CMD, 'chatchannels', None)]),\n (\n neocomCommon.BTNTYPE_CMD, 'charactersheet', None),\n (\n neocomCommon.BTNTYPE_CMD, 'addressbook', None),\n (\n neocomCommon.BTNTYPE_CMD, 'fitting', None),\n (\n neocomCommon.BTNTYPE_CMD, 'fittingMgmt', None),\n (\n neocomCommon.BTNTYPE_CMD, 'map_old', None),\n (\n neocomCommon.BTNTYPE_CMD, 'map_beta', None),\n (\n neocomCommon.BTNTYPE_CMD, 'shipTree', None),\n (\n neocomCommon.BTNTYPE_CMD, 'journal', None),\n (\n neocomCommon.BTNTYPE_CMD, 'compareTool', None),\n (\n neocomCommon.BTNTYPE_CMD, 'aurumStore', None),\n (\n 
neocomCommon.BTNTYPE_CMD, 'tutorial', None),\n (\n neocomCommon.BTNTYPE_CMD, 'opportunities', None)]\nRAWDATA_EVEMENU_DEFAULT = [\n (\n neocomCommon.BTNTYPE_CMD, 'help', None),\n (\n neocomCommon.BTNTYPE_CMD, 'settings', None)]\nSCOPE_NETWORK_BTN = (\n neocomCommon.BTNTYPE_CMD, SEASONAL_EVENTS_ID, None)\n\nclass BtnDataRaw():\n\n def __init__(self, label=None, cmdName=None, iconPath=None, wndCls=None):\n self.label = label\n self.cmdName = cmdName\n self.iconPath = iconPath\n self.wndCls = wndCls\n\n\nBTNDATARAW_BY_ID = {'addressbook': BtnDataRaw(cmdName='OpenPeopleAndPlaces', wndCls=AddressBookWindow),\n 'agentfinder': BtnDataRaw(cmdName='OpenAgentFinder', wndCls=AgentFinderWnd),\n 'assets': BtnDataRaw(cmdName='OpenAssets', wndCls=AssetsWindow),\n 'bookmarkedsites': BtnDataRaw(label='UI/Neocom/BrowserBookmarksBtn', iconPath='res:/UI/Texture/windowIcons/browserbookmarks.png'),\n 'bountyoffice': BtnDataRaw(cmdName='OpenBountyOffice', wndCls=BountyWindow),\n 'browser': BtnDataRaw(cmdName='OpenBrowser', wndCls=BrowserWindow),\n 'calculator': BtnDataRaw(cmdName='OpenCalculator', wndCls=Calculator),\n 'calendar': BtnDataRaw(cmdName='OpenCalendar', wndCls=CalendarWnd),\n 'charactersheet': BtnDataRaw(cmdName='OpenCharactersheet', wndCls=CharacterSheetWindow),\n 'chat': BtnDataRaw(wndCls=Channel, label='UI/Chat/ChannelWindow/Channels'),\n 'chatchannels': BtnDataRaw(cmdName='OpenChannels', wndCls=Channels),\n 'contracts': BtnDataRaw(cmdName='OpenContracts', wndCls=ContractsWindow),\n 'corporation': BtnDataRaw(cmdName='OpenCorporationPanel', wndCls=CorporationWindow),\n 'fitting': BtnDataRaw(cmdName='OpenFitting', wndCls=FittingWindow2),\n 'fittingMgmt': BtnDataRaw(cmdName='OpenFittingMgmt', wndCls=FittingMgmt),\n 'fleet': BtnDataRaw(cmdName='OpenFleet', wndCls=FleetWindow),\n 'group': BtnDataRaw(label='UI/Neocom/ButtonGroup', iconPath=neocomCommon.ICONPATH_GROUP),\n 'groupInventory': BtnDataRaw(label='UI/Neocom/GroupInventory', iconPath=neocomCommon.ICONPATH_GROUP),\n 'groupAccessories': BtnDataRaw(label='UI/Neocom/GroupAccessories', iconPath=neocomCommon.ICONPATH_GROUP),\n 'groupBusiness': BtnDataRaw(label='UI/Neocom/GroupBusiness', iconPath=neocomCommon.ICONPATH_GROUP),\n 'groupSocial': BtnDataRaw(label='UI/Neocom/GroupSocial', iconPath=neocomCommon.ICONPATH_GROUP),\n 'help': BtnDataRaw(cmdName='OpenHelp', wndCls=HelpWindow),\n 'inventory': BtnDataRaw(cmdName='OpenInventory', wndCls=InventoryPrimary),\n 'activeShipCargo': BtnDataRaw(cmdName='OpenCargoHoldOfActiveShip', wndCls=ActiveShipCargo),\n 'itemHangar': BtnDataRaw(cmdName='OpenHangarFloor', wndCls=StationItems),\n 'shipHangar': BtnDataRaw(cmdName='OpenShipHangar', wndCls=StationShips),\n 'corpHangar': BtnDataRaw(cmdName='OpenCorpHangar', wndCls=StationCorpHangars),\n 'corpDeliveriesHangar': BtnDataRaw(cmdName='OpenCorpDeliveries', wndCls=StationCorpDeliveries),\n 'journal': BtnDataRaw(cmdName='OpenJournal', wndCls=JournalWindow),\n 'log': BtnDataRaw(cmdName='OpenLog', wndCls=LoggerWindow),\n 'mail': BtnDataRaw(cmdName='OpenMail', wndCls=MailWindow),\n 'map': BtnDataRaw(cmdName='CmdToggleMap', iconPath='res:/UI/Texture/windowIcons/map.png'),\n 'map_old': BtnDataRaw(cmdName='CmdToggleMap', iconPath='res:/UI/Texture/windowIcons/map.png'),\n 'map_beta': BtnDataRaw(cmdName='CmdToggleMapBeta', iconPath='res:/UI/Texture/windowIcons/map.png'),\n 'shipTree': BtnDataRaw(cmdName='CmdToggleShipTree', iconPath='res:/ui/texture/windowIcons/ISIS.png'),\n 'market': BtnDataRaw(cmdName='OpenMarket', wndCls=RegionalMarket),\n 'militia': 
BtnDataRaw(cmdName='OpenMilitia', wndCls=MilitiaWindow),\n 'navyoffices': BtnDataRaw(wndCls=MilitiaWindow),\n 'notepad': BtnDataRaw(cmdName='OpenNotepad', wndCls=NotepadWindow),\n 'industry': BtnDataRaw(label='UI/Neocom/IndustryBtn', cmdName='OpenIndustry', wndCls=Industry),\n 'planets': BtnDataRaw(label='UI/ScienceAndIndustry/PlanetaryColonies', cmdName='OpenPlanets', wndCls=PlanetWindow),\n 'tutorial': BtnDataRaw(cmdName='OpenTutorial', wndCls=TutorialWindow),\n 'wallet': BtnDataRaw(cmdName='OpenWallet', wndCls=WalletWindow),\n 'settings': BtnDataRaw(cmdName='CmdToggleSystemMenu', iconPath='res:/ui/texture/WindowIcons/settings.png'),\n 'securityoffice': BtnDataRaw(cmdName='OpenSecurityOffice', wndCls=SecurityOfficeWindow),\n 'compareTool': BtnDataRaw(cmdName='OpenCompare', wndCls=TypeCompare),\n 'twitch': BtnDataRaw(cmdName='OpenTwitchStreaming', wndCls=TwitchStreaming),\n 'aurumStore': BtnDataRaw(cmdName='ToggleAurumStore', iconPath='res:/ui/texture/WindowIcons/NES.png'),\n 'redeemItems': BtnDataRaw(cmdName='ToggleRedeemItems', wndCls=RedeemWindow),\n 'opportunities': BtnDataRaw(cmdName='ToggleOpportunity', wndCls=AchievementTreeWindow),\n 'structurebrowser': BtnDataRaw(cmdName='OpenStructureBrowser', wndCls=StructureBrowserWnd),\n 'accessgroups': BtnDataRaw(cmdName='OpenAccessGroupsWindow', wndCls=AccessGroupsWnd)\n }\n\ndef AddDefaultEveMenu():\n for button in RAWDATA_EVEMENU_DEFAULT:\n if button not in RAWDATA_EVEMENU:\n RAWDATA_EVEMENU.append(button)\n\n\ndef AddProjectDiscoveryIfEnabled():\n if sm.RemoteSvc('ProjectDiscovery').is_enabled():\n if PROJECT_DISCOVERY_ID not in BTNDATARAW_BY_ID:\n RAWDATA_NEOCOMDEFAULT.append((\n neocomCommon.BTNTYPE_CMD, PROJECT_DISCOVERY_ID, None))\n RAWDATA_EVEMENU[2][2].append((neocomCommon.BTNTYPE_CMD, PROJECT_DISCOVERY_ID, None))\n BTNDATARAW_BY_ID[PROJECT_DISCOVERY_ID] = BtnDataRaw(label=PROJECT_DISCOVERY_ID, cmdName='ToggleProjectDiscovery', iconPath='res:/ui/texture/WindowIcons/projectdiscovery.png', wndCls=ProjectDiscoveryWindow)\n return\n\n\ndef AddScopeNetworkIfEnabled():\n if not sm.GetService('seasonService').is_season_active():\n if SEASONAL_EVENTS_ID in BTNDATARAW_BY_ID:\n RAWDATA_NEOCOMDEFAULT.remove(SCOPE_NETWORK_BTN)\n RAWDATA_EVEMENU.remove(SCOPE_NETWORK_BTN)\n del BTNDATARAW_BY_ID[SEASONAL_EVENTS_ID]\n return\n if SEASONAL_EVENTS_ID not in BTNDATARAW_BY_ID:\n BTNDATARAW_BY_ID[SEASONAL_EVENTS_ID] = BtnDataRaw(label=get_seasons_title_label_path(), cmdName='ToggleSeasonWindow', iconPath=SCOPE_LOGO_RES_PATH, wndCls=SeasonWindow)\n RAWDATA_NEOCOMDEFAULT.append(SCOPE_NETWORK_BTN)\n RAWDATA_EVEMENU.append(SCOPE_NETWORK_BTN)\n sm.GetService('cmd').AddSeasonWindowCommand()\n\n\ndef ConvertOldTypeOfRawData(rawData):\n if isinstance(rawData, tuple):\n if len(rawData) == 3:\n btnType, id, children = rawData\n else:\n btnType, id, iconPath, children = rawData\n return util.KeyVal(btnType=btnType, id=id, children=children)\n return rawData\n\n\nclass NeocomSvc(service.Service):\n __update_on_reload__ = 1\n __guid__ = 'svc.neocom'\n __notifyevents__ = ['OnSessionChanged', 'OnWindowOpened',\n 'OnWindowClosed', 'OnWindowMinimized', 'OnWindowMaximized']\n\n def Run(self, *args):\n self.eveMenu = None\n self.folderDropCookie = None\n self.currPanels = []\n self.neocom = None\n self.updatingWindowPush = False\n self.blinkQueue = []\n self.btnData = None\n self.blinkThread = None\n return\n\n def Stop(self, memStream=None):\n self.CloseAllPanels()\n for cont in uicore.layer.sidePanels.children:\n if cont.name == 'Neocom':\n cont.Close()\n\n for cont in 
uicore.layer.abovemain.children:\n if isinstance(cont, PanelBase):\n cont.Close()\n\n if self.neocom:\n self.neocom.Close()\n self.neocom = None\n if self.blinkThread:\n self.blinkThread.kill()\n self.blinkThread = None\n return\n\n def Reload(self):\n self.Stop()\n self.Run()\n if self.neocom:\n self.neocom.Close()\n self.CreateNeocom()\n self.UpdateNeocomButtons()\n\n def _CheckNewDefaultButtons(self, rawData):\n originalRawData = settings.char.ui.Get('neocomButtonRawDataOriginal', self._GetDefaultRawButtonData())\n newOriginalData = []\n for data in self._GetDefaultRawButtonData():\n data = ConvertOldTypeOfRawData(data)\n newOriginalData.append(data)\n if not self._IsWndIDInRawData(data.id, originalRawData):\n if not self._IsWndIDInRawData(data.id, rawData):\n rawData.append(data)\n\n settings.char.ui.Set('neocomButtonRawDataOriginal', tuple(newOriginalData))\n\n def _GetDefaultRawButtonData(self):\n return RAWDATA_NEOCOMDEFAULT\n\n def _IsWndIDInRawData(self, checkWndID, rawData):\n if not rawData:\n return False\n for data in rawData:\n data = ConvertOldTypeOfRawData(data)\n if checkWndID == data.id or self._IsWndIDInRawData(checkWndID, data.children):\n return True\n\n return False\n\n def ResetEveMenuBtnData(self):\n self.eveMenuBtnData = BtnDataHeadNode('eveMenu', RAWDATA_EVEMENU, isRemovable=False, persistChildren=False)\n\n def OnSessionChanged(self, isRemote, sess, change):\n if 'stationid' in change or 'structureid' in change:\n self.scopeSpecificBtnData = self.GetScopeSpecificButtonData(recreate=True)\n self.UpdateNeocomButtons()\n\n def CreateNeocom(self):\n AddProjectDiscoveryIfEnabled()\n AddScopeNetworkIfEnabled()\n AddDefaultEveMenu()\n if not self.btnData:\n rawData = settings.char.ui.Get('neocomButtonRawData', self._GetDefaultRawButtonData())\n self._CheckNewDefaultButtons(rawData)\n if DEBUG_ALWAYSLOADRAW:\n rawData = self._GetDefaultRawButtonData()\n self.btnData = BtnDataHeadNode('neocom', rawData)\n self.scopeSpecificBtnData = None\n self.ResetEveMenuBtnData()\n if not self.neocom:\n self.neocom = Neocom(parent=uicore.layer.sidePanels, idx=0)\n for wnd in uicore.registry.GetWindows():\n self.OnWindowOpened(wnd)\n\n for blinkData in self.blinkQueue:\n self.Blink(*blinkData)\n\n self.blinkQueue = []\n if self.blinkThread:\n self.blinkThread.kill()\n self.blinkThread = None\n self.blinkThread = uthread.new(self._BlinkThread)\n return\n\n def _BlinkThread(self):\n while True:\n blue.synchro.SleepWallclock(neocomCommon.BLINK_INTERVAL)\n sm.ChainEvent('ProcessNeocomBlinkPulse')\n\n def OnWindowOpened(self, wnd):\n if not self.neocom:\n return\n elif not wnd or wnd.destroyed:\n return\n elif not wnd.IsKillable() or self._IsWindowIgnored(wnd):\n return\n else:\n for btnHeadData in (self.btnData, self.scopeSpecificBtnData):\n if not btnHeadData:\n continue\n for btnData in btnHeadData.children:\n if btnData.btnType != neocomCommon.BTNTYPE_WINDOW and wnd.__class__ == btnData.wndCls:\n BtnDataNode(parent=btnData, children=None, iconPath=btnData.iconPath, label=wnd.GetCaption(), id=wnd.windowID, btnType=neocomCommon.BTNTYPE_WINDOW, wnd=wnd, isDraggable=False)\n btnData.SetActive()\n return\n\n self.AddWindowButton(wnd)\n return\n\n def ResetButtons(self):\n if uicore.Message('AskRestartNeocomButtons', {}, uiconst.YESNO) == uiconst.ID_YES:\n settings.char.ui.Set('neocomButtonRawData', None)\n settings.user.windows.Set('neocomWidth', Neocom.default_width)\n self.Reload()\n return\n\n def _IsWindowIgnored(self, wnd):\n IGNORECLASSES = (\n WindowStack, Channel, MapBrowserWnd, 
MapViewPanel)\n for classType in IGNORECLASSES:\n if isinstance(wnd, classType):\n return True\n\n if wnd.isModal:\n return True\n return False\n\n def GetCmdNameForWindowFromRawData(self, wnd):\n for k, rButtonData in BTNDATARAW_BY_ID.iteritems():\n if rButtonData.wndCls == wnd.__class__:\n return getattr(rButtonData, 'cmdName', None)\n\n return None\n\n def AddWindowButton(self, wnd):\n btnData = self._GetBtnDataByGUID(wnd.__class__)\n if not btnData:\n btnData = BtnDataNode(parent=self.btnData, children=None, iconPath=wnd.iconNum, label=wnd.GetCaption(), id=wnd.__guid__, guid=wnd.__class__, btnType=BTNTYPE_WINDOW, isActive=True)\n cmdName = self.GetCmdNameForWindowFromRawData(wnd)\n btnData.cmdName = cmdName\n btnType = wnd.GetNeocomButtonType()\n cmdName = self.GetCmdNameForWindowFromRawData(wnd)\n childButtonData = BtnDataNode(parent=btnData, children=None, iconPath=wnd.iconNum, label=wnd.GetCaption(), id=wnd.windowID, btnType=btnType, wnd=wnd, isDraggable=False)\n if cmdName:\n childButtonData.cmdName = cmdName\n if btnData and btnData.btnUI:\n btnData.btnUI.UpdateIcon()\n return\n\n def _GetBtnDataByGUID(self, guid):\n if not guid:\n return\n else:\n for btnHeadData in (self.btnData, self.scopeSpecificBtnData):\n if not btnHeadData:\n continue\n for btnData in btnHeadData.children:\n if getattr(btnData, 'guid', None) == guid:\n return btnData\n\n return\n\n def RemoveWindowButton(self, wndID, wndCaption, wndGUID):\n btnData = self._GetBtnDataByGUID(wndGUID)\n if not btnData:\n return\n else:\n for btnChildData in btnData.children:\n wnd = getattr(btnChildData, 'wnd', None)\n if not wnd or wnd.destroyed or wnd.windowID == wndID:\n btnChildData.Remove()\n elif not wnd.IsKillable() and not wnd.IsMinimized():\n btnChildData.Remove()\n\n if not btnData.children:\n if btnData.btnType == neocomCommon.BTNTYPE_WINDOW:\n btnData.Remove()\n else:\n btnData.SetInactive()\n return\n\n def UpdateNeocomButtons(self):\n if self.neocom is not None:\n self.neocom.UpdateButtons()\n return\n\n def OnWindowMinimized(self, wnd):\n if not self.neocom:\n return\n if not wnd or wnd.destroyed:\n return\n if not wnd.IsKillable():\n self.AddWindowButton(wnd)\n\n def OnWindowMaximized(self, wnd, wasMinimized):\n if not self.neocom:\n return\n if not wnd or wnd.destroyed:\n return\n if not wnd.IsKillable():\n self.RemoveWindowButton(wnd.windowID, wnd.GetCaption(), wnd.__class__)\n\n def OnWindowClosed(self, wndID, wndCaption, wndGUID):\n if not self.neocom:\n return\n self.RemoveWindowButton(wndID, wndCaption, wndGUID)\n\n def GetButtonData(self):\n return self.btnData.GetButtonsInScope()\n\n def GetScopeSpecificButtonData(self, recreate=False):\n if session.stationid is not None or session.structureid is not None:\n if recreate or self.scopeSpecificBtnData is None:\n self.scopeSpecificBtnData = self.GetStationButtonData()\n return self.scopeSpecificBtnData\n\n def GetStationButtonData(self):\n return None\n\n def GetMinimizeToPos(self, wnd):\n if not self.btnData:\n return (0, 0)\n else:\n if isinstance(wnd, uicontrols.WindowStack):\n wnd = wnd.GetActiveWindow()\n if isinstance(wnd, Channel):\n btnData = self.btnData.GetBtnDataByTypeAndID(neocomCommon.BTNTYPE_CHAT, 'chat')\n else:\n btnData = self._GetBtnDataByGUID(wnd.__class__)\n if btnData and btnData.btnUI:\n if btnData.btnUI.state == uiconst.UI_HIDDEN:\n uiObj = self.neocom.overflowBtn\n else:\n uiObj = btnData.btnUI\n uiObj.BlinkOnce()\n l, t, w, h = uiObj.GetAbsolute()\n return (\n l + w / 2, t + h / 2)\n uiObj = self.neocom.buttonCont.children[-1]\n if 
uiObj.state == uiconst.UI_HIDDEN:\n uiObj = self.neocom.overflowBtn\n uiObj.BlinkOnce()\n l, t, w, h = uiObj.GetAbsolute()\n return (\n l + w / 2, t + h / 2)\n\n def Blink(self, wndID, hint=None, numBlinks=None):\n if not self.neocom:\n self.blinkQueue.append((wndID, hint,\n numBlinks))\n return\n elif not self.IsBlinkingEnabled():\n return\n elif wndID == 'charactersheet':\n self.neocom.charSheetBtn.EnableBlink()\n return\n else:\n if wndID == 'calendar':\n if CalendarWnd.GetIfOpen():\n return\n btnData = self.btnData.GetBtnDataByTypeAndID(neocomCommon.BTNTYPE_CMD, wndID, recursive=True)\n if not btnData:\n self.neocom.clockCont.EnableBlink()\n return\n elif wndID == 'eveMenuBtn':\n self.eveMenuBtnData.isBlinking = True\n return\n headNodesToCheck = (self.btnData, self.scopeSpecificBtnData, self.eveMenuBtnData)\n for headBtnData in headNodesToCheck:\n if headBtnData is None:\n continue\n btnData = headBtnData.GetBtnDataByTypeAndID(neocomCommon.BTNTYPE_CMD, wndID, recursive=True)\n if btnData:\n btnData.SetBlinkingOn(hint, numBlinks)\n return\n\n return\n\n def BlinkOff(self, wndID):\n if wndID == 'calendar':\n self.neocom.clockCont.DisableBlink()\n if not self.neocom:\n return\n btnData = self.btnData.GetBtnDataByTypeAndID(neocomCommon.BTNTYPE_CMD, wndID, recursive=True)\n if not btnData:\n return\n btnData.SetBlinkingOff()\n\n def OnNeocomButtonsRecreated(self):\n self.CloseAllPanels()\n\n def ShowPanel(self, triggerCont, panelClass, panelAlign, *args, **kw):\n panel = panelClass(idx=0, *args, **kw)\n self.currPanels.append(panel)\n uicore.event.RegisterForTriuiEvents(uiconst.UI_MOUSEDOWN, self.OnGlobalMouseDown)\n self.CheckPanelPosition(panel, triggerCont, panelAlign)\n panel.EntryAnimation()\n return panel\n\n def CheckPanelPosition(self, panel, triggerCont, panelAlign):\n l, t, w, h = triggerCont.GetAbsolute()\n if panelAlign == neocomCommon.PANEL_SHOWABOVE:\n panel.left = l\n panel.top = t - panel.height\n if panel.left + panel.width > uicore.desktop.width:\n panel.left = l - panel.width + w\n elif panelAlign == neocomCommon.PANEL_SHOWONSIDE:\n if self.neocom.align == uiconst.TOLEFT:\n panel.left = l + w\n else:\n panel.left = l - panel.width\n panel.top = t\n if panel.top + panel.height > uicore.desktop.height - self.neocom.height:\n panel.top = t - panel.height + h\n if panel.left + panel.width > uicore.desktop.width:\n panel.left = l - panel.width\n dw = uicore.desktop.width\n dh = uicore.desktop.height\n if panel.top < 0:\n panel.top = 0\n elif panel.top + panel.height > dh:\n panel.top = dh - panel.height\n if panel.left < 0:\n panel.left = 0\n elif panel.left + panel.width > dw:\n panel.left = dw - panel.width\n\n def IsSomePanelOpen(self):\n return len(self.currPanels) > 0\n\n def OnGlobalMouseDown(self, cont, *args):\n if hasattr(cont, 'ToggleNeocomPanel'):\n return True\n if not isinstance(cont, neocomPanelEntries.PanelEntryBase):\n sm.ScatterEvent('OnNeocomPanelsClosed')\n self.CloseAllPanels()\n return False\n return True\n\n def CloseAllPanels(self):\n for panel in self.currPanels:\n panel.Close()\n\n self.currPanels = []\n\n def ClosePanel(self, panel):\n self.currPanels.remove(panel)\n panel.Close()\n\n def CloseChildrenPanels(self, btnData):\n toRemove = []\n for panel in self.currPanels:\n if panel.btnData and panel.btnData.IsDescendantOf(btnData):\n toRemove.append(panel)\n\n for panel in toRemove:\n panel.Close()\n self.currPanels.remove(panel)\n\n def ToggleEveMenu(self):\n if self.eveMenu and not self.eveMenu.destroyed:\n self.CloseAllPanels()\n self.eveMenu = 
None\n else:\n self.ShowEveMenu()\n return\n\n def ShowEveMenu(self):\n self.neocom.UnhideNeocom(sleep=True)\n self.eveMenu = self.ShowPanel(self.neocom, PanelEveMenu, neocomCommon.PANEL_SHOWONSIDE, parent=uicore.layer.abovemain, btnData=self.eveMenuBtnData)\n sm.ScatterEvent('OnEveMenuShown')\n return self.eveMenu\n\n def GetSideOffset(self):\n width = settings.user.windows.Get('neocomWidth', Neocom.default_width)\n align = settings.char.ui.Get('neocomAlign', Neocom.default_align)\n if align == uiconst.TOLEFT:\n return (width, 0)\n else:\n return (0, width)\n\n def GetUIObjectByID(self, wndID):\n if not self.neocom:\n return\n elif wndID == 'charactersheet':\n return self.neocom.charSheetBtn\n elif wndID == 'skillTrainingCont':\n return self.neocom.skillTrainingCont\n for btnData in (self.btnData, self.scopeSpecificBtnData):\n if btnData:\n node = btnData.GetBtnDataByTypeAndID(None, wndID, recursive=True)\n if node:\n if node.btnUI.destroyed:\n return\n return node.btnUI\n\n node = self.eveMenuBtnData.GetBtnDataByTypeAndID(None, wndID, recursive=True)\n if node:\n return self.neocom.eveMenuBtn\n else:\n return\n\n def IsButtonVisible(self, wndID):\n for btnData in (self.btnData, self.scopeSpecificBtnData):\n if btnData is None:\n continue\n node = btnData.GetBtnDataByTypeAndID(None, wndID)\n if node:\n return True\n\n return False\n\n def OnButtonDragEnter(self, btnData, dragBtnData, *args):\n if not self.IsValidDropData(dragBtnData):\n return\n btns = self.GetButtonData()\n if btnData in btns:\n index = btns.index(btnData)\n else:\n index = len(btns)\n self.neocom.ShowDropIndicatorLine(index)\n\n def OnButtonDragExit(self, *args):\n self.neocom.HideDropIndicatorLine()\n\n def OnBtnDataDropped(self, btnData, index=None):\n if not self.IsValidDropData(btnData):\n return\n oldHeadNode = btnData.GetHeadNode()\n oldBtnData = self.btnData.GetBtnDataByGUID(btnData.wndCls, recursive=False)\n if btnData.btnType == neocomCommon.BTNTYPE_GROUP and oldHeadNode != self.btnData.GetHeadNode():\n toRemove = []\n for child in btnData.children:\n btnDataFound = self.btnData.GetBtnDataByGUID(child.wndCls, recursive=True)\n if btnDataFound:\n toRemove.append(child)\n else:\n child.isRemovable = True\n\n for child in toRemove:\n btnData.RemoveChild(child)\n\n btnData.MoveTo(self.btnData, index)\n if oldBtnData and oldBtnData != btnData:\n for child in oldBtnData.children:\n child.parent = btnData\n\n btnData.SetActive()\n oldBtnData.Remove()\n if oldHeadNode == self.eveMenuBtnData:\n self.ResetEveMenuBtnData()\n btnData.isRemovable = True\n\n def IsValidDropData(self, btnData):\n if not btnData:\n return False\n if isinstance(btnData, collections.Iterable):\n btnData = btnData[0]\n if not isinstance(btnData, BtnDataNode):\n return False\n if btnData.GetHeadNode() != self.btnData.GetHeadNode():\n if btnData.btnType == neocomCommon.BTNTYPE_GROUP:\n if self.btnData.GetBtnDataByTypeAndID(neocomCommon.BTNTYPE_GROUP, btnData.id, recursive=True):\n return False\n else:\n foundBtnData = self.btnData.GetBtnDataByGUID(btnData.wndCls, recursive=True)\n if foundBtnData and foundBtnData.btnType != neocomCommon.BTNTYPE_WINDOW:\n return False\n return True\n\n def GetMenu(self):\n m = [\n (\n localization.GetByLabel('UI/Neocom/CreateNewGroup'),\n self.AddNewGroup), None]\n if self.neocom.IsSizeLocked():\n m.append((\n uiutil.MenuLabel('UI/Neocom/UnlockNeocom'),\n self.neocom.SetSizeLocked, (False,)))\n else:\n m.append((uiutil.MenuLabel('UI/Neocom/LockNeocom'), self.neocom.SetSizeLocked, (True,)))\n if 
self.neocom.IsAutoHideActive():\n m.append((\n uiutil.MenuLabel('UI/Neocom/AutohideOff'), self.neocom.SetAutoHideOff))\n else:\n m.append((uiutil.MenuLabel('UI/Neocom/AutohideOn'), self.neocom.SetAutoHideOn))\n if self.neocom.align == uiconst.TOLEFT:\n m.append((uiutil.MenuLabel('UI/Neocom/AlignRight'), self.neocom.SetAlignRight))\n else:\n m.append((uiutil.MenuLabel('UI/Neocom/AlignLeft'), self.neocom.SetAlignLeft))\n if self.IsBlinkingEnabled():\n m.append((uiutil.MenuLabel('UI/Neocom/ConfigBlinkOff'), self.SetBlinkingOff))\n else:\n m.append((uiutil.MenuLabel('UI/Neocom/ConfigBlinkOn'), self.SetBlinkingOn))\n m.append((uiutil.MenuLabel('UI/Neocom/ResetButtons'), self.ResetButtons))\n if eve.session.role & service.ROLEMASK_ELEVATEDPLAYER:\n m.extend([None, ('Reload Insider', sm.StartService('insider').Reload),\n ('Toggle Insider',\n lambda : sm.StartService('insider').Toggle(forceShow=True))])\n return m\n\n def AddNewGroup(self):\n wnd = NeocomGroupNamePopup.Open()\n ret = wnd.ShowModal()\n if ret in (uiconst.ID_CLOSE, uiconst.ID_CANCEL):\n return\n BtnDataNodeGroup(parent=self.btnData, children=[], iconPath=neocomCommon.ICONPATH_GROUP, label=ret.label or localization.GetByLabel('UI/Neocom/ButtonGroup'), id='group_%s' % ret.label, btnType=neocomCommon.BTNTYPE_GROUP, labelAbbrev=ret.labelAbbrev)\n\n def EditGroup(self, btnData):\n wnd = NeocomGroupNamePopup.Open(groupName=btnData.label, groupAbbrev=btnData.labelAbbrev)\n ret = wnd.ShowModal()\n if ret in (uiconst.ID_CLOSE, uiconst.ID_CANCEL):\n return\n btnData.label = ret.label or localization.GetByLabel('UI/Neocom/ButtonGroup')\n btnData.labelAbbrev = ret.labelAbbrev\n btnData.Persist()\n\n def SetBlinkingOn(self):\n settings.char.windows.Set('neoblink', True)\n\n def SetBlinkingOff(self):\n settings.char.windows.Set('neoblink', False)\n self.BlinkStopAll()\n\n def BlinkStopAll(self):\n self.eveMenuBtnData.SetBlinkingOff()\n self.btnData.SetBlinkingOff()\n self.neocom.charSheetBtn.DisableBlink()\n\n def IsBlinkingEnabled(self):\n return settings.char.windows.Get('neoblink', True)\n\n def GetMenuForBtnData(self, btnData):\n return []\n\n def GetSidePanelSideOffset(self):\n return uicore.layer.sidePanels.GetSideOffset()\n\n\nclass BtnDataNode(util.KeyVal):\n __guid__ = 'neocom.BtnDataNode'\n __notifyevents__ = []\n persistChildren = True\n\n def __init__(self, parent=None, children=None, iconPath=None, label=None, id=None, btnType=None, isRemovable=True, isDraggable=True, isActive=False, isBlinking=False, labelAbbrev=None, wndCls=None, **kw):\n sm.RegisterNotify(self)\n self._parent = parent\n self.iconPath = iconPath\n self._children = children or []\n self.label = label\n self.labelAbbrev = labelAbbrev\n self.btnType = btnType\n self.btnUI = None\n self.isRemovable = isRemovable\n self.isDraggable = isDraggable\n self.isActive = isActive\n self.isBlinking = isBlinking\n self.blinkHint = ''\n self.blinkEndThread = None\n self.id = id\n self.guid = None\n self.cmdName = None\n self.wndCls = wndCls\n if not iconPath and wndCls:\n self.iconPath = wndCls.default_iconNum\n for attrname, val in kw.iteritems():\n setattr(self, attrname, val)\n\n if parent:\n parent._AddChild(self)\n return\n\n def _AddChild(self, child):\n self._children.append(child)\n self.CheckContinueBlinking()\n if self.persistChildren:\n self.Persist()\n\n def GetChildren(self):\n return self._children\n\n children = property(GetChildren)\n\n def GetParent(self):\n return self._parent\n\n def SetParent(self, parent):\n self.parent._children.remove(self)\n 
self.parent.CheckContinueBlinking()\n        self._parent = parent\n        self.Persist()\n\n    parent = property(GetParent, SetParent)\n\n    def __repr__(self):\n        return '<BtnDataNode label=%s children=%d>' % (repr(self.label), len(self._children))\n\n    def Persist(self, scatterEvent=True, fromChild=False):\n        if fromChild and not self.persistChildren:\n            return\n        self.parent.Persist(scatterEvent, fromChild=True)\n\n    def GetRawData(self):\n        return util.KeyVal(btnType=self.btnType, id=self.id, iconPath=self.iconPath, children=self._GetRawChildren())\n\n    def _GetRawChildren(self):\n        rawChildren = None\n        if self._children:\n            rawChildren = []\n            if self.persistChildren:\n                for btnData in self._children:\n                    if btnData.btnType not in NOTPERSISTED_BTNTYPES:\n                        rawChildren.append(btnData.GetRawData())\n\n        return rawChildren\n\n    def SwitchWith(self, other):\n        if other.parent != self.parent:\n            return\n        lst = self.parent._children\n        indexSelf = lst.index(self)\n        indexOther = lst.index(other)\n        lst.insert(indexOther, lst.pop(indexSelf))\n        self.Persist(scatterEvent=False)\n\n    def GetIndex(self):\n        lst = self.parent._children\n        return lst.index(self)\n\n    def GetBtnDataByTypeAndID(self, btnType, id, recursive=False):\n        for btnData in self._children:\n            if btnType is None or btnData.btnType == btnType:\n                if btnData.id == id:\n                    return btnData\n            if recursive:\n                subBtnData = btnData.GetBtnDataByTypeAndID(btnType, id, True)\n                if subBtnData:\n                    return subBtnData\n\n        return\n\n    def GetBtnDataByGUID(self, guid, recursive=False):\n        if guid is None:\n            return\n        else:\n            for btnData in self._children:\n                if getattr(btnData, 'guid', None) == guid:\n                    return btnData\n                if recursive:\n                    subBtnData = btnData.GetBtnDataByGUID(guid, True)\n                    if subBtnData:\n                        return subBtnData\n\n            return\n\n    def RemoveChild(self, btnData):\n        btnData.parent = None\n        self._children.remove(btnData)\n        if self.persistChildren:\n            self.Persist()\n        return\n\n    def MoveTo(self, newParent, index=None):\n        if newParent == self:\n            return\n        elif not self.IsRemovable():\n            return\n        else:\n            self.parent._children.remove(self)\n            if index is None:\n                newParent._children.append(self)\n            else:\n                newParent._children.insert(index, self)\n            oldParent = self.parent\n            self.parent = newParent\n            oldParent.CheckContinueBlinking()\n            self.Persist()\n            oldParent.Persist()\n            return\n\n    def Remove(self):\n        self.parent.RemoveChild(self)\n\n    def IsRemovable(self):\n        if self.isRemovable:\n            for btnData in self._children:\n                if not btnData.IsRemovable():\n                    return False\n\n            return True\n\n    def GetHeadNode(self):\n        return self.parent.GetHeadNode()\n\n    def IsDescendantOf(self, btnData):\n        return self.parent._IsDescendantOf(btnData)\n\n    def _IsDescendantOf(self, btnData):\n        if self == btnData:\n            return True\n        return self.parent._IsDescendantOf(btnData)\n\n    def SetBlinkingOn(self, hint='', numBlinks=None):\n        self.isBlinking = True\n        self.blinkHint = hint\n        if numBlinks:\n            uthread.new(self._StopBlinkThread, numBlinks)\n        if self.parent:\n            self.parent.SetBlinkingOn(hint)\n        sm.ScatterEvent('OnNeocomBlinkingChanged')\n\n    def _StopBlinkThread(self, numBlinks):\n        blue.synchro.SleepWallclock(numBlinks * neocomCommon.BLINK_INTERVAL)\n        self.SetBlinkingOff()\n\n    def SetBlinkingOff(self):\n        self._SetBlinkingOff()\n        if self.parent:\n            self.parent.CheckContinueBlinking()\n        sm.ScatterEvent('OnNeocomBlinkingChanged')\n\n    def _SetBlinkingOff(self):\n        self.isBlinking = False\n        self.blinkHint = ''\n        for btnData in self._children:\n            btnData._SetBlinkingOff()\n\n    def CheckContinueBlinking(self):\n        for btnData in self._children:\n            if btnData.isBlinking:\n                sm.ScatterEvent('OnNeocomBlinkingChanged')\n                self.isBlinking = True\n                return\n\n        self.isBlinking = False\n        if self.parent:\n            self.parent.CheckContinueBlinking()\n        else:\n            sm.ScatterEvent('OnNeocomBlinkingChanged')\n\n    def SetActive(self):\n        self.isActive = True\n        if hasattr(self.btnUI, 'CheckIfActive'):\n            self.btnUI.CheckIfActive()\n\n    def SetInactive(self):\n        self.isActive = False\n        if hasattr(self.btnUI, 'CheckIfActive'):\n            self.btnUI.CheckIfActive()\n\n    def GetHint(self, label=None):\n        hintStr = label or self.label\n        if self.btnType == neocomCommon.BTNTYPE_CMD:\n            cmd = uicore.cmd.commandMap.GetCommandByName(self.cmdName)\n            shortcutStr = cmd.GetShortcutAsString()\n            if shortcutStr:\n                hintStr += ' [%s]' % shortcutStr\n        if self.blinkHint:\n            hintStr += '<br>%s' % self.blinkHint\n        return hintStr\n\n    def GetMenu(self):\n        m = []\n        if self.isRemovable and not self.isActive:\n            m.append((localization.GetByLabel('UI/Commands/Remove'), self.Remove))\n        m += sm.GetService('neocom').GetMenuForBtnData(self)\n        return m\n\n    def IsButtonInScope(self):\n        if self.id == 'twitch' and blue.sysinfo.isTransgaming:\n            return False\n        if self.id == 'map_beta' and not IsMapBetaEnabled():\n            return False\n        if self.id == 'map' and IsMapBetaEnabled():\n            return False\n        if self.id in ('corpHangar', 'corpDeliveriesHangar') and util.IsNPCCorporation(session.corpid):\n            return False\n        if self.id == 'corpHangar' and not self._HasCorpOffice():\n            return False\n        if self.id == 'tutorial' and not sm.GetService('experimentClientSvc').IsTutorialEnabled():\n            return False\n        if self.id == 'opportunities' and not sm.GetService('experimentClientSvc').OpportunitiesEnabled():\n            return False\n        if self.wndCls:\n            scope = self.wndCls.default_scope\n            if not scope or scope == 'station_inflight':\n                return True\n            if session.structureid and scope in ('structure', 'station'):\n                return True\n            if session.stationid2 and scope != 'station':\n                return False\n            if session.solarsystemid and scope != 'space':\n                return False\n        return True\n\n    def GetButtonsInScope(self):\n        return [ btnData for btnData in self.children if btnData.IsButtonInScope() ]\n\n    def _HasCorpOffice(self):\n        if session.structureid:\n            if sm.GetService('structureOffices').HasOffice():\n                return True\n        if sm.GetService('corp').GetOffice():\n            return True\n        return False\n\n\nclass BtnDataHeadNode(BtnDataNode):\n    __guid__ = 'neocom.BtnDataHeadNode'\n\n    def __init__(self, id=None, rawBtnData=None, isRemovable=True, persistChildren=True):\n        self.id = id\n        self._parent = None\n        self._persistThread = None\n        rawBtnData = rawBtnData or []\n        self._children = []\n        self._GetButtonDataFromnRawData(self, rawBtnData, isRemovable)\n        self.isBlinking = False\n        self.persistChildren = persistChildren\n        return\n\n    def __repr__(self):\n        return '<BtnDataHeadNode children=%d>' % len(self._children)\n\n    def Persist(self, scatterEvent=True, fromChild=False):\n        if not self._persistThread:\n            self._persistThread = uthread.new(self._Persist, scatterEvent)\n\n    def _Persist(self, scatterEvent):\n        if self.persistChildren:\n            savedData = []\n            for btnData in self._children:\n                if btnData.btnType not in NOTPERSISTED_BTNTYPES:\n                    savedData.append(btnData.GetRawData())\n\n            settings.char.ui.Set('%sButtonRawData' % self.id, savedData)\n        if scatterEvent:\n            sm.ScatterEvent('OnHeadNodeChanged', self.id)\n        self._persistThread = None\n        return\n\n    def _GetButtonDataFromnRawData(self, parent, rawData, isRemovable):\n        nodes = []\n        for data in rawData:\n            data = ConvertOldTypeOfRawData(data)\n            nodeClass = NODECLASS_BY_TYPE.get(data.btnType, BtnDataNode)\n            btnDataRaw = BTNDATARAW_BY_ID.get(data.id)\n            if btnDataRaw:\n                label = None\n                if btnDataRaw.cmdName:\n                    cmd = uicore.cmd.commandMap.GetCommandByName(btnDataRaw.cmdName)\n                    if cmd:\n                        label = cmd.GetName()\n                if label is None:\n                    if data.Get('label', None):\n                        label = data.label\n                    else:\n                        label = localization.GetByLabel(btnDataRaw.label)\n                iconPath = btnDataRaw.iconPath\n                cmdName = btnDataRaw.cmdName\n                wndCls = btnDataRaw.wndCls\n                guid = wndCls\n            elif data.btnType == neocomCommon.BTNTYPE_GROUP:\n                label = data.label\n                iconPath = neocomCommon.ICONPATH_GROUP\n                guid = data.id\n                cmdName = None\n                wndCls = None\n            else:\n                continue\n            btnData = nodeClass(parent=parent, iconPath=iconPath, id=data.id, guid=guid, label=label, btnType=data.btnType, cmdName=cmdName, isRemovable=isRemovable, 
labelAbbrev=data.Get('labelAbbrev', None), wndCls=wndCls)\n if data.children:\n self._GetButtonDataFromnRawData(btnData, data.children, isRemovable)\n nodes.append(btnData)\n\n return nodes\n\n def GetHeadNode(self):\n return self\n\n def IsDescendantOf(self, btnData):\n return False\n\n def _IsDescendantOf(self, btnData):\n return btnData == self\n\n\nclass BtnDataNodeDynamic(BtnDataNode):\n __guid__ = 'neocom.BtnDataNodeDynamic'\n persistChildren = False\n\n def GetDataList(self):\n return []\n\n def GetNodeFromData(self, data, parent):\n pass\n\n def _AddChild(self, child):\n pass\n\n def ProcessNeocomBlinkPulse(self):\n pass\n\n def CheckContinueBlinking(self):\n pass\n\n def RemoveChild(self, btnData):\n pass\n\n def GetChildren(self):\n dataList = self.GetDataList()\n return self._GetChildren(dataList, self)\n\n def GetPanelEntryHeight(self):\n return 25\n\n def _GetChildren(self, dataList, parent=None):\n children = []\n entryHeight = self.GetPanelEntryHeight()\n maxEntries = uicore.desktop.height / entryHeight - 1\n for data in dataList[:maxEntries]:\n btnData = self.GetNodeFromData(data, parent)\n children.append(btnData)\n\n overflow = dataList[maxEntries:]\n if overflow:\n overflowBtnData = BtnDataNode(parent=parent, iconPath=neocomCommon.ICONPATH_GROUP, label=localization.GetByLabel('UI/Neocom/OverflowButtonsLabel', numButtons=len(overflow)), btnType=neocomCommon.BTNTYPE_GROUP, panelEntryHeight=entryHeight, isRemovable=False, isDraggable=False)\n children.append(overflowBtnData)\n self._GetChildren(dataList[maxEntries:], overflowBtnData)\n return children\n\n children = property(GetChildren)\n\n\nclass BtnDataNodeGroup(BtnDataNode):\n __guid__ = 'neocom.BtnDataNodeGroup'\n\n def GetMenu(self):\n if self.GetHeadNode() == sm.GetService('neocom').eveMenuBtnData:\n return\n m = []\n if self.IsRemovable():\n m.append((uiutil.MenuLabel('UI/Commands/Remove'), self.Remove, ()))\n m.append((localization.GetByLabel('UI/Neocom/Edit'), sm.GetService('neocom').EditGroup, (self,)))\n return m\n\n def GetRawData(self):\n return util.KeyVal(btnType=self.btnType, id=self.id, iconPath=self.iconPath, children=self._GetRawChildren(), label=self.label, labelAbbrev=self.labelAbbrev)\n\n\nclass BtnDataNodeBookmarks(BtnDataNodeDynamic):\n __guid__ = 'neocom.BtnDataNodeBookmarks'\n\n def GetDataList(self):\n bookmarkData = sm.GetService('sites').GetBookmarks()[:]\n bookmarkData.insert(0, util.KeyVal(url='home', name=localization.GetByLabel('UI/Neocom/Homepage')))\n return bookmarkData\n\n def GetNodeFromData(self, bookmark, parent):\n return BtnDataNode(parent=parent, children=None, iconPath=neocomCommon.ICONPATH_BOOKMARKS, label=bookmark.name, id=bookmark.name, btnType=neocomCommon.BTNTYPE_BOOKMARK, bookmark=bookmark, isRemovable=False, isDraggable=False)\n\n\nclass BtnDataNodeChat(BtnDataNodeDynamic):\n __guid__ = 'neocom.BtnDataNodeChat'\n __notifyevents__ = [\n 'ProcessNeocomBlinkPulse']\n\n def GetMenu(self):\n return [\n (\n localization.GetByLabel('UI/Commands/OpenChannels'),\n uicore.cmd.OpenChannels, [])]\n\n def ProcessNeocomBlinkPulse(self):\n self.isBlinking = False\n if sm.GetService('neocom').IsBlinkingEnabled():\n for wnd in self._GetOpenChatWindows():\n if getattr(wnd, 'isBlinking', False):\n if wnd.InStack() and wnd.GetStack().display:\n continue\n self.isBlinking = True\n return\n\n def _GetOpenChatWindows(self):\n return [ wnd for wnd in uicore.registry.GetWindows() if wnd.__class__ == Channel ]\n\n def GetDataList(self):\n\n def GetKey(wnd):\n priority = 
('chatchannel_solarsystemid2', 'chatchannel_corpid', 'chatchannel_allianceid',\n 'chatchannel_fleetid', 'chatchannel_squadid', 'chatchannel_wingid')\n if wnd.name in priority:\n return priority.index(wnd.name)\n else:\n return wnd.GetCaption()\n\n sortedData = sorted(self._GetOpenChatWindows(), key=GetKey)\n data = uiutil.Bunch(addChatChannelWnd=1)\n sortedData.insert(0, data)\n return sortedData\n\n def GetNodeFromData(self, wnd, parent):\n if getattr(wnd, 'addChatChannelWnd', False):\n cmd = uicore.cmd.commandMap.GetCommandByName(BTNDATARAW_BY_ID['chatchannels'].cmdName)\n return BtnDataNode(parent=parent, children=None, iconPath=Channels.default_iconNum, id='chatchannels', guid=None, btnType=neocomCommon.BTNTYPE_CMD, cmdName=BTNDATARAW_BY_ID['chatchannels'].cmdName, isRemovable=False, isDraggable=False, label=cmd.GetName())\n else:\n return BtnDataNode(parent=parent, iconPath=neocomCommon.ICONPATH_CHAT, label=wnd.GetCaption(), id=wnd.windowID, btnType=neocomCommon.BTNTYPE_CHATCHANNEL, wnd=wnd, isRemovable=False, isDraggable=False, isBlinking=getattr(wnd, 'isBlinking', False))\n return None\n\n\nNODECLASS_BY_TYPE = {neocomCommon.BTNTYPE_CHAT: BtnDataNodeChat,\n neocomCommon.BTNTYPE_BOOKMARKS: BtnDataNodeBookmarks,\n neocomCommon.BTNTYPE_GROUP: BtnDataNodeGroup\n }\n\nclass NeocomGroupNamePopup(uicontrols.Window):\n default_windowID = 'NeocomGroupNamePopup'\n default_topParentHeight = 0\n default_fixedWidth = 180\n default_fixedHeight = 130\n default_caption = 'UI/Neocom/NeocomGroup'\n\n def ApplyAttributes(self, attributes):\n uicontrols.Window.ApplyAttributes(self, attributes)\n self.btnData = attributes.get('btnData', None)\n groupName = attributes.get('groupName', '')\n groupAbbrev = attributes.get('groupAbbrev', '')\n self.sr.main.padLeft = 6\n self.sr.main.padRight = 6\n self.sr.main.padBottom = 4\n self.labelEdit = uicontrols.SinglelineEdit(name='labelEdit', label=localization.GetByLabel('UI/Neocom/NeocomGroupName'), parent=self.sr.main, align=uiconst.TOTOP, padTop=20, setvalue=groupName, OnReturn=self.Confirm)\n self.labelEdit.SetMaxLength(30)\n self.labelAbbrevEdit = uicontrols.SinglelineEdit(name='labelAbbrevEdit', label=localization.GetByLabel('UI/Neocom/NeocomGroupNameAbbrev'), parent=self.sr.main, align=uiconst.TOTOP, padTop=20, setvalue=groupAbbrev, OnReturn=self.Confirm)\n self.labelAbbrevEdit.SetMaxLength(2)\n btns = uicontrols.ButtonGroup(parent=self.sr.main, line=False, btns=((localization.GetByLabel('UI/Common/Confirm'), self.Confirm, ()), (localization.GetByLabel('UI/Commands/Cancel'), self.Close, ())))\n return\n\n def Confirm(self, *args):\n kv = util.KeyVal(label=self.labelEdit.GetValue(), labelAbbrev=self.labelAbbrevEdit.GetValue())\n self.SetModalResult(kv)","sub_path":"client/eve/client/script/ui/shared/neocom/neocom/neocomSvc.py","file_name":"neocomSvc.py","file_ext":"py","file_size_in_byte":53432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"563359417","text":"#!/usr/bin/python3\n\nimport json\n\nsecrets ={}\ndata = {}\n\nwith open(\"./src/Thimble.UserAccount/appsettings.json\", \"r\") as f:\n data = json.load(f)\n\nwith open(\"secrets.json\", \"r\") as f:\n secrets = json.load(f)\n for param in secrets[\"Parameters\"]:\n if \"apiKey\" in param[\"Name\"]:\n data[\"apiKey\"] = param[\"Value\"]\n if \"accessKey\" in param[\"Name\"]:\n data[\"awsAccessKey\"] = param[\"Value\"]\n if \"secretKey\" in param[\"Name\"]:\n data[\"awsSecretKey\"] = param[\"Value\"]\n\nwith 
open('./src/Thimble.UserAccount/appsettings.json', 'w') as f:\n json.dump(data, f)","sub_path":"tools/jenkins/add-secrets.py","file_name":"add-secrets.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"628540999","text":"# coding:utf-8\nfrom flask import request, json\n\nfrom planner_project import app\nfrom planner_project.common import api_response, custom_error, request_back_helper\nfrom planner_project.data_access import mysql\nfrom planner_project.sql.backweb import upgrade_user_sql\n\n\n# 获取规划师升级审核列表\n@app.route(\"/backweb/user/get_upgrade_apply_list\", methods=['POST'])\ndef get_upgrade_apply_list():\n ApiResponse = api_response.ApiResponse()\n size = request.form.get(\"size\", type=int, default=10)\n name = request.form.get(\"name\", type=str, default=\"\")\n page = request.form.get(\"page\", type=int, default=1)\n if page <= 0:\n page = 1\n if size <= 0:\n size = 10\n sear = \"%\" + name + \"%\"\n data = mysql.get_list(upgrade_user_sql.get_upgrade_apply_list, (name, name, sear, (page - 1) * size, size))\n listCount = mysql.get_object(upgrade_user_sql.get_upgrade_apply_count, (name, name, sear))\n\n ApiResponse.message = \"成功\"\n ApiResponse.status = 200\n\n if listCount is not None:\n ApiResponse.data = data\n ApiResponse.listCount = listCount[\"count\"]\n return api_response.response_return(ApiResponse)\n\n\n# 获取规划师升级数据详情\n@app.route(\"/backweb/user/get_upgrade_apply_detail\", methods=['POST'])\ndef get_upgrade_apply_detail():\n request_back_helper.current_user_mush_login()\n ApiResponse = api_response.ApiResponse()\n Id = request.form.get(\"Id\", type=str, default=None)\n if Id is None or Id == \"\":\n raise custom_error.CustomFlaskErr(status_code=500, message=\"参数id不能为空\")\n ApiResponse.data = mysql.get_object(upgrade_user_sql.get_upgrade_apply_detail, (Id))\n\n ApiResponse.message = \"成功\"\n ApiResponse.status = 200\n return api_response.response_return(ApiResponse)\n\n\n# 获取规划师升级数据详情\n@app.route(\"/backweb/user/update_upgrade_user\", methods=['POST'])\ndef update_upgrade_user():\n UserId = request_back_helper.current_user_mush_login()[\"UserId\"]\n Id = request.form.get(\"Id\", type=str, default=None)\n Status = request.form.get(\"Status\", type=int, default=-1)\n\n if Id is None or Id == \"\":\n raise custom_error.CustomFlaskErr(status_code=500, message=\"参数id不能为空\")\n if Status == -1:\n raise custom_error.CustomFlaskErr(status_code=500, message=\"参数状态不能为空\")\n #查询规划师升级数据\n upgradeInfo = mysql.get_object(upgrade_user_sql.get_upgrade_info_by_id, (Id))\n\n if upgradeInfo is None:\n raise custom_error.CustomFlaskErr(status_code=500, message=\"该审核数据不存在\")\n if upgradeInfo[\"Status\"] != 0:\n raise custom_error.CustomFlaskErr(status_code=500, message=\"该审核数据已处理,请勿重复操作\")\n # 审核不通过\n if Status == 2:\n success = mysql.operate_object(upgrade_user_sql.update_upgrade_status, (Status, UserId, Id))\n if success <= 0:\n raise custom_error.CustomFlaskErr(status_code=500, message=\"审核失败\")\n # 审核通过\n if Status == 1:\n idCard=\" \"#判断IDCard是否不存在\n if 'IDCard' in upgradeInfo and upgradeInfo[\"IDCard\"] is not None:\n idCard = upgradeInfo[\"IDCard\"]\n sql_list = [upgrade_user_sql.update_upgrade_status,\n upgrade_user_sql.update_user_type,\n upgrade_user_sql.update_user_info,\n upgrade_user_sql.insert_planner_statistics]\n args_list = [(Status, UserId, Id),\n (UserId,upgradeInfo[\"UserId\"]),\n (upgradeInfo[\"Name\"],upgradeInfo[\"Sex\"],upgradeInfo[\"Address\"],upgradeInfo[\"Experience\"],\n 
upgradeInfo[\"Email\"],idCard,upgradeInfo[\"IDCardPic\"],upgradeInfo[\"IDCardBackPic\"],upgradeInfo[\"ServiceAreaId\"],\n upgradeInfo[\"ServiceId\"],UserId,upgradeInfo[\"UserId\"]),\n (upgradeInfo[\"UserId\"],UserId,UserId,upgradeInfo[\"UserId\"],upgradeInfo[\"UserId\"])]\n success = mysql.operate__many(sql_list, args_list)\n if success <= 0:\n raise custom_error.CustomFlaskErr(status_code=500, message=\"审核失败\")\n\n ApiResponse = api_response.ApiResponse()\n ApiResponse.message = \"成功\"\n ApiResponse.status = 200\n return api_response.response_return(ApiResponse)\n","sub_path":"API/planner_project/api/backweb/back_upgrade_user.py","file_name":"back_upgrade_user.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"75652255","text":"import os\nimport sys\nimport json\nimport logging.config\n\nfrom configparser import ConfigParser\n\nlogger = logging.getLogger(__name__ + \".config_server\")\nBASE_PROJECT_FOLDER = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSERVER_CONFIG_FILE = os.path.join(BASE_PROJECT_FOLDER, \"server_config.ini\")\nLOGGING_CONFIG_FILE = os.path.join(BASE_PROJECT_FOLDER, \"logger_config.json\")\n\n\nif os.path.exists(LOGGING_CONFIG_FILE):\n with open(LOGGING_CONFIG_FILE, \"rt\") as log_file:\n logging.config.dictConfig(json.load(log_file))\nelse:\n logging.basicConfig(level=logging.INFO)\n\nif os.path.exists(SERVER_CONFIG_FILE):\n config_parser = ConfigParser()\n config_parser.read(SERVER_CONFIG_FILE)\nelse:\n logger.error(\"Config file dosen't exist\")\n sys.exit(1)\n\nDATABASE_CONFIG = {\n str(option) : str(config_parser.get(\"database_config\", option)) for option in \\\n config_parser.options(\"database_config\")\n}\n","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"114443372","text":"#!/usr/bin/env python3\n# coding: utf-8\n# =============================================================================\n# Name : __init__.py\n# Function :\n# Usage :\n# Version : 1.0.0\n# vi : set expandtab shiftwidth=4 softtabstop=4\n# =============================================================================\n\n# import datetime\nimport sys\nimport logging\nimport atexit\nimport fregate.config as config\nimport fregate.commands as commands\nfrom fregate.commons.kubectl import kubectl\nimport yaml\n\n__version__ = \"0.0.2\"\n\n\n@atexit.register\ndef onexit():\n logging.info(\"Exiting ..\")\n sys.exit(0)\n\n\ndef _setup_logging():\n global logger\n # Setup logging\n logformat = '[%(levelname)-8s%(relativeCreated)8d]'\\\n ' [%(name)s] %(message)s'\n logging.basicConfig(level=logging.DEBUG, format=logformat)\n logger = logging.getLogger('fregate')\n logger.debug(\"-\"*30)\n logger.debug(\" Fregate {}\".format(__version__))\n\n\ndef execute_binary(args):\n kubectl((\" \".join(args[1:])))\n\n\ndef run(args, cfg, infra):\n \"\"\" Running correspondant command for action in command line\n \"\"\"\n if args.action == \"up\":\n commands.up(cfg, infra, daemonize=args.daemonize,\n cached=args.cached)\n elif args.action == \"clean\":\n commands.clean(network=infra[\"network\"])\n elif args.action == \"ssh\":\n commands.ssh(cfg, infra[\"nodes\"], args.vm_name)\n sys.exit(0)\n elif args.action == \"status\":\n commands.status(cfg, infra[\"nodes\"])\n elif args.action == \"down\":\n commands.down(cfg, infra[\"nodes\"])\n elif 
args.action == \"service\":\n        service_name = args.name\n        service_state = args.state\n        commands.service_update(service_name,\n                                service_state,\n                                cfg=cfg, infra=infra)\n    return 0\n\n\ndef main():\n    _setup_logging()\n    # Read config and command line\n    cfg = config.read()\n    args = commands.parse_args()\n    if type(args) is list:\n        return execute_binary(args)\n    try:\n        # Try to open default config file\n        with open(args.configfile) as f:\n            infra = yaml.load(f.read())\n    except Exception:\n        # Failed to read default config file\n        logger.critical(\"Failed to open {}\".format(args.configfile))\n        sys.exit(-1)\n    else:\n        # Run command line\n        return run(args, cfg, infra)\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except KeyboardInterrupt:\n        sys.exit(0)\n","sub_path":"lib/fregate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"650533067","text":"from typing import List\n\nimport scipy.stats\nfrom jivago.lang.stream import Stream\n\nfrom experiment_framework.infrastructure.accuracy_experiment_info import AccuracyExperimentInfo\n\n\nclass AccuracyInfoAggregationService(object):\n\n    def mean(self, accuracy_infos: List[AccuracyExperimentInfo],\n             aggregated_keys=('r_squared', 'adjusted_r_squared', 'rmse', 'mae', 'relative_average_deviation')) -> dict:\n        accuracy_data = Stream(accuracy_infos).map(lambda x: x.get_report_data()).toList()\n        return Stream(aggregated_keys).map(lambda key: (key, self._average_for_key(accuracy_data, key))).toDict()\n\n    def uncertainty(self, accuracy_infos: List[AccuracyExperimentInfo],\n                    aggregated_keys=('r_squared', 'adjusted_r_squared', 'rmse', 'mae', 'relative_average_deviation')) -> dict:\n        indicator_uncertainty = {}\n        for key in aggregated_keys:\n            parameter_values = Stream(accuracy_infos).map(lambda x: x.get_report_data()[key]).toList()\n            se = scipy.stats.sem(parameter_values)\n            h = se * scipy.stats.t.ppf((1 + 0.95) / 2.0, len(parameter_values) - 1)\n            indicator_uncertainty[key] = h\n        return indicator_uncertainty\n\n    def _average_for_key(self, dictionaries: List[dict], key):\n        return Stream(dictionaries).map(lambda x: x[key]).sum() / len(dictionaries)\n","sub_path":"experiment_framework/infrastructure/accuracy_info_aggregation_service.py","file_name":"accuracy_info_aggregation_service.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"139290938","text":"class Solution:\n    # @param num, a list of integer\n    # @return a list of lists of integers\n    def permuteUnique(self, num):\n        def helper(n, item, used):\n            if n == len(num):\n                list.append(item)\n            hash = {}# use a hashmap to see if the num is used in this position\n            for i in range(len(num)):\n                if used[i] == False:\n                    if num[i] not in hash:\n                        hash[num[i]] = 1\n                        item.append(num[i])\n                        \n                        used[i] = True\n                        helper(n+1, item[:], used)\n                        item = item[:-1]\n                        used[i] = False\n        num.sort()\n        list = []\n        used = [False for i in range(len(num))]\n        helper(0, [], used)\n        return list","sub_path":"lc/LC2/140 Permutation II/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"483797974","text":"\n\nfrom xai.brain.wordbase.verbs._crumple import _CRUMPLE\n\n#class header\nclass _CRUMPLING(_CRUMPLE, ):\n\tdef __init__(self,): \n\t\t_CRUMPLE.__init__(self)\n\t\tself.name = \"CRUMPLING\"\n\t\tself.specie = 
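The permuteUnique record above prunes duplicate branches with a per-position hashmap of values already tried, so the output stays duplicate-free for repeated inputs. A minimal standalone sketch of the same used-flag backtracking technique (names here are illustrative, not taken from the record):

def permute_unique(nums):
    results = []

    def backtrack(item, used):
        if len(item) == len(nums):
            results.append(item[:])
            return
        seen = set()  # values already placed at this position on this branch
        for i, value in enumerate(nums):
            if not used[i] and value not in seen:
                seen.add(value)
                used[i] = True
                backtrack(item + [value], used)
                used[i] = False

    backtrack([], [False] * len(nums))
    return results

assert sorted(permute_unique([1, 1, 2])) == [[1, 1, 2], [1, 2, 1], [2, 1, 1]]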
'verbs'\n\t\tself.basic = \"crumple\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_crumpling.py","file_name":"_crumpling.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"39104148","text":"'''\nThis script creates the data mart for outages data in weekly report\n\n## Steps\n* read data from reporting software outages database\n* transform it to fit the local raw data table and push into it\n\n## links\nauto increment in oracle - https://chartio.com/resources/tutorials/how-to-define-an-auto-increment-primary-key-in-oracle/\ninstall Python Docstring Generator for auto documentation of function\n'''\nimport argparse\nimport datetime as dt\nfrom src.appConfig import getConfig\nfrom src.rawDataCreators.outagesRawDataCreator import createOutageEventsRawData\n\n# get start and end dates from command line\nendDate = dt.datetime.now()\nstartDate = endDate - dt.timedelta(days=3)\n# get an instance of argument parser from argparse module\nparser = argparse.ArgumentParser()\n# setup firstname, lastname arguements\nparser.add_argument('--start_date', help=\"Enter Start date in yyyy-mm-dd format\",\n default=dt.datetime.strftime(startDate, '%Y-%m-%d'))\nparser.add_argument('--end_date', help=\"Enter last date in yyyy-mm-dd format\",\n default=dt.datetime.strftime(endDate, '%Y-%m-%d'))\n# get the dictionary of command line inputs entered by the user\nargs = parser.parse_args()\n# access each command line input from the dictionary\nstartDate = dt.datetime.strptime(args.start_date, '%Y-%m-%d')\nendDate = dt.datetime.strptime(args.end_date, '%Y-%m-%d')\n\nstartDate = startDate.replace(hour=0, minute=0, second=0, microsecond=0)\nendDate = endDate.replace(hour=0, minute=0, second=0, microsecond=0)\nprint('startDate = {0}, endDate = {1}'.format(dt.datetime.strftime(\n startDate, '%Y-%m-%d'), dt.datetime.strftime(endDate, '%Y-%m-%d')))\n# get application config\nappConfig = getConfig()\n\n# create outages raw data between start and end dates\nisRawDataCreationSuccess = createOutageEventsRawData(\n appConfig, startDate, endDate)\n\nif isRawDataCreationSuccess:\n print('raw outages data creation done...')\nelse:\n print('raw outages data creation failure...')\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"128733476","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('emails', '0011_auto_20160921_1839'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='emailinfo',\n name='username',\n field=models.CharField(default=None, max_length=64),\n ),\n ]\n","sub_path":"emails/migrations/0012_emailinfo_username.py","file_name":"0012_emailinfo_username.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"247776383","text":"import FWCore.ParameterSet.Config as cms\nfrom DQM.PhysicsHWW.JetIdParams_cfi import *\n\n\n#################################################################################################################### \nPhilV0 = cms.PSet( \n impactParTkThreshold = cms.double(1.) 
,\n    tmvaWeights = cms.string(\"CMGTools/External/data/mva_JetID.weights.xml\"),\n    tmvaMethod = cms.string(\"JetID\"),\n    version = cms.int32(0),\n    JetIdParams = EmptyJetIdParams\n)\n\n\n#################################################################################################### \nPhilV1 = cms.PSet(\n    impactParTkThreshold = cms.double(1.) ,\n    tmvaWeights = cms.string(\"CMGTools/External/data/mva_JetID_v1.weights.xml\"),\n    tmvaMethod = cms.string(\"JetID\"),\n    version = cms.int32(-1),\n    tmvaVariables = cms.vstring(\n    \"nvtx\",\n    \"jetPt\",\n    \"jetEta\",\n    \"jetPhi\",\n    \"dZ\",\n    \"d0\",\n    \"beta\",\n    \"betaStar\",\n    \"nCharged\",\n    \"nNeutrals\",\n    \"dRMean\",\n    \"frac01\",\n    \"frac02\",\n    \"frac03\",\n    \"frac04\",\n    \"frac05\",\n    ),\n    tmvaSpectators = cms.vstring(),\n    JetIdParams = JetIdParams,\n    label = cms.string(\"philv1\")\n    )\n\n","sub_path":"DQM/PhysicsHWW/python/puJetIDAlgo_cff.py","file_name":"puJetIDAlgo_cff.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"507286726","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 15 12:12:20 2019\n\n@author: prakh\n\"\"\"\n\n#Recurrent Neural Network\n\n\n\n#Part 1- Data preprocessing\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset_train=pd.read_csv('Google_Stock_Price_Train.csv')\ntraining_set=dataset_train.iloc[:,1:2].values\n\n#Feature Scaling\n#Two kinds of feature scaling - Standarisation and Normalisation, here we will use second one as better with sigmoid functions and RNN\nfrom sklearn.preprocessing import MinMaxScaler\nsc= MinMaxScaler(feature_range=(0,1))\ntraining_set_scaled=sc.fit_transform(training_set)\n\n\n\n#Creating a data structure with 60 (last 3 months - working days) timestamp and 1 output\nX_train=[]\ny_train=[]\nfor i in range(60,1250):\n    X_train.append(training_set_scaled[i-60:i,0])\n    y_train.append(training_set_scaled[i,0])\n\nX_train,y_train=np.array(X_train),np.array(y_train)\n\n#Reshaping\nX_train=np.reshape(X_train, (X_train.shape[0],X_train.shape[1],1))\n\n\n#Part 2- Building the RNN\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dropout\n\n# Initialising the RNN\nregressor = Sequential()\n#Adding the first LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))\n#Drop 20 percent neuron -- 10 neuron will be dropped out\nregressor.add(Dropout(0.2))\n\n# Adding a second LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a third LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a fourth LSTM layer and some Dropout regularisation\n#No return sequence for this layer,as we dont want to pass sequences\nregressor.add(LSTM(units = 50))\nregressor.add(Dropout(0.2))\n\n# Adding the output layer\nregressor.add(Dense(units = 1))\n\n#Compiling the RNN\nregressor.compile(optimizer='adam',loss='mean_squared_error')\n\n# Fitting the RNN to the Training set\nregressor.fit(X_train, y_train, epochs = 100, batch_size = 32)\n\n#Part 3- Making the prediction and visualising the results\n\n#Getting the real stock price of 2017\ndataset_test = 
pd.read_csv('Google_Stock_Price_Test.csv')\nreal_stock_price = dataset_test.iloc[:, 1:2].values\n\n#Getting the predited stock price of 2017\ndataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values\ninputs = inputs.reshape(-1,1)\ninputs = sc.transform(inputs)\nX_test = []\nfor i in range(60, 80):\n X_test.append(inputs[i-60:i, 0])\nX_test = np.array(X_test)\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = sc.inverse_transform(predicted_stock_price)\n\n# Visualising the results\nplt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')\nplt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')\nplt.title('Google Stock Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Google Stock Price')\nplt.legend()\nplt.show()\n","sub_path":"RNN_Practice/LSTM_StockMarketPredictor.py","file_name":"LSTM_StockMarketPredictor.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"79528496","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport rospy, sys\nimport moveit_commander\nimport numpy as np\nimport numpy\nimport math\n\nfrom dofnine_func import *\nclass MoveItFkDemo:\n def __init__(self):\n\n # 初始化move_group的API\n moveit_commander.roscpp_initialize(sys.argv)\n\n # 初始化ROS节点\n rospy.init_node('moveit_fk_demo', anonymous=True)\n \n # 初始化需要使用move group控制的机械臂中的arm group\n # arm = moveit_commander.MoveGroupCommander('manipulator')\n arm = moveit_commander.MoveGroupCommander('arm')\n \n # 设置机械臂运动的允许误差值\n arm.set_goal_joint_tolerance(0.001)\n\n # 设置允许的最大速度和加速度\n arm.set_max_acceleration_scaling_factor(1)\n arm.set_max_velocity_scaling_factor(1)\n \n # 控制机械臂先回到初始化位置neng\n # arm.set_named_target('home_pose')\n # arm.go()\n # rospy.sleep(1)\n #-------------------\n rcm_pose = np.array([0,-60,120,-150,-90,0,0.1*180/pi,90,0])*pi/180 \n arm.set_joint_value_target(rcm_pose)\n arm.go()\n rospy.sleep(1)\n \n # joint_positions = for_joint(tha,0)#only 1\n # joint_positions =np.array([0.33, -0.66, 0.42, 1.17, -0.64, 0.94, 0.3, -1.59, 0.0])\n # joint_positions=list(joint_positions)\n # print('joint_positions',joint_positions)\n gamma=0\n beta=1.57\n alpha=-1.57\n # RCM=[-510,-100,7.3205]\n # Tend=[-510,-100,-192.679]\n # centre of circle p[-500,-100,-300] d=(0.01-0.5)\n # x=-500\n # y=0\n # z=-200\n # tha=inverse_nine(gamma,beta,alpha,x,y,z)# print('tha',tha)\n # arm.set_joint_value_target(for_joint(tha,1))\n # arm.go()\n # rospy.sleep(1)\n inter=50#interlotion\n for i in range(inter+1):\n y=-i*(200/inter)\n x=-sqrt(100**2-(y+100)**2)-500\n z=-300\n tha=inverse_nine(gamma,beta,alpha,x,y,z)\n arm.set_joint_value_target(for_joint(tha,1))\n arm.go()\n # rospy.sleep(0.1)\n for i in range(inter+1):\n y=-200+i*(200/inter)\n x=sqrt(100**2-(y+100)**2)-500\n z=-300\n tha=inverse_nine(gamma,beta,alpha,x,y,z)\n arm.set_joint_value_target(for_joint(tha,1))\n arm.go()\n\n # # 控制机械臂先回到初始化位置\n # arm.set_named_target('home_pose')\n # arm.go()\n # rospy.sleep(1)\n # arm.set_joint_value_target(rcm_pose)\n # arm.go()\n # rospy.sleep(1)\n\n # tha=inverse_nine(gamma,beta,alpha,-450,0,-300)\n # arm.set_joint_value_target(for_joint(tha,1))\n # arm.go()\n # tha=inverse_nine(gamma,beta,alpha,-550,-200,-200)\n # arm.set_joint_value_target(for_joint(tha,1))\n # arm.go()\n 
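The two interpolation loops in the MoveIt script above sweep y across the chord and solve x from the circle equation, tracing first the left and then the right half of a circle of radius 100 centred at (-500, -100), with z held at -300. A standalone sketch of that waypoint generation, assuming the same geometry (function and parameter names are illustrative):

from math import sqrt

def circle_waypoints(steps=50, cx=-500.0, cy=-100.0, z=-300.0, r=100.0):
    # Left half: y sweeps from cy + r down to cy - r, x on the negative branch.
    for i in range(steps + 1):
        y = cy + r - i * (2.0 * r / steps)
        yield (cx - sqrt(max(r * r - (y - cy) ** 2, 0.0)), y, z)
    # Right half: y sweeps back up to cy + r, x on the positive branch.
    for i in range(steps + 1):
        y = cy - r + i * (2.0 * r / steps)
        yield (cx + sqrt(max(r * r - (y - cy) ** 2, 0.0)), y, z)

Each (x, y, z) tuple would then feed inverse_nine() and set_joint_value_target() exactly as in the loops above; the max(..., 0.0) guard only protects the sqrt against floating-point rounding at the endpoints.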
arm.set_joint_value_target(rcm_pose)\n arm.go()\n\n # 关闭并退出moveit\n moveit_commander.roscpp_shutdown()\n moveit_commander.os._exit(0)\n\nif __name__ == \"__main__\":\n try:\n MoveItFkDemo()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"src/dofnine/moveit_demo4/scripts/moveit_circle.py","file_name":"moveit_circle.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243022753","text":"from ..common import *\n\n\"\"\"\nMac上布局有BUG,推测是RadioBox和scrolledpanel组合使用的问题,Mac上勉强还能用,暂时不改。(已修复,控件顺序影响布局)\n\n###1 新增参数步骤:\n1、在 constant.py 文件中注册参数,注册的有关变量:CON_MODELSCREATEDIALOG_COLS、CON_ARGS_NAME_DICT;\n2、将参数匹配对应的控件,添加到指定的页面位置(即布局);\n3、在 self.allArgs 变量中注册参数控件(仅需要最核心的一个输入控件);\n4、按 共用参数/特殊参数 区分,分别添加到 self.commonArgs / self.specialArgs 中(所有与核心控件相关的,包括布局都必须添加进去);\n5、在 self.readmeStaticTexts 和 self.labelStaticTexts 中分别添加 字段��明 和 字段标签;\n6、在 onBtnAddFieldToArea() 方法中加入行数据(根据实际情况适当做个校验);\n7、如果是特殊参数,则在选中对应字段类型时予以显示;\n8、最终,在 onBtnPreview() 函数中,进行预览展示。\n\n###2 新增字段类型步骤:\n1、在 constant.py 文件中新增一个变量,按照 '<类型>--<字段类型名>--<字段详细>' 的格式赋值,并添加到列表 CON_FIELD_TYPES 中最合适的位置;\n2、在 onChoiceFieldType() 方法中,编写选中事件。\n\"\"\"\n\n\"\"\"\n### 关联字段的一些注意点:\n1、OneToOneField 字段类型和其它关联字段类型不同,默认的反向名称是 '',而 ManyToManyField 和 ForeignField 默认是 '_set';\n(当然,反向名称可以自己指定.)\n\"\"\"\n\n\"\"\"\n### 一些使用注意点:\n1、选择应用程序后,模型创建代码默认写入程序搜索到的第一个模型文件路径。(若路径不存在,请手动在应用程序中新建一个模型文件,并在environment.xml文件中注册别名。)\n\"\"\"\n\nSTATIC_TEXT_WIDTH = -1 # StaticText宽度\n\nclass ModelsCreateDialog(wx.Dialog):\n def __init__(self, parent):\n wx.Dialog.__init__(self, parent, id = wx.ID_ANY, title = '新增模型', size=(730, 666), style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.MINIMIZE_BOX|wx.RESIZE_BORDER)\n\n # 必要的控制容器\n self.allArgs = [] # 所有的参数选项\n self.commonArgs = [] # 共有的参数选项\n self.specialArgs = [] # 特有的参数选项\n self.afterBtns = [] # 所有的后触发按钮\n self.allRows = [] # 所有的待新增字段及其参数\n self.readmeStaticTexts = [] # 所有的脚注提示信息控件\n self.labelStaticTexts = [] # 所有的标签控件\n\n self._init_UI()\n self._disable_all_args()\n self._init_all_args_value()\n self._init_input_args()\n\n self._disable_all_afterBtns()\n\n # 按顺序布局面板\n self._init_table() # 表格布局默认加最后\n self._init_Meta_panel() # 初始化Meta选项面板\n\n # 字体默认设置\n self._init_readme_font()\n self._init_label_font()\n\n def _init_readme_font(self):\n \"\"\"脚注提示信息字体初始化\"\"\"\n for _ in self.readmeStaticTexts:\n _.SetFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n _.SetForegroundColour(CON_COLOR_MAIN)\n\n def _init_label_font(self):\n \"\"\"标签提示信息字体初始化\"\"\"\n for _ in self.labelStaticTexts:\n _.SetFont(wx.Font(16, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))\n\n def _init_UI(self):\n \"\"\"初始化界面布局\"\"\"\n # 主界面\n self.panel = wx.Panel(self)\n self.panelSizer = wx.BoxSizer(wx.VERTICAL)\n self.panel.SetSizer(self.panelSizer)\n # self.panel.SetBackgroundColour(CON_COLOR_MAIN)\n\n # 选择文件写入路径【此处更改为选择App】\n self.selectFilePanel = wx.Panel(self.panel)\n selectFilePanelSizer = wx.BoxSizer(wx.HORIZONTAL)\n self.selectFilePanel.SetSizer(selectFilePanelSizer)\n self.panelSizer.Add(self.selectFilePanel, 0, wx.EXPAND | wx.ALL, 2)\n self.selectFilePanel.SetBackgroundColour(CON_COLOR_MAIN) # CON_COLOR_PURE_WHITE\n\n self.labelSelectFile = wx.StaticText(self.selectFilePanel, -1, \"请在右侧下拉列表选择模型所属的应用程序\")\n self.choiceSelectFile = wx.Choice(self.selectFilePanel, -1, choices=[' ',] + djangotools.SCONFIGS.app_names())\n selectFilePanelSizer.Add(self.labelSelectFile, 0, wx.EXPAND | wx.ALL, 2)\n 
selectFilePanelSizer.Add(self.choiceSelectFile, 1, wx.EXPAND | wx.ALL, 2)\n self.labelSelectFile.SetFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n self.labelSelectFile.SetForegroundColour(CON_COLOR_PURE_WHITE)\n\n # 自定义工具栏\n self.toolPanel = wx.Panel(self.panel)\n toolPanelSizer = wx.BoxSizer(wx.HORIZONTAL)\n self.toolPanel.SetSizer(toolPanelSizer)\n self.panelSizer.Add(self.toolPanel, 0, wx.EXPAND | wx.ALL, 2)\n self.toolPanel.SetBackgroundColour(CON_COLOR_MAIN)\n\n self.btnAddNew = buttons.GenButton(self.toolPanel, -1, '新增字段')\n self.btnResetInput = buttons.GenButton(self.toolPanel, -1, '重置字段')\n self.btnAddFieldToArea = buttons.GenButton(self.toolPanel, -1, '添加至待新增区')\n # self.btnModifyFieldArgs = buttons.GenButton(self.toolPanel, -1, '修改')\n self.btnPreview = buttons.GenButton(self.toolPanel, -1, '代码预览')\n self.btnExecSave = buttons.GenButton(self.toolPanel, -1, '保存')\n self.btnExit = buttons.GenButton(self.toolPanel, -1, '退出')\n self.autoRegister = wx.CheckBox(self.toolPanel, -1, label = '自动注册后台') # 是否自动注册后台单选框\n self.autoRegister.SetForegroundColour(CON_COLOR_PURE_WHITE)\n self.btnWhite = buttons.GenButton(self.toolPanel, -1, ' ') # 空白区域补全按钮\n toolPanelSizer.Add(self.btnAddNew, 0, wx.EXPAND | wx.ALL, 2)\n toolPanelSizer.Add(self.btnResetInput, 0, wx.EXPAND | wx.ALL, 2)\n toolPanelSizer.Add(self.btnAddFieldToArea, 0, wx.EXPAND | wx.ALL, 2)\n # toolPanelSizer.Add(self.btnModifyFieldArgs, 0, wx.EXPAND | wx.ALL, 2)\n toolPanelSizer.Add(self.btnPreview, 0, wx.EXPAND | wx.ALL, 2)\n toolPanelSizer.Add(self.btnExecSave, 0, wx.EXPAND | wx.ALL, 2)\n toolPanelSizer.Add(self.btnExit, 0, wx.EXPAND | wx.ALL, 2)\n toolPanelSizer.Add(self.autoRegister, 0, wx.EXPAND | wx.ALL, 2)\n toolPanelSizer.Add(self.btnWhite, 1, wx.EXPAND | wx.ALL, 2)\n self.btnWhite.Enable(False)\n\n # 选择字段类型【行冻结】\n self.selectFieldTypeStaticBox = wx.StaticBox(self.panel, -1, '')\n self.selectFieldTypePanel = wx.StaticBoxSizer(self.selectFieldTypeStaticBox, wx.HORIZONTAL)\n self.panelSizer.Add(self.selectFieldTypePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.choiceFieldTypeLabel = wx.StaticText(self.panel, -1, \"1、字段类型:\")\n self.choiceFieldType = wx.Choice(self.panel, -1, choices = [' ']+CON_FIELD_TYPES) # , style = wx.CB_SORT\n self.readmeChoiceFieldType = wx.StaticText(self.panel, -1, \"【字段类型】** 新增字段前,必须先选择字段类型,选择后即可填写详细的参数数据。\") # 选项说明\n self.selectFieldTypePanel.Add(self.choiceFieldTypeLabel, 0, wx.EXPAND | wx.ALL, 2)\n self.selectFieldTypePanel.Add(self.choiceFieldType, 1, wx.EXPAND | wx.ALL, 2)\n self.panelSizer.Add(self.readmeChoiceFieldType, 0, wx.EXPAND | wx.ALL, 2)\n\n # 可滚动面板(包裹所有的参数)\n self.scollPanel = scrolledpanel.ScrolledPanel(self.panel, -1)\n self.scollPanel.SetupScrolling()\n scollPanelSizer = wx.BoxSizer(wx.VERTICAL)\n self.scollPanel.SetSizer(scollPanelSizer)\n self.panelSizer.Add(self.scollPanel, 3, wx.EXPAND | wx.ALL, 2)\n\n # 字段属性命名\n self.modelsNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.modelsNamePanel = wx.StaticBoxSizer(self.modelsNameStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.modelsNamePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelFieldModelName = wx.StaticText(self.scollPanel, -1, \"2、字段属性名:\")\n self.inputFieldModelName = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputFieldModelName = wx.StaticText(self.scollPanel, -1, \"【字段属性名】** 字段属性名,是代码中的字段名称,并非数据库中实际存储的列名。\")\n self.modelsNamePanel.Add(self.labelFieldModelName, 0, wx.EXPAND | wx.ALL, 2)\n self.modelsNamePanel.Add(self.inputFieldModelName, 1, wx.EXPAND | wx.ALL, 2)\n 
scollPanelSizer.Add(self.readmeInputFieldModelName, 0, wx.EXPAND | wx.ALL, 2)\n\n # 数据库列名(db_column)\n self.dbColumnNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.dbColumnNamePanel = wx.StaticBoxSizer(self.dbColumnNameStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.dbColumnNamePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelFieldDatabaseName = wx.StaticText(self.scollPanel, -1, \"3、数据库列名(db_column):\")\n self.inputFieldDatabaseName = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputFieldDatabaseName = wx.StaticText(self.scollPanel, -1, \"【数据库列名(db_column)】** 实际存储在数据库中的列名,若不指定默认取【字段属性名】。\")\n self.dbColumnNamePanel.Add(self.labelFieldDatabaseName, 0, wx.EXPAND | wx.ALL, 2)\n self.dbColumnNamePanel.Add(self.inputFieldDatabaseName, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputFieldDatabaseName, 0, wx.EXPAND | wx.ALL, 2)\n\n\n # 字段备注\n self.fieldRemarkStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.fieldRemarkPanel = wx.StaticBoxSizer(self.fieldRemarkStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.fieldRemarkPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelFieldRemarkName = wx.StaticText(self.scollPanel, -1, \"4、字段备注:\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputFieldRemarkName = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputFieldRemarkName = wx.StaticText(self.scollPanel, -1, \"【字段备注】** 字段备注默认取【字段属性名】,下划线将自动转换成空格。\")\n self.fieldRemarkPanel.Add(self.labelFieldRemarkName, 0, wx.EXPAND | wx.ALL, 2)\n self.fieldRemarkPanel.Add(self.inputFieldRemarkName, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputFieldRemarkName, 0, wx.EXPAND | wx.ALL, 2)\n\n # 默认值(default)\n self.inputDefaultValueStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputDefaultValuePanel = wx.StaticBoxSizer(self.inputDefaultValueStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputDefaultValuePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputDefaultValue = wx.StaticText(self.scollPanel, -1, \"5、默认值(default)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputDefaultValue = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputDefaultValue = wx.StaticText(self.scollPanel, -1, \"【默认值(default)】** 字段默认值,可以是常量,也可以是一个函数。字符串用''括起来。\")\n self.inputDefaultValuePanel.Add(self.labelInputDefaultValue, 0, wx.EXPAND | wx.ALL, 2)\n self.inputDefaultValuePanel.Add(self.inputDefaultValue, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputDefaultValue, 0, wx.EXPAND | wx.ALL, 2)\n\n # 与日期组合唯一(unique_for_date)\n self.choicesFiledUniqueForDateStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.choicesFiledUniqueForDatePanel = wx.StaticBoxSizer(self.choicesFiledUniqueForDateStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.choicesFiledUniqueForDatePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelChoicesFiledUniqueForDate = wx.StaticText(self.scollPanel, -1, \"6、与日期组合唯一(unique_for_date)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.choicesFiledUniqueForDate = wx.Choice(self.scollPanel, -1, choices=[' ',])\n self.readmeChoicesFiledUniqueForDate = wx.StaticText(self.scollPanel, -1, \"【与日期组合唯一(unique_for_date)】** 当前字段与当前选择日期字段的值组合唯一。\")\n self.choicesFiledUniqueForDatePanel.Add(self.labelChoicesFiledUniqueForDate, 0, wx.EXPAND | wx.ALL, 2)\n self.choicesFiledUniqueForDatePanel.Add(self.choicesFiledUniqueForDate, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeChoicesFiledUniqueForDate, 0, wx.EXPAND | wx.ALL, 2)\n\n # 与月份组合唯一(unique_for_month)\n 
self.choicesFiledUniqueForMonthStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.choicesFiledUniqueForMonthPanel = wx.StaticBoxSizer(self.choicesFiledUniqueForMonthStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.choicesFiledUniqueForMonthPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelChoicesFiledUniqueForMonth = wx.StaticText(self.scollPanel, -1, \"7、与月份组合唯一(unique_for_month)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.choicesFiledUniqueForMonth = wx.Choice(self.scollPanel, -1, choices=[' ',])\n self.readmeChoicesFiledUniqueForMonth = wx.StaticText(self.scollPanel, -1, \"【与月份组合唯一(unique_for_month)】** 当前字段与当前选择月份字段的值组合唯一。\")\n self.choicesFiledUniqueForMonthPanel.Add(self.labelChoicesFiledUniqueForMonth, 0, wx.EXPAND | wx.ALL, 2)\n self.choicesFiledUniqueForMonthPanel.Add(self.choicesFiledUniqueForMonth, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeChoicesFiledUniqueForMonth, 0, wx.EXPAND | wx.ALL, 2)\n\n # 与年份组合唯一(unique_for_year)\n self.choicesFiledUniqueForYearStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.choicesFiledUniqueForYearPanel = wx.StaticBoxSizer(self.choicesFiledUniqueForYearStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.choicesFiledUniqueForYearPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelChoicesFiledUniqueForYear = wx.StaticText(self.scollPanel, -1, \"8、与年份组合唯一(unique_for_year)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.choicesFiledUniqueForYear = wx.Choice(self.scollPanel, -1, choices=[' ',])\n self.readmeChoicesFiledUniqueForYear = wx.StaticText(self.scollPanel, -1, \"【与年份组合唯一(unique_for_year)】** 当前字段与当前选择年份字段的值组合唯一。\")\n self.choicesFiledUniqueForYearPanel.Add(self.labelChoicesFiledUniqueForYear, 0, wx.EXPAND | wx.ALL, 2)\n self.choicesFiledUniqueForYearPanel.Add(self.choicesFiledUniqueForYear, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeChoicesFiledUniqueForYear, 0, wx.EXPAND | wx.ALL, 2)\n\n # 主键(primary_key)\n self.radiosFiledPrimaryStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosFiledPrimaryPanel = wx.StaticBoxSizer(self.radiosFiledPrimaryStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosFiledPrimaryPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosFiledPrimary = wx.StaticText(self.scollPanel, -1, \"9、主键(primary_key):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosFiledPrimary = wx.RadioBox(self.scollPanel, -1, \"\", choices=['是', '否'])\n self.readmeRadiosFiledPrimary = wx.StaticText(self.scollPanel, -1, \"【主键(primary_key)】** 数据库主键唯��字段。\")\n self.radiosFiledPrimaryPanel.Add(self.labelRadiosFiledPrimary, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosFiledPrimaryPanel.Add(self.radiosFiledPrimary, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosFiledPrimary, 0, wx.EXPAND | wx.ALL, 2)\n\n # 值唯一(unique)\n self.radiosFiledUniqueStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosFiledUniquePanel = wx.StaticBoxSizer(self.radiosFiledUniqueStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosFiledUniquePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosFiledUnique = wx.StaticText(self.scollPanel, -1, \"10、值唯一(unique):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosFiledUnique = wx.RadioBox(self.scollPanel, -1, \"\", choices=['唯一', '不唯一'])\n self.readmeRadiosFiledUnique = wx.StaticText(self.scollPanel, -1, \"【值唯一(unique)】** 数据库字段值唯一。\")\n self.radiosFiledUniquePanel.Add(self.labelRadiosFiledUnique, 0, wx.EXPAND | wx.ALL, 2)\n 
self.radiosFiledUniquePanel.Add(self.radiosFiledUnique, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosFiledUnique, 0, wx.EXPAND | wx.ALL, 2)\n\n # 允许为空、blank\n self.radiosFiledBlankStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosFiledBlankPanel = wx.StaticBoxSizer(self.radiosFiledBlankStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosFiledBlankPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosFiledBlank = wx.StaticText(self.scollPanel, -1, \"11、允许为空(blank):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosFiledBlank = wx.RadioBox(self.scollPanel, -1, \"\", choices=['允许', '不允许'])\n self.readmeRadiosFiledBlank = wx.StaticText(self.scollPanel, -1, \"【允许为空(blank)】** 数据库表字段允许为空,表单验证允许为空。\")\n self.radiosFiledBlankPanel.Add(self.labelRadiosFiledBlank, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosFiledBlankPanel.Add(self.radiosFiledBlank, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosFiledBlank, 0, wx.EXPAND | wx.ALL, 2)\n\n # 为空时赋NULL(null)\n self.radiosFiledNullStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosFiledNullPanel = wx.StaticBoxSizer(self.radiosFiledNullStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosFiledNullPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosFiledNull = wx.StaticText(self.scollPanel, -1, \"12、为空时赋NULL(null):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosFiledNull = wx.RadioBox(self.scollPanel, -1, \"\", choices=['赋', '不赋'])\n self.readmeRadiosFiledNull = wx.StaticText(self.scollPanel, -1, \"【为空时赋NULL(null)】** 数据库表字段为空时,用NULL作默认值。\")\n self.radiosFiledNullPanel.Add(self.labelRadiosFiledNull, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosFiledNullPanel.Add(self.radiosFiledNull, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosFiledNull, 0, wx.EXPAND | wx.ALL, 2)\n\n # 创建索引(db_index)\n self.radiosFiledDbIndexStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosFiledDbIndexPanel = wx.StaticBoxSizer(self.radiosFiledDbIndexStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosFiledDbIndexPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosFiledDbIndex = wx.StaticText(self.scollPanel, -1, \"13、创建索引(db_index):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosFiledDbIndex = wx.RadioBox(self.scollPanel, -1, \"\", choices=['创建', '不创建'])\n self.readmeRadiosFiledDbIndex = wx.StaticText(self.scollPanel, -1, \"【创建索引(db_index)】** 创建数据库的字段索引。\")\n self.radiosFiledDbIndexPanel.Add(self.labelRadiosFiledDbIndex, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosFiledDbIndexPanel.Add(self.radiosFiledDbIndex, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosFiledDbIndex, 0, wx.EXPAND | wx.ALL, 2)\n\n # 表单显示(editable)\n self.radiosFiledEditableStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosFiledEditablePanel = wx.StaticBoxSizer(self.radiosFiledEditableStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosFiledEditablePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosFiledEditable = wx.StaticText(self.scollPanel, -1, \"14、表单显示(editable):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosFiledEditable = wx.RadioBox(self.scollPanel, -1, \"\", choices=['显示', '不显示'])\n self.readmeRadiosFiledEditable = wx.StaticText(self.scollPanel, -1, \"【表单显示(editable)】** 表单页面提供交互式控件。\")\n self.radiosFiledEditablePanel.Add(self.labelRadiosFiledEditable, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosFiledEditablePanel.Add(self.radiosFiledEditable, 0, 
wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosFiledEditable, 0, wx.EXPAND | wx.ALL, 2)\n\n # 表单帮助文本信息(help_text)\n self.inputFormHelpTextStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputFormHelpTextPanel = wx.StaticBoxSizer(self.inputFormHelpTextStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputFormHelpTextPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputFormHelpText = wx.StaticText(self.scollPanel, -1, \"15、表单帮助信息(help_text)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputFormHelpText = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputFormHelpText = wx.StaticText(self.scollPanel, -1, \"【表单帮助信息(help_text)】** 表单填写时的提示信息。\")\n self.inputFormHelpTextPanel.Add(self.labelInputFormHelpText, 0, wx.EXPAND | wx.ALL, 2)\n self.inputFormHelpTextPanel.Add(self.inputFormHelpText, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputFormHelpText, 0, wx.EXPAND | wx.ALL, 2)\n\n # 表单错误提醒(error_messages)\n self.inputFormErrorMessageStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputFormErrorMessagePanel = wx.StaticBoxSizer(self.inputFormErrorMessageStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputFormErrorMessagePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputFormErrorMessage = wx.StaticText(self.scollPanel, -1, \"16、表单错误提醒(error_messages)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputFormErrorMessage = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputFormErrorMessage = wx.StaticText(self.scollPanel, -1, \"【表单错误提醒(error_messages)】** 表单填写错误时的提示信息。\")\n self.inputFormErrorMessagePanel.Add(self.labelInputFormErrorMessage, 0, wx.EXPAND | wx.ALL, 2)\n self.inputFormErrorMessagePanel.Add(self.inputFormErrorMessage, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputFormErrorMessage, 0, wx.EXPAND | wx.ALL, 2)\n\n # 长度上限(max_length)\n self.inputMaxLengthStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputMaxLengthPanel = wx.StaticBoxSizer(self.inputMaxLengthStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputMaxLengthPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputMaxLength = wx.StaticText(self.scollPanel, -1, \"17、长度上限(max_length):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputMaxLength = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputMaxLength = wx.StaticText(self.scollPanel, -1, \"【长度上限(max_length)】** 数据库允许存储的最大长度。\")\n self.inputMaxLengthPanel.Add(self.labelInputMaxLength, 0, wx.EXPAND | wx.ALL, 2)\n self.inputMaxLengthPanel.Add(self.inputMaxLength, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputMaxLength, 0, wx.EXPAND | wx.ALL, 2)\n\n # 实数总位数(max_digits)\n self.inputMaxDigitsStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputMaxDigitsPanel = wx.StaticBoxSizer(self.inputMaxDigitsStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputMaxDigitsPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputMaxDigits = wx.StaticText(self.scollPanel, -1, \"18、实数总位数(max_digits)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputMaxDigits = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputMaxDigits = wx.StaticText(self.scollPanel, -1, \"【实数总位数(max_digits)】** 整数位数和小数位数的总和,不包括小数点。\")\n self.inputMaxDigitsPanel.Add(self.labelInputMaxDigits, 0, wx.EXPAND | wx.ALL, 2)\n self.inputMaxDigitsPanel.Add(self.inputMaxDigits, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputMaxDigits, 0, wx.EXPAND | wx.ALL, 2)\n\n # 小数总位数(decimal_places)(默认为0)\n 
self.inputDecimalPlacesStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputDecimalPlacesPanel = wx.StaticBoxSizer(self.inputDecimalPlacesStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputDecimalPlacesPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputDecimalPlaces = wx.StaticText(self.scollPanel, -1, \"19、小数总位数(decimal_places)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputDecimalPlaces = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputDecimalPlaces = wx.StaticText(self.scollPanel, -1, \"【小数总位数(decimal_places)】** 小数位数的总和,不包括小数点。\")\n self.inputDecimalPlacesPanel.Add(self.labelInputDecimalPlaces, 0, wx.EXPAND | wx.ALL, 2)\n self.inputDecimalPlacesPanel.Add(self.inputDecimalPlaces, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputDecimalPlaces, 0, wx.EXPAND | wx.ALL, 2)\n\n # save调用更新日期(auto_now)\n self.radiosAutoNowStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosAutoNowPanel = wx.StaticBoxSizer(self.radiosAutoNowStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosAutoNowPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosAutoNow = wx.StaticText(self.scollPanel, -1, \"20、保存更新日期(auto_now):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosAutoNow = wx.RadioBox(self.scollPanel, -1, \"\", choices=['启用', '不启用'])\n self.readmeRadiosAutoNow = wx.StaticText(self.scollPanel, -1, \"【保存更新日期(auto_now)】** 仅在调用模型控制器的save()方法时自动更新该日期字段。\")\n self.radiosAutoNowPanel.Add(self.labelRadiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosAutoNowPanel.Add(self.radiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosAutoNow, 0, wx.EXPAND | wx.ALL, 2)\n\n # 仅创建时一次赋值日期(auto_now_add)\n self.radiosAutoNowAddStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosAutoNowAddPanel = wx.StaticBoxSizer(self.radiosAutoNowAddStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosAutoNowAddPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosAutoNowAdd = wx.StaticText(self.scollPanel, -1, \"21、仅创建时赋值日期(auto_now_add):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosAutoNowAdd = wx.RadioBox(self.scollPanel, -1, \"\", choices=['启用', '不启用'])\n self.readmeRadiosAutoNowAdd = wx.StaticText(self.scollPanel, -1, \"【创建赋值日期(auto_now_add)】** 仅在创建记录时一次赋值该日期,赋值后不允许修改。\")\n self.radiosAutoNowAddPanel.Add(self.labelRadiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosAutoNowAddPanel.Add(self.radiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosAutoNowAdd, 0, wx.EXPAND | wx.ALL, 2)\n\n # 文件上传路径(upload_to)\n self.inputUploadToStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputUploadToPanel = wx.StaticBoxSizer(self.inputUploadToStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputUploadToPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputUploadTo = wx.StaticText(self.scollPanel, -1, \"22、文件上传路径(upload_to)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputUploadTo = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputUploadTo = wx.StaticText(self.scollPanel, -1, \"【文件上传路径(upload_to)】** 指定文件上传路径。\")\n self.inputUploadToPanel.Add(self.labelInputUploadTo, 0, wx.EXPAND | wx.ALL, 2)\n self.inputUploadToPanel.Add(self.inputUploadTo, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputUploadTo, 0, wx.EXPAND | wx.ALL, 2)\n\n # 关联关系--模型下拉列表选择(多对一的一)\n self.choiceSelectModelStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.choiceSelectModelPanel = 
wx.StaticBoxSizer(self.choiceSelectModelStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.choiceSelectModelPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelChoiceSelectModel = wx.StaticText(self.scollPanel, -1, \"A、关联关系模型【外键关联模型】\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n # self.choiceSelectModel = wx.Choice(self.scollPanel, -1, choices = [' ']+['self'])\n self.choiceSelectModel = wx.TextCtrl(self.scollPanel, -1)\n self.readmeChoiceSelectModel = wx.StaticText(self.scollPanel, -1, \" ** 多对一的一、一对一的一、多对多的多。如:Person、'Person'、'other_app.Person'。\")\n self.choiceSelectModelPanel.Add(self.labelChoiceSelectModel, 0, wx.EXPAND | wx.ALL, 2)\n self.choiceSelectModelPanel.Add(self.choiceSelectModel, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeChoiceSelectModel, 0, wx.EXPAND | wx.ALL, 2)\n\n # 删除规则【on_delete】\n self.choiceSelectDelRuleStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.choiceSelectDelRulePanel = wx.StaticBoxSizer(self.choiceSelectDelRuleStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.choiceSelectDelRulePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelChoiceSelectDelRule = wx.StaticText(self.scollPanel, -1, \"B、删除规则(on_delete)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.choiceSelectDelRule = wx.Choice(self.scollPanel, -1, choices = [' ']+['models.CASCADE','models.SET_NULL','models.PROTECT','models.SET_DEFAULT','models.DO_NOTHING',])\n self.readmeChoiceSelectDelRule = wx.StaticText(self.scollPanel, -1, \" ** 默认级联删除。\")\n self.choiceSelectDelRulePanel.Add(self.labelChoiceSelectDelRule, 0, wx.EXPAND | wx.ALL, 2)\n self.choiceSelectDelRulePanel.Add(self.choiceSelectDelRule, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeChoiceSelectDelRule, 0, wx.EXPAND | wx.ALL, 2)\n\n # 备注名【verbose_name】\n self.inputRelationRemarkStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputRelationRemarkPanel = wx.StaticBoxSizer(self.inputRelationRemarkStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputRelationRemarkPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputRelationRemark = wx.StaticText(self.scollPanel, -1, \"C、关联字段备注名(verbose_name)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputRelationRemark = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputRelationRemark = wx.StaticText(self.scollPanel, -1, \" ** 后台显示的关联字段的可读名称。\")\n self.inputRelationRemarkPanel.Add(self.labelInputRelationRemark, 0, wx.EXPAND | wx.ALL, 2)\n self.inputRelationRemarkPanel.Add(self.inputRelationRemark, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputRelationRemark, 0, wx.EXPAND | wx.ALL, 2)\n\n # 筛选关联字段【limit_choices_to】\n self.inputLimitChoicesToStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputLimitChoicesToPanel = wx.StaticBoxSizer(self.inputLimitChoicesToStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputLimitChoicesToPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputLimitChoicesTo = wx.StaticText(self.scollPanel, -1, \"D、筛选关联字段【limit_choices_to】\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputLimitChoicesTo = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputLimitChoicesTo = wx.StaticText(self.scollPanel, -1, \" ** 如:{'is_staff': True}。也可为一个Q对象,或可回调函数返回字典/Q。\")\n self.inputLimitChoicesToPanel.Add(self.labelInputLimitChoicesTo, 0, wx.EXPAND | wx.ALL, 2)\n self.inputLimitChoicesToPanel.Add(self.inputLimitChoicesTo, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputLimitChoicesTo, 0, wx.EXPAND | wx.ALL, 2)\n\n 
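# 补充示例(仅作参考,Person、other_app 为假设名称,非本工具实际输出):上述 A~D 参数组合后,生成的字段定义大致如下:\n        # author = models.ForeignKey('other_app.Person', on_delete=models.SET_NULL, null=True, blank=True, verbose_name='作者', limit_choices_to={'is_staff': True})\n\n        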
# 反向名称(related_name)\n self.inputRelatedNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputRelatedNamePanel = wx.StaticBoxSizer(self.inputRelatedNameStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputRelatedNamePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputRelatedName = wx.StaticText(self.scollPanel, -1, \"E、反向名称(related_name)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputRelatedName = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputRelatedName = wx.StaticText(self.scollPanel, -1, \" ** 被关联模型对象找到本模型对象的名称。赋值'+'关闭反向查找功能。抽象类必需。\")\n self.inputRelatedNamePanel.Add(self.labelInputRelatedName, 0, wx.EXPAND | wx.ALL, 2)\n self.inputRelatedNamePanel.Add(self.inputRelatedName, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputRelatedName, 0, wx.EXPAND | wx.ALL, 2)\n\n # 反向过滤器名称(related_query_name)\n self.inputRelatedQueryNameStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputRelatedQueryNamePanel = wx.StaticBoxSizer(self.inputRelatedQueryNameStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputRelatedQueryNamePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputRelatedQueryName = wx.StaticText(self.scollPanel, -1, \"F、反向过滤器名称(related_query_name)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputRelatedQueryName = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputRelatedQueryName = wx.StaticText(self.scollPanel, -1, \" ** 默认取related_name的值。用于:tag__name='important'之类的反向过滤前缀。\")\n self.inputRelatedQueryNamePanel.Add(self.labelInputRelatedQueryName, 0, wx.EXPAND | wx.ALL, 2)\n self.inputRelatedQueryNamePanel.Add(self.inputRelatedQueryName, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputRelatedQueryName, 0, wx.EXPAND | wx.ALL, 2)\n\n # 指定关联外键(to_field)\n self.inputToFieldStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.inputToFieldPanel = wx.StaticBoxSizer(self.inputToFieldStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputToFieldPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputToField = wx.StaticText(self.scollPanel, -1, \"G、指定关联外键(to_field)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputToField = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputToField = wx.StaticText(self.scollPanel, -1, \" ** 默认取primary_key=True的字段。若要改变,必须是设置unique=True的字段。\")\n self.inputToFieldPanel.Add(self.labelInputToField, 0, wx.EXPAND | wx.ALL, 2)\n self.inputToFieldPanel.Add(self.inputToField, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputToField, 0, wx.EXPAND | wx.ALL, 2)\n\n # 外键约束(db_constraint)\n self.radiosDBConstraintStaticBox = wx.StaticBox(self.scollPanel, -1, '')\n self.radiosDBConstraintPanel = wx.StaticBoxSizer(self.radiosDBConstraintStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.radiosDBConstraintPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelRadiosDBConstraint = wx.StaticText(self.scollPanel, -1, \"H、外键约束(db_constraint)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.radiosDBConstraint = wx.RadioBox(self.scollPanel, -1, \"\", choices=['开启', '关闭'])\n self.readmeRadiosDBConstraint = wx.StaticText(self.scollPanel, -1, \" ** 当有无效冗余数据或为共享数据库时可关闭,否则不建议关闭。\")\n self.radiosDBConstraintPanel.Add(self.labelRadiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)\n self.radiosDBConstraintPanel.Add(self.radiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeRadiosDBConstraint, 0, wx.EXPAND | wx.ALL, 2)\n\n # 多对多中间表名(db_table)\n self.inputDBTableStaticBox = 
wx.StaticBox(self.scollPanel, -1, '')\n self.inputDBTablePanel = wx.StaticBoxSizer(self.inputDBTableStaticBox, wx.HORIZONTAL)\n scollPanelSizer.Add(self.inputDBTablePanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelInputDBTable = wx.StaticText(self.scollPanel, -1, \"I、多对多中间表名(db_table)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.inputDBTable = wx.TextCtrl(self.scollPanel, -1)\n self.readmeInputDBTable = wx.StaticText(self.scollPanel, -1, \" ** Django默认生成关联表的哈希值表名,保证值唯一。也可自己命名。\")\n self.inputDBTablePanel.Add(self.labelInputDBTable, 0, wx.EXPAND | wx.ALL, 2)\n self.inputDBTablePanel.Add(self.inputDBTable, 1, wx.EXPAND | wx.ALL, 2)\n scollPanelSizer.Add(self.readmeInputDBTable, 0, wx.EXPAND | wx.ALL, 2)\n\n # all 交换类型(swappable)\n # 多对多 指定多对多模型(through)\n # 多对多 指定多对多模型外键(through_fields)\n # 一对一 父类链接(parent_link)\n # 暂时不开放上述参数\n\n # 后触发按钮\n self.afterBtns.extend([\n self.btnResetInput, self.btnAddFieldToArea,\n # self.btnExecSave,\n ])\n\n # 所有的参数\n self.allArgs.extend([\n self.choiceFieldType, # 字段类型选择放这里不合理【暂时不调整】\n self.inputFieldModelName, self.inputFieldDatabaseName, self.inputFieldRemarkName,\n self.radiosFiledBlank, self.radiosFiledNull, self.radiosFiledPrimary, # 英文拼错了,不改了\n self.radiosFiledUnique, self.radiosFiledDbIndex, self.radiosFiledEditable,\n self.choicesFiledUniqueForDate, self.choicesFiledUniqueForMonth, self.choicesFiledUniqueForYear,\n self.inputDefaultValue, self.inputFormHelpText, self.inputFormErrorMessage,\n self.inputMaxLength, self.inputMaxDigits, self.inputDecimalPlaces,\n self.radiosAutoNow, self.radiosAutoNowAdd, self.inputUploadTo,\n self.choiceSelectModel, self.choiceSelectDelRule, self.inputRelationRemark,\n self.inputLimitChoicesTo, self.inputRelatedName, self.inputRelatedQueryName,\n self.inputToField, self.radiosDBConstraint, self.inputDBTable,\n ])\n\n # 共用参数\n self.commonArgs.extend([\n self.inputFieldModelName, self.inputFieldDatabaseName, self.inputFieldRemarkName,\n self.radiosFiledBlank, self.radiosFiledNull, self.radiosFiledPrimary,\n self.radiosFiledUnique, self.radiosFiledDbIndex, self.radiosFiledEditable,\n self.choicesFiledUniqueForDate, self.choicesFiledUniqueForMonth, self.choicesFiledUniqueForYear,\n self.inputDefaultValue, self.inputFormHelpText, self.inputFormErrorMessage,\n ])\n\n # 私有参数\n self.specialArgs.extend([\n # 一行表示一组私有参数\n self.inputMaxLengthStaticBox, self.inputMaxLength, self.labelInputMaxLength, self.readmeInputMaxLength,\n self.inputMaxDigitsStaticBox, self.inputMaxDigits, self.labelInputMaxDigits, self.readmeInputMaxDigits,\n self.inputDecimalPlacesStaticBox, self.inputDecimalPlaces, self.labelInputDecimalPlaces, self.readmeInputDecimalPlaces,\n self.radiosAutoNowStaticBox, self.radiosAutoNow, self.labelRadiosAutoNow, self.readmeRadiosAutoNow,\n self.radiosAutoNowAddStaticBox, self.radiosAutoNowAdd, self.labelRadiosAutoNowAdd, self.readmeRadiosAutoNowAdd,\n self.inputUploadToStaticBox, self.inputUploadTo, self.labelInputUploadTo, self.readmeInputUploadTo,\n\n # 关联字段\n self.choiceSelectModelStaticBox, self.choiceSelectModel, self.labelChoiceSelectModel, self.readmeChoiceSelectModel,\n self.choiceSelectDelRuleStaticBox, self.choiceSelectDelRule, self.labelChoiceSelectDelRule, self.readmeChoiceSelectDelRule,\n self.inputRelationRemarkStaticBox, self.inputRelationRemark, self.labelInputRelationRemark, self.readmeInputRelationRemark,\n self.inputLimitChoicesToStaticBox, self.inputLimitChoicesTo, self.labelInputLimitChoicesTo, self.readmeInputLimitChoicesTo,\n self.inputRelatedNameStaticBox, 
self.inputRelatedName, self.labelInputRelatedName, self.readmeInputRelatedName,\n self.inputRelatedQueryNameStaticBox, self.inputRelatedQueryName, self.labelInputRelatedQueryName, self.readmeInputRelatedQueryName,\n self.inputToFieldStaticBox, self.inputToField, self.labelInputToField, self.readmeInputToField,\n self.radiosDBConstraintStaticBox, self.radiosDBConstraint, self.labelRadiosDBConstraint, self.readmeRadiosDBConstraint,\n self.inputDBTableStaticBox, self.inputDBTable, self.labelInputDBTable, self.readmeInputDBTable,\n\n ])\n\n # 字体初始化控件录入\n self.readmeStaticTexts.extend([\n self.readmeChoiceFieldType,self.readmeInputFieldModelName,\n self.readmeInputFieldDatabaseName,self.readmeInputFieldRemarkName,\n self.readmeRadiosFiledBlank,self.readmeRadiosFiledNull,\n self.readmeRadiosFiledPrimary,self.readmeRadiosFiledUnique,\n self.readmeRadiosFiledDbIndex,self.readmeRadiosFiledEditable,\n self.readmeInputMaxLength,self.readmeRadiosAutoNow,\n self.readmeRadiosAutoNowAdd,self.readmeInputDefaultValue,\n self.readmeInputFormHelpText,self.readmeInputFormErrorMessage,\n self.readmeInputUploadTo,self.readmeInputMaxDigits,\n self.readmeInputDecimalPlaces,self.readmeChoicesFiledUniqueForDate,\n self.readmeChoicesFiledUniqueForMonth,self.readmeChoicesFiledUniqueForYear,\n self.readmeChoiceSelectModel,self.readmeChoiceSelectDelRule,\n self.readmeInputRelationRemark,self.readmeInputLimitChoicesTo,\n self.readmeInputRelatedName,self.readmeInputRelatedQueryName,\n self.readmeInputToField,self.readmeRadiosDBConstraint,\n self.readmeInputDBTable,\n ])\n self.labelStaticTexts.extend([\n self.choiceFieldTypeLabel,self.labelFieldModelName,\n self.labelFieldDatabaseName,self.labelFieldRemarkName,\n self.labelRadiosFiledBlank,self.labelRadiosFiledNull,\n self.labelRadiosFiledPrimary,self.labelRadiosFiledUnique,\n self.labelRadiosFiledDbIndex,self.labelRadiosFiledEditable,\n self.labelInputMaxLength,self.labelRadiosAutoNow,\n self.labelRadiosAutoNowAdd,self.labelInputDefaultValue,\n self.labelInputFormHelpText,self.labelInputFormErrorMessage,\n self.labelInputUploadTo,self.labelInputMaxDigits,\n self.labelInputDecimalPlaces,self.labelChoicesFiledUniqueForDate,\n self.labelChoicesFiledUniqueForMonth,self.labelChoicesFiledUniqueForYear,\n self.labelChoiceSelectModel,self.labelChoiceSelectDelRule,\n self.labelInputRelationRemark,self.labelInputLimitChoicesTo,\n self.labelInputRelatedName,self.labelInputRelatedQueryName,\n self.labelInputToField,self.labelRadiosDBConstraint,\n self.labelInputDBTable,\n ])\n\n # 按钮点击事件\n self.Bind(wx.EVT_BUTTON, self.onExit, self.btnExit)\n self.Bind(wx.EVT_BUTTON, self.onBtnAddNew, self.btnAddNew)\n self.Bind(wx.EVT_BUTTON, self.onBtnResetInput, self.btnResetInput)\n self.Bind(wx.EVT_BUTTON, self.onBtnAddFieldToArea, self.btnAddFieldToArea)\n self.Bind(wx.EVT_BUTTON, self.onBtnExecSave, self.btnExecSave)\n self.Bind(wx.EVT_BUTTON, self.onBtnPreview, self.btnPreview)\n # 下拉框选择事件\n self.Bind(wx.EVT_CHOICE, self.onChoiceFieldType, self.choiceFieldType)\n self.Bind(wx.EVT_CHOICE, self.onChoiceSelectDelRule, self.choiceSelectDelRule)\n # 文本实时监听事件\n self.Bind(wx.EVT_TEXT, self.onInputFieldModelName, self.inputFieldModelName)\n self.Bind(wx.EVT_TEXT, self.onInputMaxLength, self.inputMaxLength)\n self.Bind(wx.EVT_TEXT, self.onInputMaxDigits, self.inputMaxDigits)\n self.Bind(wx.EVT_TEXT, self.onInputDecimalPlaces, self.inputDecimalPlaces)\n self.Bind(wx.EVT_TEXT, self.onInputRelatedName, self.inputRelatedName)\n # 单选框事件\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, 
self.radiosFiledBlank)\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledNull)\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledPrimary)\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledUnique)\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledDbIndex)\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosFiledEditable)\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosAutoNow)\n self.Bind(wx.EVT_RADIOBOX, self.onRadioChanged, self.radiosAutoNowAdd)\n\n def _init_Meta_panel(self):\n \"\"\"初始化Meta选项面板\"\"\"\n # 显示和隐藏Meta按钮,用于空间的合理布局\n self.btnShowUnshowMeta = buttons.GenButton(self.panel, -1, '【显示】Meta元数据(表级参数设置)')\n self.panelSizer.Add(self.btnShowUnshowMeta, 0, wx.EXPAND | wx.ALL, 2)\n self.btnShowUnshowMeta.SetBackgroundColour(CON_COLOR_MAIN)\n self.btnShowUnshowMeta.SetForegroundColour(CON_COLOR_WHITE)\n\n self.metaScollPanel = scrolledpanel.ScrolledPanel(self.panel, -1, size=(730,444))\n self.metaScollPanel.SetupScrolling()\n metaScollPanelSizer = wx.BoxSizer(wx.VERTICAL)\n self.metaScollPanel.SetSizer(metaScollPanelSizer)\n self.panelSizer.Add(self.metaScollPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n # Meta的各种选项\n # 抽象类(abstract)\n self.metaAbstractOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaAbstractOptionPanel = wx.StaticBoxSizer(self.metaAbstractOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaAbstractOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaAbstractOption = wx.StaticText(self.metaScollPanel, -1, \"1、抽象类(abstract):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaAbstractOption = wx.RadioBox(self.metaScollPanel, -1, \"\", choices=['是', '否'])\n self.readmeMetaAbstractOption = wx.StaticText(self.metaScollPanel, -1, \" ** 该模型声明为抽象模型后,不会在数据库中建表。\")\n self.metaAbstractOptionPanel.Add(self.labelMetaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaAbstractOptionPanel.Add(self.metaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaAbstractOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 模型归属应用程序(app_label)\n # 可以用model._meta.label或model._meta.label_lower获取模型名称\n self.metaAppLabelOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaAppLabelOptionPanel = wx.StaticBoxSizer(self.metaAppLabelOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaAppLabelOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaAppLabelOption = wx.StaticText(self.metaScollPanel, -1, \"2、模型归属应用程序(app_label):\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaAppLabelOption = wx.Choice(self.metaScollPanel, -1, choices=[' ',]+get_configs(CONFIG_PATH)['app_names'])\n self.readmeMetaAppLabelOption = wx.StaticText(self.metaScollPanel, -1, \" ** 不指定,则默认归属于当前模型文件所在的应用程序。\")\n self.metaAppLabelOptionPanel.Add(self.labelMetaAppLabelOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaAppLabelOptionPanel.Add(self.metaAppLabelOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaAppLabelOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 模型管理器名称(base_manager_name)\n self.metaObjectsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaObjectsOptionPanel = wx.StaticBoxSizer(self.metaObjectsOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaObjectsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaObjectsOption = wx.StaticText(self.metaScollPanel, -1, \"3、模型管理器名称(base_manager_name)\", size=(STATIC_TEXT_WIDTH, -1), 
style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaObjectsOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaObjectsOption = wx.StaticText(self.metaScollPanel, -1, \" ** 默认为objects。可用model.objects调出管理器。\")\n self.metaObjectsOptionPanel.Add(self.labelMetaObjectsOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaObjectsOptionPanel.Add(self.metaObjectsOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaObjectsOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 数据表名(db_table)\n # 在mysql中均小写,Oracle中数据库表名要用双引号括起来\n self.metaDBTableOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaDBTableOptionPanel = wx.StaticBoxSizer(self.metaDBTableOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaDBTableOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaDBTableOption = wx.StaticText(self.metaScollPanel, -1, \"4、数据表名(db_table)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaDBTableOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaDBTableOption = wx.StaticText(self.metaScollPanel, -1, \" ** 默认为应用程序名+模型名,全小写。如:app_model。\")\n self.metaDBTableOptionPanel.Add(self.labelMetaDBTableOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaDBTableOptionPanel.Add(self.metaDBTableOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaDBTableOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 表空间名(db_tablespace)\n self.metaDBTableSpaceOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaDBTableSpaceOptionPanel = wx.StaticBoxSizer(self.metaDBTableSpaceOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaDBTableSpaceOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaDBTableSpaceOption = wx.StaticText(self.metaScollPanel, -1, \"5、表空间名(db_tablespace)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaDBTableSpaceOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaDBTableSpaceOption = wx.StaticText(self.metaScollPanel, -1, \" ** 默认使用settings.py中的DEFAULT_TABLESPACE值。\")\n self.metaDBTableSpaceOptionPanel.Add(self.labelMetaDBTableSpaceOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaDBTableSpaceOptionPanel.Add(self.metaDBTableSpaceOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaDBTableSpaceOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 指定默认解析管理器(default_manager_name)\n self.metaDefaultManagerNameOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaDefaultManagerNameOptionPanel = wx.StaticBoxSizer(self.metaDefaultManagerNameOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaDefaultManagerNameOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaDefaultManagerNameOption = wx.StaticText(self.metaScollPanel, -1, \"6、指定默认解析管理器(default_manager_name)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaDefaultManagerNameOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaDefaultManagerNameOption = wx.StaticText(self.metaScollPanel, -1, \" ** 用于Django的默认行为,防止数据集缺失导致的错误。常用于一个模型多个解析器的情况。\")\n self.metaDefaultManagerNameOptionPanel.Add(self.labelMetaDefaultManagerNameOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaDefaultManagerNameOptionPanel.Add(self.metaDefaultManagerNameOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaDefaultManagerNameOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 默认关联名称(default_related_name)\n self.metaDefaultRelatedNameOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaDefaultRelatedNameOptionPanel = 
wx.StaticBoxSizer(self.metaDefaultRelatedNameOptionStaticBox, wx.HORIZONTAL)\n        metaScollPanelSizer.Add(self.metaDefaultRelatedNameOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n        self.labelMetaDefaultRelatedNameOption = wx.StaticText(self.metaScollPanel, -1, \"7、反向名称(default_related_name)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n        self.metaDefaultRelatedNameOption = wx.TextCtrl(self.metaScollPanel, -1)\n        self.readmeMetaDefaultRelatedNameOption = wx.StaticText(self.metaScollPanel, -1, \" ** 外键关联反向名称,默认_set。\")\n        self.metaDefaultRelatedNameOptionPanel.Add(self.labelMetaDefaultRelatedNameOption, 0, wx.EXPAND | wx.ALL, 2)\n        self.metaDefaultRelatedNameOptionPanel.Add(self.metaDefaultRelatedNameOption, 1, wx.EXPAND | wx.ALL, 2)\n        metaScollPanelSizer.Add(self.readmeMetaDefaultRelatedNameOption, 0, wx.EXPAND | wx.ALL, 2)\n\n        # 取最新的一条记录(get_latest_by)\n        # 配合latest()函数使用\n        self.metaGetLatestByOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n        self.metaGetLatestByOptionPanel = wx.StaticBoxSizer(self.metaGetLatestByOptionStaticBox, wx.HORIZONTAL)\n        metaScollPanelSizer.Add(self.metaGetLatestByOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n        self.labelMetaGetLatestByOption = wx.StaticText(self.metaScollPanel, -1, \"8、取最新的一条记录(get_latest_by)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n        self.metaGetLatestByOption = wx.TextCtrl(self.metaScollPanel, -1)\n        self.readmeMetaGetLatestByOption = wx.StaticText(self.metaScollPanel, -1, \" ** 推荐指定日期字段,加前缀'-'表示倒序,可组合,用英文逗号隔开。配合latest()使用。\")\n        self.metaGetLatestByOptionPanel.Add(self.labelMetaGetLatestByOption, 0, wx.EXPAND | wx.ALL, 2)\n        self.metaGetLatestByOptionPanel.Add(self.metaGetLatestByOption, 1, wx.EXPAND | wx.ALL, 2)\n        metaScollPanelSizer.Add(self.readmeMetaGetLatestByOption, 0, wx.EXPAND | wx.ALL, 2)\n\n        # 托管模型(managed)\n        self.metaManagedOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n        self.metaManagedOptionPanel = wx.StaticBoxSizer(self.metaManagedOptionStaticBox, wx.HORIZONTAL)\n        metaScollPanelSizer.Add(self.metaManagedOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n        self.labelMetaManagedOption = wx.StaticText(self.metaScollPanel, -1, \"9、托管模型(managed)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n        self.metaManagedOption = wx.RadioBox(self.metaScollPanel, -1, \"\", choices=['是', '否'])\n        self.readmeMetaManagedOption = wx.StaticText(self.metaScollPanel, -1, \" ** 托管意味着由Django掌控模型的所有生命周期,这也是Django的默认行为。\")\n        self.metaManagedOptionPanel.Add(self.labelMetaManagedOption, 0, wx.EXPAND | wx.ALL, 2)\n        self.metaManagedOptionPanel.Add(self.metaManagedOption, 0, wx.EXPAND | wx.ALL, 2)\n        metaScollPanelSizer.Add(self.readmeMetaManagedOption, 0, wx.EXPAND | wx.ALL, 2)\n\n        # 指定排序字段(ordering)\n        # ordering = [F('author').asc(nulls_last=True)]\n        self.metaOrderingOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n        self.metaOrderingOptionPanel = wx.StaticBoxSizer(self.metaOrderingOptionStaticBox, wx.HORIZONTAL)\n        metaScollPanelSizer.Add(self.metaOrderingOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n        self.labelMetaOrderingOption = wx.StaticText(self.metaScollPanel, -1, \"10、指定排序字段(ordering)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n        self.metaOrderingOption = wx.TextCtrl(self.metaScollPanel, -1)\n        self.readmeMetaOrderingOption = wx.StaticText(self.metaScollPanel, -1, \" ** 前缀'-'表示倒序,可多字段组合,中间用英文逗号隔开。\")\n        self.metaOrderingOptionPanel.Add(self.labelMetaOrderingOption, 0, wx.EXPAND | wx.ALL, 2)\n        self.metaOrderingOptionPanel.Add(self.metaOrderingOption, 1, wx.EXPAND | 
wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaOrderingOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 默认权限(default_permissions)\n self.metaDefaultPermissionsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaDefaultPermissionsOptionPanel = wx.StaticBoxSizer(self.metaDefaultPermissionsOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaDefaultPermissionsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaDefaultPermissionsOption = wx.StaticText(self.metaScollPanel, -1, \"11、默认权限(default_permissions)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaDefaultPermissionsOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaDefaultPermissionsOption = wx.StaticText(self.metaScollPanel, -1, \" ** 默认值('add', 'change', 'delete', 'view'),view为Django2.1版本后添加。\")\n self.metaDefaultPermissionsOptionPanel.Add(self.labelMetaDefaultPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaDefaultPermissionsOptionPanel.Add(self.metaDefaultPermissionsOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaDefaultPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 额外权限(permissions)\n # (permission_code, human_readable_permission_name)\n self.metaPermissionsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaPermissionsOptionPanel = wx.StaticBoxSizer(self.metaPermissionsOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaPermissionsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaPermissionsOption = wx.StaticText(self.metaScollPanel, -1, \"12、额外权限(permissions)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaPermissionsOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaPermissionsOption = wx.StaticText(self.metaScollPanel, -1, \" ** 默认添加增删改查权限,可新增权限,用二元组列表表示。如[('code', 'name'),]\")\n self.metaPermissionsOptionPanel.Add(self.labelMetaPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaPermissionsOptionPanel.Add(self.metaPermissionsOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaPermissionsOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 代理模型(proxy)\n self.metaProxyOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaProxyOptionPanel = wx.StaticBoxSizer(self.metaProxyOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaProxyOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaProxyOption = wx.StaticText(self.metaScollPanel, -1, \"13、代理模型(proxy)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaProxyOption = wx.RadioBox(self.metaScollPanel, -1, \"\", choices=['是', '否'])\n self.readmeMetaProxyOption = wx.StaticText(self.metaScollPanel, -1, \" ** 为原模型创建一个代理,用于扩展排序或管理器,与原模型共用一个表。\")\n self.metaProxyOptionPanel.Add(self.labelMetaProxyOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaProxyOptionPanel.Add(self.metaProxyOption, 0, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaProxyOption, 0, wx.EXPAND | wx.ALL, 2)\n \n # 保存旧算法(select_on_save)\n self.metaSelectOnSaveOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaSelectOnSaveOptionPanel = wx.StaticBoxSizer(self.metaSelectOnSaveOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaSelectOnSaveOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaSelectOnSaveOption = wx.StaticText(self.metaScollPanel, -1, \"14、保存旧算法(select_on_save)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaSelectOnSaveOption = wx.RadioBox(self.metaScollPanel, -1, 
\"\", choices=['是', '否'])\n self.readmeMetaSelectOnSaveOption = wx.StaticText(self.metaScollPanel, -1, \" ** 旧算法先查询后更新,新算法直接尝试更新。\")\n self.metaSelectOnSaveOptionPanel.Add(self.labelMetaSelectOnSaveOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaSelectOnSaveOptionPanel.Add(self.metaSelectOnSaveOption, 0, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaSelectOnSaveOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 指定后端数据库类型(required_db_vendor)\n self.metaRequiredDBVendorOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaRequiredDBVendorOptionPanel = wx.StaticBoxSizer(self.metaRequiredDBVendorOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaRequiredDBVendorOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaRequiredDBVendorOption = wx.StaticText(self.metaScollPanel, -1, \"15、指定后端数据库类型(required_db_vendor)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaRequiredDBVendorOption = wx.Choice(self.metaScollPanel, -1, choices=[' ',]+env.getDjangoSupportDatabase())\n self.readmeMetaRequiredDBVendorOption = wx.StaticText(self.metaScollPanel, -1, \" ** 不指定则默认支持所有。\")\n self.metaRequiredDBVendorOptionPanel.Add(self.labelMetaRequiredDBVendorOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaRequiredDBVendorOptionPanel.Add(self.metaRequiredDBVendorOption, 0, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaRequiredDBVendorOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 索引集合(indexes)\n self.metaIndexesOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaIndexesOptionPanel = wx.StaticBoxSizer(self.metaIndexesOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaIndexesOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaIndexesOption = wx.StaticText(self.metaScollPanel, -1, \"16、索引集合(indexes)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaIndexesOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaIndexesOption = wx.StaticText(self.metaScollPanel, -1, \" ** 示例:[models.Index(fields=['first_name',], name='first_name_idx'),]\")\n self.metaIndexesOptionPanel.Add(self.labelMetaIndexesOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaIndexesOptionPanel.Add(self.metaIndexesOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaIndexesOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 值唯一组合(unique_together)\n self.metaUniqueTogetherOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaUniqueTogetherOptionPanel = wx.StaticBoxSizer(self.metaUniqueTogetherOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaUniqueTogetherOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaUniqueTogetherOption = wx.StaticText(self.metaScollPanel, -1, \"17、值唯一组合(unique_together)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaUniqueTogetherOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaUniqueTogetherOption = wx.StaticText(self.metaScollPanel, -1, \" ** 示例:[['driver', 'restaurant',],]。将来可能被弃用。\")\n self.metaUniqueTogetherOptionPanel.Add(self.labelMetaUniqueTogetherOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaUniqueTogetherOptionPanel.Add(self.metaUniqueTogetherOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaUniqueTogetherOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 索引组合(index_together)\n self.metaIndexTogetherOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaIndexTogetherOptionPanel = wx.StaticBoxSizer(self.metaIndexTogetherOptionStaticBox, wx.HORIZONTAL)\n 
metaScollPanelSizer.Add(self.metaIndexTogetherOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaIndexTogetherOption = wx.StaticText(self.metaScollPanel, -1, \"18、索引组合(index_together)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaIndexTogetherOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaIndexTogetherOption = wx.StaticText(self.metaScollPanel, -1, \" ** 示例:[['pub_date', 'deadline'],]。将来可能被弃用。\")\n self.metaIndexTogetherOptionPanel.Add(self.labelMetaIndexTogetherOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaIndexTogetherOptionPanel.Add(self.metaIndexTogetherOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaIndexTogetherOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 约束条件(constraints)\n self.metaConstraintsOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaConstraintsOptionPanel = wx.StaticBoxSizer(self.metaConstraintsOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaConstraintsOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaConstraintsOption = wx.StaticText(self.metaScollPanel, -1, \"19、约束条件(constraints)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaConstraintsOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaConstraintsOption = wx.StaticText(self.metaScollPanel, -1, \" ** 示例:[models.CheckConstraint(check=models.Q(age__gte=18), name='age_gte_18'),]。\")\n self.metaConstraintsOptionPanel.Add(self.labelMetaConstraintsOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaConstraintsOptionPanel.Add(self.metaConstraintsOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaConstraintsOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 模型可读单数名称(verbose_name)\n self.metaVerboseNameOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaVerboseNameOptionPanel = wx.StaticBoxSizer(self.metaVerboseNameOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaVerboseNameOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaVerboseNameOption = wx.StaticText(self.metaScollPanel, -1, \"20、模型可读单数名称(verbose_name)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaVerboseNameOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaVerboseNameOption = wx.StaticText(self.metaScollPanel, -1, \" ** 用于后台展示模型的可读名称。\")\n self.metaVerboseNameOptionPanel.Add(self.labelMetaVerboseNameOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaVerboseNameOptionPanel.Add(self.metaVerboseNameOption, 1, wx.EXPAND | wx.ALL, 2)\n metaScollPanelSizer.Add(self.readmeMetaVerboseNameOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # 模型可读复数名称(verbose_name_plural)\n self.metaVerboseNamePluralOptionStaticBox = wx.StaticBox(self.metaScollPanel, -1, '')\n self.metaVerboseNamePluralOptionPanel = wx.StaticBoxSizer(self.metaVerboseNamePluralOptionStaticBox, wx.HORIZONTAL)\n metaScollPanelSizer.Add(self.metaVerboseNamePluralOptionPanel, 0, wx.EXPAND | wx.ALL, 2)\n\n self.labelMetaVerboseNamePluralOption = wx.StaticText(self.metaScollPanel, -1, \"21、模型可读复数名称(verbose_name_plural)\", size=(STATIC_TEXT_WIDTH, -1), style=wx.ALIGN_CENTRE_HORIZONTAL)\n self.metaVerboseNamePluralOption = wx.TextCtrl(self.metaScollPanel, -1)\n self.readmeMetaVerboseNamePluralOption = wx.StaticText(self.metaScollPanel, -1, \" ** 默认是verbose_name+s。\")\n self.metaVerboseNamePluralOptionPanel.Add(self.labelMetaVerboseNamePluralOption, 0, wx.EXPAND | wx.ALL, 2)\n self.metaVerboseNamePluralOptionPanel.Add(self.metaVerboseNamePluralOption, 1, wx.EXPAND | wx.ALL, 2)\n 
metaScollPanelSizer.Add(self.readmeMetaVerboseNamePluralOption, 0, wx.EXPAND | wx.ALL, 2)\n\n # order_with_respect_to暂不放出\n\n # 标签显示优化\n self.readmeStaticTexts.extend([\n self.readmeMetaAbstractOption,\n self.readmeMetaAppLabelOption,\n self.readmeMetaObjectsOption,\n self.readmeMetaDBTableOption,\n self.readmeMetaDBTableSpaceOption,\n self.readmeMetaDefaultManagerNameOption,\n self.readmeMetaDefaultRelatedNameOption,\n self.readmeMetaGetLatestByOption,\n self.readmeMetaManagedOption,\n self.readmeMetaOrderingOption,\n self.readmeMetaPermissionsOption,\n self.readmeMetaDefaultPermissionsOption,\n self.readmeMetaProxyOption,\n self.readmeMetaSelectOnSaveOption,\n self.readmeMetaRequiredDBVendorOption,\n self.readmeMetaIndexesOption,\n self.readmeMetaUniqueTogetherOption,\n self.readmeMetaIndexTogetherOption,\n self.readmeMetaConstraintsOption,\n self.readmeMetaVerboseNameOption,\n self.readmeMetaVerboseNamePluralOption,\n ])\n self.labelStaticTexts.extend([\n self.labelMetaAbstractOption,\n self.labelMetaAppLabelOption,\n self.labelMetaObjectsOption,\n self.labelMetaDBTableOption,\n self.labelMetaDBTableSpaceOption,\n self.labelMetaDefaultManagerNameOption,\n self.labelMetaDefaultRelatedNameOption,\n self.labelMetaGetLatestByOption,\n self.labelMetaManagedOption,\n self.labelMetaOrderingOption,\n self.labelMetaPermissionsOption,\n self.labelMetaDefaultPermissionsOption,\n self.labelMetaProxyOption,\n self.labelMetaSelectOnSaveOption,\n self.labelMetaRequiredDBVendorOption,\n self.labelMetaIndexesOption,\n self.labelMetaUniqueTogetherOption,\n self.labelMetaIndexTogetherOption,\n self.labelMetaConstraintsOption,\n self.labelMetaVerboseNameOption,\n self.labelMetaVerboseNamePluralOption,\n ])\n\n # 按钮事件\n self.Bind(wx.EVT_BUTTON, self.onBtnShowUnshowMeta, self.btnShowUnshowMeta)\n # 单选框事件\n self.Bind(wx.EVT_RADIOBOX, self.onMetaRadioChanged, self.metaAbstractOption)\n\n self.metaScollPanel.Show(False) # 默认不显示\n self._init_meta_data()\n \n def _init_meta_data(self):\n \"\"\"初始化Meta选项数据\"\"\"\n self.metaAbstractOption.SetSelection(1)\n self.metaAppLabelOption.SetSelection(0)\n self.metaObjectsOption.SetValue('objects')\n self.metaDBTableOption.SetValue('')\n self.metaDBTableSpaceOption.SetValue('')\n self.metaDefaultManagerNameOption.SetValue('')\n self.metaDefaultRelatedNameOption.SetValue('')\n self.metaGetLatestByOption.SetValue('')\n self.metaManagedOption.SetSelection(0)\n self.metaOrderingOption.SetValue('')\n self.metaDefaultPermissionsOption.SetValue(\"('add', 'change', 'delete', 'view')\")\n self.metaPermissionsOption.SetValue('')\n self.metaProxyOption.SetSelection(1)\n self.metaSelectOnSaveOption.SetSelection(1)\n self.metaRequiredDBVendorOption.SetSelection(0)\n self.metaIndexesOption.SetValue('')\n self.metaUniqueTogetherOption.SetValue('')\n self.metaIndexTogetherOption.SetValue('')\n self.metaConstraintsOption.SetValue('')\n self.metaVerboseNameOption.SetValue('')\n self.metaVerboseNamePluralOption.SetValue('')\n \n def onMetaRadioChanged(self, e):\n \"\"\"单选框值更新事件\"\"\"\n fid = e.GetId() # 控件id\n\n status_abstract = self.metaAbstractOption.GetSelection()\n\n if fid == self.metaAbstractOption.GetId():\n if 0 == status_abstract:\n RichMsgDialog.showOkMsgDialog(self, '抽象模型不会在数据库中建表,并且表级的一些参数设置将对子类无效。', '警告')\n\n def onBtnShowUnshowMeta(self, e):\n \"\"\"显示和隐藏Meta按钮,用于空间的合理布局\"\"\"\n if '【显示】Meta元数据(表级参数设置)' == self.btnShowUnshowMeta.Label:\n self.metaScollPanel.Show(True)\n self.btnShowUnshowMeta.SetLabel('【隐藏】Meta元数据(表级参数设置)')\n self.panel.Layout() # 重新计算布局\n else:\n 
self.metaScollPanel.Show(False)\n self.btnShowUnshowMeta.SetLabel('【显示】Meta元数据(表级参数设置)')\n self.panel.Layout()\n\n def _generate_create_code(self, mode: str='A'):\n \"\"\"生成创建模型代码\"\"\"\n # A: 预览模式\n # B: 写入模式\n pre_fields = self._get_fields_attrs() # 字段详细定义列表\n meta_attrs = self._get_meta_attrs() # Meta参数\n \n if len(pre_fields) > 0:\n fields_code = '\\n'.join([f' {_}' for _ in pre_fields])\n else:\n fields_code = ' pass'\n\n # Meta元数据定义\n if len(meta_attrs) > 0:\n meta_code = '\\n'.join([f' {_}' for _ in meta_attrs])\n else:\n meta_code = ' pass'\n\n # __str__()返回值\n str_msg = \" # return ''\"\n\n # 如果没有设置主键,则自动增加主键【预览界面有效,实际代码无此行】\n if len([_ for _ in self.allRows if CON_YES==_['primary_key']]) <= 0: # 用户无主动设置主键\n if 'A' == mode:\n auto_primary = ' id = models.AutoField(primary_key=True)'\n else:\n auto_primary = ''\n else:\n auto_primary = ''\n \n return f\"\"\"\\\nclass (models.Model):\n{auto_primary}\n{fields_code}\n\n class Meta:\n{meta_code}\n\n def __str__(self):\n return '如:self.name'\n\n\"\"\"\n\n def onBtnPreview(self, e):\n \"\"\"预览待插入代码\"\"\"\n model_code = self._generate_create_code()\n RichMsgDialog.showScrolledMsgDialog(self, model_code, \"代码预览\")\n\n def _get_fields_attrs(self):\n \"\"\"获取字段参数输出字符串\"\"\"\n pre_fields = []\n\n for _ in self.allRows:\n\n # 若和默认值一致,则不显式显示参数\n args = []\n field_name = _['field_name']\n field_type = _['field_type']\n # 位置参数\n if field_type not in CON_FOREIGN_FIELDS and _['remarker'] != _['field_name'].replace('_', ' '): # 默认下划线默认换成空格)\n t = _['remarker']\n args.append(f\"'{t}'\")\n\n if field_type in CON_FOREIGN_FIELDS and '' != _[\"relate_model\"]:\n t = _['relate_model']\n args.append(f\"{t}\")\n\n # 关键字参数\n if _['field_name'] != _['db_column']: # 默认一致,不一致则新增\n t = _['db_column']\n args.append(f\"db_column='{t}'\")\n \n if CON_YES == _['primary_key']:\n args.append(f\"primary_key=True\")\n \n if CON_YES == _['blank']:\n args.append(f\"blank=True\")\n \n if CON_YES == _['null']:\n args.append(f\"null=True\")\n\n if CON_YES == _['unique']:\n args.append(f\"unique=True\")\n\n if CON_YES == _['db_index'] and field_type not in CON_FOREIGN_FIELDS:\n args.append(f\"db_index=True\")\n \n if CON_NO == _['db_index'] and field_type in CON_FOREIGN_FIELDS:\n args.append(f\"db_index=False\")\n\n if CON_YES == _['auto_now']:\n args.append(f\"auto_now=True\")\n\n if CON_YES == _['auto_now_add']:\n args.append(f\"auto_now_add=True\")\n\n if CON_NO == _['editable']:\n args.append(f\"editable=False\")\n\n if '' != _['default']:\n t = _['default']\n args.append(f\"default={t}\")\n\n if '' != _['unique_for_date']:\n t = _['unique_for_date']\n args.append(f\"unique_for_date='{t}'\")\n\n if '' != _['unique_for_month']:\n t = _['unique_for_month']\n args.append(f\"unique_for_month='{t}'\")\n \n if '' != _['unique_for_year']:\n t = _['unique_for_year']\n args.append(f\"unique_for_year='{t}'\")\n \n if '' != _['error_messages']:\n t = _['error_messages']\n args.append(f\"error_messages='{t}'\") \n\n if '' != _['help_text']:\n t = _['help_text']\n args.append(f\"help_text='{t}'\") \n\n if '' != _['max_length']:\n t = _['max_length']\n args.append(f\"max_length={t}\")\n\n if 'DecimalField' == field_type:\n if '' != _['max_digits']:\n t = _['max_digits']\n args.append(f\"max_digits={t}\")\n\n if '' != _['decimal_places']:\n t = _['decimal_places']\n args.append(f\"decimal_places={t}\")\n\n if '' != _['upload_to']:\n t = _['upload_to']\n args.append(f\"upload_to={t}\")\n\n # 关联字段专属\n if field_type in CON_FOREIGN_FIELDS:\n if '' != _[\"on_delete\"] and 'ManyToManyField' 
!= field_type:\n t = _['on_delete']\n args.append(f\"on_delete={t}\")\n if '' != _[\"verbose_name\"]:\n t = _['verbose_name']\n args.append(f\"verbose_name='{t}'\")\n if '' != _[\"limit_choices_to\"]:\n t = _['limit_choices_to']\n args.append(f\"limit_choices_to={t}\")\n if '' != _[\"related_name\"]:\n t = _['related_name']\n args.append(f\"related_name='{t}'\")\n if '' != _[\"related_query_name\"]:\n t = _['related_query_name']\n args.append(f\"related_query_name='{t}'\")\n if '' != _[\"to_field\"]:\n t = _['to_field']\n args.append(f\"to_field='{t}'\")\n if CON_NO == _['db_constraint']:\n args.append(f\"db_constraint=False\")\n if '' != _['db_table']:\n t = _['db_table']\n args.append(f\"db_table='{t}'\")\n\n pre_fields.append(f\"{field_name} = models.{field_type}({', '.join(args)})\")\n return pre_fields\n \n def _get_meta_attrs(self):\n \"\"\"获取Meta参数输出字符串\"\"\"\n meta_str = []\n if 0 == self.metaAbstractOption.GetSelection():\n meta_str.append(\"abstract = True\")\n\n app_label = self.metaAppLabelOption.GetString(self.metaAppLabelOption.GetSelection()).strip()\n if app_label:\n meta_str.append(f\"app_label = '{app_label}'\")\n\n base_manager_name = self.metaObjectsOption.GetValue().strip()\n if base_manager_name and 'objects' != base_manager_name:\n meta_str.append(f\"base_manager_name = '{base_manager_name}'\")\n\n db_table = self.metaDBTableOption.GetValue().strip()\n if db_table:\n meta_str.append(f\"db_table = '{db_table}'\")\n\n db_tablespace = self.metaDBTableSpaceOption.GetValue().strip()\n if db_tablespace:\n meta_str.append(f\"db_tablespace = '{db_tablespace}'\")\n\n default_manager_name = self.metaDefaultManagerNameOption.GetValue().strip()\n if default_manager_name:\n meta_str.append(f\"default_manager_name = '{default_manager_name}'\")\n\n default_related_name = self.metaDefaultRelatedNameOption.GetValue().strip()\n if default_related_name:\n meta_str.append(f\"default_related_name = '{default_related_name}'\")\n\n get_latest_by = self.metaGetLatestByOption.GetValue().strip()\n if get_latest_by:\n temp = \", \".join([f\"'{_}'\" for _ in get_latest_by.split(',') if _])\n meta_str.append(f\"get_latest_by = [{temp}]\")\n\n if 1 == self.metaManagedOption.GetSelection():\n meta_str.append(\"managed = False\")\n\n ordering = self.metaOrderingOption.GetValue().strip()\n if ordering:\n temp = \", \".join([f\"'{_}'\" for _ in ordering.split(',') if _])\n meta_str.append(f\"ordering = [{temp}]\")\n\n default_permissions = self.metaDefaultPermissionsOption.GetValue().strip()\n if default_permissions and \"('add', 'change', 'delete', 'view')\" != default_permissions:\n meta_str.append(f\"default_permissions = {default_permissions}\")\n\n permissions = self.metaPermissionsOption.GetValue().strip()\n if permissions:\n meta_str.append(f\"permissions = {permissions}\")\n\n if 0 == self.metaProxyOption.GetSelection():\n meta_str.append(\"proxy = True\")\n\n if 0 == self.metaSelectOnSaveOption.GetSelection():\n meta_str.append(\"select_on_save = True\")\n\n required_db_vendor = self.metaRequiredDBVendorOption.GetString(self.metaRequiredDBVendorOption.GetSelection()).strip()\n if required_db_vendor:\n meta_str.append(f\"required_db_vendor = '{required_db_vendor}'\")\n\n indexes = self.metaIndexesOption.GetValue().strip()\n if indexes:\n meta_str.append(f\"indexes = {indexes}\")\n\n unique_together = self.metaUniqueTogetherOption.GetValue().strip()\n if unique_together:\n meta_str.append(f\"unique_together = {unique_together}\")\n\n index_together = 
self.metaIndexTogetherOption.GetValue().strip()\n if index_together:\n meta_str.append(f\"index_together = {index_together}\")\n\n constraints = self.metaConstraintsOption.GetValue().strip()\n if constraints:\n meta_str.append(f\"constraints = {constraints}\")\n\n verbose_name = self.metaVerboseNameOption.GetValue().strip()\n if verbose_name:\n meta_str.append(f\"verbose_name = '{verbose_name}'\")\n \n verbose_name_plural = self.metaVerboseNamePluralOption.GetValue().strip()\n if verbose_name_plural:\n meta_str.append(f\"verbose_name_plural = '{verbose_name_plural}'\")\n \n return meta_str\n\n def _show_special_args(self):\n \"\"\"显示特殊参数\"\"\"\n for _ in self.specialArgs:\n _.Show(True)\n\n def _unshow_special_args(self):\n \"\"\"隐藏特殊参数\"\"\"\n for _ in self.specialArgs:\n _.Show(False)\n\n def onRadioChanged(self, e):\n \"\"\"单选框值更新事件\"\"\"\n fid = e.GetId() # 控件id\n\n field_type = con_getFieldTypeName(self.choiceFieldType.GetString(self.choiceFieldType.GetSelection()).strip()) # 当前字段类型\n\n status_null = self.radiosFiledNull.GetSelection()\n status_blank = self.radiosFiledBlank.GetSelection()\n status_unique = self.radiosFiledUnique.GetSelection()\n status_primary_key = self.radiosFiledPrimary.GetSelection()\n status_editable = self.radiosFiledEditable.GetSelection()\n status_autonow = self.radiosAutoNow.GetSelection()\n status_autonowadd = self.radiosAutoNowAdd.GetSelection()\n\n if fid == self.radiosFiledPrimary.GetId():\n # 同时只能有一个显式主键存在\n if len([_ for _ in self.allRows if CON_YES==_['primary_key']]) > 0:\n self.radiosFiledPrimary.SetSelection(1)\n RichMsgDialog.showOkMsgDialog(self, '一个模型只能拥有一个显式主键,若想对此字段设置主键,请使用隐式方式:null=False且unique=True。', '警告')\n return\n\n # 自动赋值默认值None\n if 0 == status_primary_key: # 主键\n self.inputDefaultValue.SetValue('None')\n self.inputDefaultValue.Enable(False)\n # 自动锁定null blank unique db_index\n self.radiosFiledNull.Enable(False)\n self.radiosFiledBlank.Enable(False)\n self.radiosFiledUnique.Enable(False)\n self.radiosFiledDbIndex.Enable(False)\n # 初始状态\n self.radiosFiledBlank.SetSelection(1) # 不允许为空\n self.radiosFiledNull.SetSelection(1) # 字段为空不赋值NULL\n self.radiosFiledUnique.SetSelection(1) # 值不唯一\n self.radiosFiledDbIndex.SetSelection(1) # 不创建索引\n else: # 反向操作,状态复原\n self.inputDefaultValue.SetValue('')\n self.inputDefaultValue.Enable(True)\n self.radiosFiledNull.Enable(True)\n self.radiosFiledBlank.Enable(True)\n self.radiosFiledUnique.Enable(True)\n self.radiosFiledDbIndex.Enable(True)\n\n elif fid == self.radiosFiledNull.GetId():\n # 避免在CharField之类的字段中使用 null=True 【用户选中时给予提示】\n # 当 CharField 同时具有 unique=True 和 blank=True 时。 在这种情况下,需要设置 null=True\n if field_type in CON_CHAR_FIELDS and 0 == status_null:\n RichMsgDialog.showOkMsgDialog(self, '字符类型的字段设置null=True会出现两种可能的值,如非必要,请勿选择。', '警告')\n \n if 'BooleanField' == field_type and 0 == status_null:\n RichMsgDialog.showOkMsgDialog(self, 'BooleanField字段在2.1版本之前不支持设置null=True,新版本可以。不建议使用NullBooleanField。', '警告')\n\n elif fid == self.radiosFiledBlank.GetId():\n if field_type in CON_CHAR_FIELDS and 0 == status_unique and 0 == status_blank:\n self.radiosFiledNull.SetSelection(0)\n self.radiosFiledNull.Enable(False) # 同时锁定无法修改\n RichMsgDialog.showOkMsgDialog(self, '字符类型的字段同时设置unique=True和blank=True时,必须设置null=True。', '警告')\n if 0 != status_blank:\n self.radiosFiledNull.Enable(True) # 不是同时选中的状态,解锁null字段\n\n elif fid == self.radiosFiledUnique.GetId():\n if field_type in CON_CHAR_FIELDS and 0 == status_unique and 0 == status_blank:\n self.radiosFiledNull.SetSelection(0)\n self.radiosFiledNull.Enable(False) # 
同时锁定无法修改\n RichMsgDialog.showOkMsgDialog(self, '字符类型的字段同时设置unique=True和blank=True时,必须设置null=True。', '警告')\n if 0 != status_unique:\n self.radiosFiledNull.Enable(True) # 不是同时选中的状态,解锁null字段\n\n elif fid == self.radiosFiledEditable.GetId():\n # BinaryField字段在2.1版本之前不支持editable=True\n if 'BinaryField' == field_type and 0 == status_editable:\n RichMsgDialog.showOkMsgDialog(self, 'Django2.1版本之前(不包括2.1),不支持设置editable=True。', '警告')\n \n elif fid == self.radiosAutoNow.GetId():\n if 0 == status_autonow:\n self.radiosAutoNowAdd.SetSelection(1)\n self.inputDefaultValue.SetValue('')\n self.inputDefaultValue.Enable(False)\n # 当设置auto_now_add=True或auto_now=True时,默认同时设置editable=False和blank=True\n self.radiosFiledEditable.SetSelection(1)\n self.radiosFiledBlank.SetSelection(0)\n self.radiosFiledEditable.Enable(False)\n self.radiosFiledBlank.Enable(False)\n\n else:\n if 1 == status_autonowadd:\n self.inputDefaultValue.SetValue('date.today')\n # 反向操作\n self.inputDefaultValue.Enable(True)\n self.radiosFiledEditable.SetSelection(0)\n self.radiosFiledBlank.SetSelection(1)\n self.radiosFiledEditable.Enable(True)\n self.radiosFiledBlank.Enable(True)\n\n elif fid == self.radiosAutoNowAdd.GetId():\n if 0 == status_autonowadd:\n self.radiosAutoNow.SetSelection(1)\n self.inputDefaultValue.SetValue('')\n self.inputDefaultValue.Enable(False)\n # 当设置auto_now_add=True或auto_now=True时,默认同时设置editable=False和blank=True\n self.radiosFiledEditable.SetSelection(1)\n self.radiosFiledBlank.SetSelection(0)\n self.radiosFiledEditable.Enable(False)\n self.radiosFiledBlank.Enable(False)\n else:\n if 1 == status_autonow:\n self.inputDefaultValue.SetValue('date.today')\n self.inputDefaultValue.Enable(True)\n self.radiosFiledEditable.SetSelection(0)\n self.radiosFiledBlank.SetSelection(1)\n self.radiosFiledEditable.Enable(True)\n self.radiosFiledBlank.Enable(True)\n\n def onInputFieldModelName(self, e):\n \"\"\"模型字段名设置时自动触发值更新\"\"\"\n field_name = self.inputFieldModelName.GetValue().strip()\n # 每次取最新的一次输入字符\n if retools.PATT_CHARS.match(field_name):\n self.inputFieldDatabaseName.SetValue(field_name)\n self.inputFieldRemarkName.SetValue(field_name.replace('_', ' '))\n else:\n self.inputFieldModelName.SetValue(retools.PATT_CHARS_REVERSED.sub('', field_name))\n self.inputFieldModelName.SetInsertionPointEnd() # 光标定位到最后\n\n def onInputMaxLength(self, e):\n \"\"\"长度上限属性填写时自动触发值更新\"\"\"\n v = str(self.inputMaxLength.GetValue().strip())\n if '0' == v:\n self.inputMaxLength.SetValue('')\n return\n if v and isinstance(v, str): # 此处条件分支解决递归错误问题\n if not retools.PATT_DIGITS_WHOLE.match(v):\n self.inputMaxLength.SetValue(retools.PATT_DIGITS_REVERSED.sub('', v))\n self.inputMaxLength.SetInsertionPointEnd()\n\n def onInputMaxDigits(self, e):\n \"\"\"实数总位数自动触发值更新\"\"\"\n v = str(self.inputMaxDigits.GetValue().strip())\n if '0' == v:\n self.inputMaxDigits.SetValue('')\n return\n if v and isinstance(v, str):\n if not retools.PATT_DIGITS_WHOLE.match(v):\n self.inputMaxDigits.SetValue(retools.PATT_DIGITS_REVERSED.sub('', v))\n self.inputMaxDigits.SetInsertionPointEnd()\n\n def onInputRelatedName(self, e):\n \"\"\"反向名称->反向过滤器名称\"\"\"\n v = str(self.inputRelatedName.GetValue().strip())\n self.inputRelatedQueryName.SetValue(v)\n\n def onInputDecimalPlaces(self, e):\n \"\"\"小数总位数自动触发值更新\"\"\"\n v = str(self.inputDecimalPlaces.GetValue().strip())\n if '0' == v:\n self.inputDecimalPlaces.SetValue('')\n return\n if v and isinstance(v, str):\n if not retools.PATT_DIGITS_WHOLE.match(v):\n self.inputDecimalPlaces.SetValue(retools.PATT_DIGITS_REVERSED.sub('', v))\n 
self.inputDecimalPlaces.SetInsertionPointEnd()\n\n def _disable_all_args(self):\n \"\"\"关闭所有的参数填写入口\"\"\"\n for _ in self.allArgs:\n _.Enable(False)\n\n def _init_all_args_value(self):\n \"\"\"初始化参数默认值\"\"\"\n self.radiosFiledBlank.SetSelection(1) # 不允许为空\n self.radiosFiledNull.SetSelection(1) # 字段为空不赋值NULL\n self.radiosFiledPrimary.SetSelection(1) # 不是主键\n self.radiosFiledUnique.SetSelection(1) # 值不唯一\n self.radiosFiledDbIndex.SetSelection(1) # 不创建索引\n self.radiosFiledEditable.SetSelection(0) # 菜单默认可编辑\n self.choicesFiledUniqueForDate.SetSelection(0) # 无组合唯一\n self.choicesFiledUniqueForMonth.SetSelection(0) # 无组合唯一\n self.choicesFiledUniqueForYear.SetSelection(0) # 无组合唯一\n self.radiosAutoNow.SetSelection(1)\n self.radiosAutoNowAdd.SetSelection(1)\n # self.choiceSelectModel.SetSelection(0)\n self.choiceSelectDelRule.SetSelection(1)\n self.radiosDBConstraint.SetSelection(0)\n\n def _init_input_args(self):\n \"\"\"初始化输入框\"\"\"\n self.choiceFieldType.SetSelection(0)\n self.inputFieldModelName.SetValue('')\n self.inputFieldRemarkName.SetValue('')\n self.inputFieldDatabaseName.SetValue('')\n self.inputDefaultValue.SetValue('')\n self.inputFormHelpText.SetValue('')\n self.inputFormErrorMessage.SetValue('')\n self.inputMaxLength.SetValue('')\n self.inputMaxDigits.SetValue('')\n self.inputDecimalPlaces.SetValue('')\n self.inputUploadTo.SetValue('')\n self.inputRelationRemark.SetValue('')\n self.choiceSelectModel.SetValue('') # 后期类型改动(暂不更改)\n self.inputRelationRemark.SetValue('')\n self.inputLimitChoicesTo.SetValue('')\n self.inputRelatedName.SetValue('')\n self.inputRelatedQueryName.SetValue('')\n self.inputToField.SetValue('')\n\n def _disable_all_afterBtns(self):\n \"\"\"关闭所有的后触发按钮\"\"\"\n for _ in self.afterBtns:\n _.Enable(False)\n\n def _init_table(self):\n \"\"\"初始化表格控件\"\"\"\n\n # 显示和隐藏按钮,用于空间的合理布局\n self.btnShowUnshowTable = buttons.GenButton(self.panel, -1, '【显示】待新增字段表格数据')\n self.panelSizer.Add(self.btnShowUnshowTable, 0, wx.EXPAND | wx.ALL, 2)\n self.btnShowUnshowTable.SetBackgroundColour(CON_COLOR_MAIN)\n self.btnShowUnshowTable.SetForegroundColour(CON_COLOR_WHITE)\n\n # 表格\n self.tableObjPanel = wx.Panel(self.panel, size=(730, 222))\n tableObjPanelSizer = wx.BoxSizer(wx.VERTICAL)\n self.tableObjPanel.SetSizer(tableObjPanelSizer)\n self.panelSizer.Add(self.tableObjPanel, 0, wx.EXPAND | wx.ALL, 2)\n self.tableObjPanel.SetBackgroundColour('#000000')\n\n # 表头\n self.gridToolsPanel = wx.Panel(self.tableObjPanel)\n gridToolsPanelSizer = wx.BoxSizer(wx.HORIZONTAL)\n self.gridToolsPanel.SetSizer(gridToolsPanelSizer)\n tableObjPanelSizer.Add(self.gridToolsPanel, 0, wx.EXPAND | wx.ALL, 2)\n \n self.gridBtnDelete = buttons.GenButton(self.gridToolsPanel, -1, '删除选中行')\n self.gridBtnOther = buttons.GenButton(self.gridToolsPanel, -1, ' ')\n self.gridBtnOther.Enable(False)\n gridToolsPanelSizer.Add(self.gridBtnDelete, 0, wx.EXPAND | wx.ALL, 2)\n gridToolsPanelSizer.Add(self.gridBtnOther, 1, wx.EXPAND | wx.ALL, 2)\n\n # 表体\n self.infoGrid = wx.grid.Grid( self.tableObjPanel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )\n\n self.infoGrid.CreateGrid( 0, len(CON_MODELSCREATEDIALOG_COLS) ) # row col\n self.infoGrid.EnableEditing( False )\n self.infoGrid.EnableGridLines( True )\n self.infoGrid.EnableDragGridSize( True )\n self.infoGrid.SetMargins( 0, 0 )\n\n self.infoGrid.EnableDragColMove( False )\n self.infoGrid.EnableDragColSize( True )\n self.infoGrid.SetColLabelSize( 30 )\n self.infoGrid.SetColLabelAlignment( wx.ALIGN_CENTER, wx.ALIGN_CENTER )\n\n self.infoGrid.EnableDragRowSize( True )\n 
self.infoGrid.SetRowLabelSize( 70 )\n        self.infoGrid.SetRowLabelAlignment( wx.ALIGN_CENTER, wx.ALIGN_CENTER )\n\n        self.infoGrid.SetDefaultCellAlignment( wx.ALIGN_LEFT, wx.ALIGN_TOP )\n        tableObjPanelSizer.Add( self.infoGrid, 1, wx.EXPAND | wx.ALL, 2 ) # 表格默认加最后\n\n        self._init_header()\n\n        # 事件\n        self.Bind(wx.EVT_BUTTON, self.onGridBtnDelete, self.gridBtnDelete)\n        self.Bind(wx.EVT_BUTTON, self.onBtnShowUnshowTable, self.btnShowUnshowTable)\n\n        self.tableObjPanel.Show(False) # 默认隐藏\n\n    def onBtnShowUnshowTable(self, e):\n        \"\"\"显示和隐藏按钮,用于空间的合理布局\"\"\"\n        if '【显示】待新增字段表格数据' == self.btnShowUnshowTable.Label:\n            self.tableObjPanel.Show(True)\n            self.btnShowUnshowTable.SetLabel('【隐藏】待新增字段表格数据')\n            self.panel.Layout() # 重新计算布局\n        else:\n            self.tableObjPanel.Show(False)\n            self.btnShowUnshowTable.SetLabel('【显示】待新增字段表格数据')\n            self.panel.Layout()\n\n    def onGridBtnDelete(self, e):\n        \"\"\"删除行\"\"\"\n        row_indexs = self.infoGrid.GetSelectedRows()\n        t = '、'.join([str(_+1) for _ in row_indexs])\n        if len(row_indexs) > 0:\n            dlg_tip = wx.MessageDialog(self, f\"确认删除第{t}行?一旦删除不可恢复。\", CON_TIPS_COMMON, wx.CANCEL | wx.OK)\n            if dlg_tip.ShowModal() == wx.ID_OK:\n                result = self.removeRows(row_indexs)\n                if not result:\n                    RichMsgDialog.showOkMsgDialog(self, '删除成功!', '提示')\n                else:\n                    if isinstance(result, list):\n                        RichMsgDialog.showOkMsgDialog(self, f\"{'、'.join(result)}删除失败!\", '提示')\n                    else:\n                        RichMsgDialog.showOkMsgDialog(self, '未知错误,删除失败。', '提示')\n            dlg_tip.Close(True)\n        else:\n            RichMsgDialog.showOkMsgDialog(self, '无选择行可删除。', '警告')\n\n    def _init_header(self):\n        \"\"\"初始化列名\"\"\"\n        for i,v in enumerate(CON_MODELSCREATEDIALOG_COLS):\n            self.infoGrid.SetColLabelValue(i, v)\n\n    def onChoiceSelectDelRule(self, e):\n        \"\"\"on_delete选项监听\"\"\"\n        delete_type = e.GetString().strip()\n\n        if 'models.SET_NULL' == delete_type:\n            self.radiosFiledBlank.SetSelection(0)\n            self.radiosFiledNull.SetSelection(0)\n            self.radiosFiledBlank.Enable(False)\n            self.radiosFiledNull.Enable(False)\n        else:\n            self.radiosFiledBlank.SetSelection(1)\n            self.radiosFiledNull.SetSelection(1)\n            self.radiosFiledBlank.Enable(True)\n            self.radiosFiledNull.Enable(True)\n\n    def onChoiceFieldType(self, e):\n        \"\"\"选择要新建的字段类型\"\"\"\n        field_type = e.GetString().strip(string.whitespace+'-')\n\n        if not field_type:\n            return\n\n        # try:\n        #     if self.record != field_type: # 值未更新\n        #         # 每次更新时均初始化状态\n        #         self._init_all_args_value()\n        #         self._init_input_args()\n        # except: ...\n        # self.record = field_type # 记录上一次的状态\n\n        self._open_required_args() # 共用参数开启\n        self._unshow_special_args() # 先隐藏所有的特殊参数,后按需开启\n\n        if CON_BINARYFIELD == field_type:\n            self.selectBinaryField()\n        elif CON_SMALLINTEGERFIELD == field_type:\n            self.selectSmallIntegerField()\n        elif CON_POSITIVESMALLINTEGERFIELD == field_type:\n            self.selectPositiveSmallIntegerField()\n        elif CON_INTEGERFIELD == field_type:\n            self.selectIntegerField()\n        elif CON_POSITIVEINTEGERFIELD == field_type:\n            self.selectPositiveIntegerField()\n        elif CON_BIGINTEGERFIELD == field_type:\n            self.selectBigIntegerField()\n        elif CON_AUTOFIELD == field_type:\n            self.selectAutoField()\n        elif CON_BIGAUTOFIELD == field_type:\n            self.selectBigAutoField()\n        elif CON_FLOATFIELD == field_type:\n            self.selectFloatField()\n        elif CON_DECIMALFIELD == field_type:\n            self.selectDecimalField()\n        elif CON_BOOLEANFIELD == field_type:\n            self.selectBooleanField()\n        elif CON_CHARFIELD == field_type:\n            self.selectCharField()\n        elif CON_TEXTFIELD == field_type:\n            self.selectTextField()\n        elif CON_EMAILFIELD == field_type:\n            self.selectEmailField()\n        elif CON_IPADRESSFIELD == 
field_type:\n self.selectGenericIPAddressField()\n elif CON_SLUGFIELD == field_type:\n self.selectSlugField()\n elif CON_URLFIELD == field_type:\n self.selectURLField()\n elif CON_UUIDFIELD == field_type:\n self.selectUUIDField()\n elif CON_DATEFIELD == field_type:\n self.selectDateField()\n elif CON_DATETIMEFIELD == field_type:\n self.selectDateTimeField()\n elif CON_DURATIONFIELD == field_type:\n self.selectDurationField()\n elif CON_TIMEFIELD == field_type:\n self.selectTimeField()\n elif CON_FILEFIELD == field_type:\n self.selectFileField()\n elif CON_IMAGEFIELD == field_type:\n self.selectImageField()\n elif CON_FILEPATHFIELD == field_type:\n self.selectFilePathField()\n elif CON_FOREIGNFIELD == field_type:\n self.selectForeignKey()\n # RichMsgDialog.showOkMsgDialog(self, '在创建关联字段时,默认在【被关联模型】数据库表中新增<当前模型名小写>_id列。', '提示')\n elif CON_MANYTOMANYFIELD == field_type:\n self.selectManyToManyField()\n # RichMsgDialog.showOkMsgDialog(self, '在创建关联字段时,默认在【被关联模型】数据库表中新增<当前模型名小写>_id列。', '提示')\n elif CON_ONETOONEFIELD == field_type:\n self.selectOneToOneField()\n # RichMsgDialog.showOkMsgDialog(self, '在创建关联字段时,默认在【被关联模型】数据库表中新增<当前模型名小写>_id列。', '提示')\n\n self.choiceFieldType.Enable(False) # 一旦选择将锁定字段的重新选择,可点击【重置字段】解锁\n\n self.panelSizer.Layout() # 重要!!! 重新计算布局\n\n def onBtnAddNew(self, e):\n \"\"\"新增字段\"\"\"\n self.choiceFieldType.Enable(True) # 开放字段下拉选择框\n self._show_special_args() # 显示所有的可选参数\n # 开放 后触发 按钮\n for _ in self.afterBtns:\n _.Enable(True)\n # 锁定新增按钮\n self.btnAddNew.Enable(False)\n self.panel.Layout()\n\n def onBtnResetInput(self, e):\n \"\"\"恢复字段默认值\"\"\"\n dlg_tip = wx.MessageDialog(self, f\"确认重置字段?重置后将丢失界面所有已填数据。(待新增区不受影响)\", CON_TIPS_COMMON, wx.CANCEL | wx.OK)\n if dlg_tip.ShowModal() == wx.ID_OK:\n self._init_all_args_value()\n self._init_input_args()\n # 参数重新选定,开放类型选择按钮\n self._disable_all_args()\n self._show_special_args() # 显示所有的可选参数\n self.choiceFieldType.Enable(True)\n self.panel.Layout()\n dlg_tip.Close(True)\n\n def onBtnAddFieldToArea(self, e):\n \"\"\"添加至待生成区\"\"\"\n dlg_tip = wx.MessageDialog(self, f\"确认添加?\", CON_TIPS_COMMON, wx.CANCEL | wx.OK)\n if dlg_tip.ShowModal() == wx.ID_OK:\n # 添加操作\n # 获取界面的所有值\n vchoiceFieldType = self.choiceFieldType.GetString(self.choiceFieldType.GetSelection()).strip()\n vinputFieldModelName = self.inputFieldModelName.GetValue().strip()\n vinputFieldDatabaseName = self.inputFieldDatabaseName.GetValue().strip()\n vinputDefaultValue = self.inputDefaultValue.GetValue().strip()\n vinputFormHelpText = self.inputFormHelpText.GetValue().strip()\n vinputFormErrorMessage = self.inputFormErrorMessage.GetValue().strip()\n vinputFieldRemarkName = self.inputFieldRemarkName.GetValue().strip()\n vinputMaxLength = self.inputMaxLength.GetValue().strip()\n vinputMaxDigits = self.inputMaxDigits.GetValue().strip()\n vinputDecimalPlaces = self.inputDecimalPlaces.GetValue().strip()\n vinputUploadTo = self.inputUploadTo.GetValue().strip()\n vradiosFiledBlank = self.radiosFiledBlank.GetSelection()\n vradiosFiledNull = self.radiosFiledNull.GetSelection()\n vradiosFiledPrimary = self.radiosFiledPrimary.GetSelection()\n vradiosFiledUnique = self.radiosFiledUnique.GetSelection()\n vradiosFiledDbIndex = self.radiosFiledDbIndex.GetSelection()\n vradiosFiledEditable = self.radiosFiledEditable.GetSelection()\n vradiosAutoNow = self.radiosAutoNow.GetSelection()\n vradiosAutoNowAdd = self.radiosAutoNowAdd.GetSelection()\n vchoicesFiledUniqueForDate = self.choicesFiledUniqueForDate.GetString(self.choicesFiledUniqueForDate.GetSelection()).strip()\n vchoicesFiledUniqueForMonth 
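# Aside: the if/elif ladder above dispatches on roughly two dozen CON_*
# constants; a table-driven version does the same with a dict. The
# wiring below is a sketch for illustration, not the original code:

FIELD_HANDLERS = {
    CON_BINARYFIELD: 'selectBinaryField',
    CON_CHARFIELD: 'selectCharField',
    CON_DECIMALFIELD: 'selectDecimalField',
    CON_FOREIGNFIELD: 'selectForeignKey',
    # ... one entry per remaining CON_* constant
}

def _dispatch_field_type(self, field_type):
    handler_name = FIELD_HANDLERS.get(field_type)
    if handler_name:
        getattr(self, handler_name)()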
= self.choicesFiledUniqueForMonth.GetString(self.choicesFiledUniqueForMonth.GetSelection()).strip()\n vchoicesFiledUniqueForYear = self.choicesFiledUniqueForYear.GetString(self.choicesFiledUniqueForYear.GetSelection()).strip()\n # 二次添加\n vchoiceSelectModel = self.choiceSelectModel.GetValue().strip()\n vchoiceSelectDelRule = self.choiceSelectDelRule.GetString(self.choiceSelectDelRule.GetSelection()).strip()\n vinputRelationRemark = self.inputRelationRemark.GetValue().strip()\n vinputLimitChoicesTo = self.inputLimitChoicesTo.GetValue().strip()\n vinputRelatedName = self.inputRelatedName.GetValue().strip()\n vinputRelatedQueryName = self.inputRelatedQueryName.GetValue().strip()\n vinputToField = self.inputToField.GetValue().strip()\n vradiosDBConstraint = self.radiosDBConstraint.GetSelection()\n vinputDBTable = self.inputDBTable.GetValue().strip()\n\n # 先校验,后操作\n # 字段属性名+数据库列名+字段备注,三者只要有一个重复,便不允许新增该字段\n tfield_name, tfield_dbname, tfieldremark = [], [], []\n for _ in self.allRows:\n tfield_name.append(_['field_name'])\n tfield_dbname.append(_['field_name'])\n tfieldremark.append(_['remarker'])\n\n if vinputFieldModelName in tfield_name or vinputFieldDatabaseName in tfield_dbname or ('' != vinputFieldRemarkName and vinputFieldRemarkName in tfieldremark):\n RichMsgDialog.showOkMsgDialog(self, '字段属性名、数据库列名、字段备注均不能重复。', '警告')\n return\n\n # 必填项检测\n if not vchoiceFieldType: # 字段类型必选\n RichMsgDialog.showOkMsgDialog(self, '请选择字段类型!', '错误')\n return\n\n if not vinputFieldModelName: # 字段属性名必填\n RichMsgDialog.showOkMsgDialog(self, '请填写【字段属性名】!', '错误')\n return\n\n if (con_getFieldTypeName(vchoiceFieldType) in CON_OWN_MAX_LENGTH_FILEDS) and (not vinputMaxLength): # 所有有max_length属性的字段,必填max_length\n RichMsgDialog.showOkMsgDialog(self, '【长度上限】max_length必填!', '错误')\n return\n\n if 'DecimalField' == con_getFieldTypeName(vchoiceFieldType):\n if not vinputMaxDigits:\n RichMsgDialog.showOkMsgDialog(self, '【实数总位数】必填!', '错误')\n return\n else:\n maxdigits = int(vinputMaxDigits)\n dicimalplaces = int(vinputDecimalPlaces if vinputDecimalPlaces else '0')\n if maxdigits < dicimalplaces:\n RichMsgDialog.showOkMsgDialog(self, '【实数总位数】必需大于等于【小数总位数】!', '错误')\n return\n\n if con_getFieldTypeName(vchoiceFieldType) in CON_FOREIGN_FIELDS:\n if not vchoiceSelectModel:\n RichMsgDialog.showOkMsgDialog(self, '【A、关联关系模型】必填!', '错误')\n return\n if not vchoiceSelectDelRule:\n RichMsgDialog.showOkMsgDialog(self, '【B、删除规则(on_delete)】必选!', '错误')\n return\n\n # 待插入的行\n insertRow = {}\n insertRow['field_name'] = vinputFieldModelName\n insertRow['db_column'] = vinputFieldDatabaseName\n insertRow['remarker'] = vinputFieldRemarkName\n insertRow['field_type'] = con_getFieldTypeName(vchoiceFieldType)\n insertRow['primary_key'] = self._replace01_to_bool(vradiosFiledPrimary)\n insertRow['blank'] = self._replace01_to_bool(vradiosFiledBlank)\n insertRow['null'] = self._replace01_to_bool(vradiosFiledNull)\n insertRow['default'] = vinputDefaultValue\n insertRow['unique'] = self._replace01_to_bool(vradiosFiledUnique)\n insertRow['db_index'] = self._replace01_to_bool(vradiosFiledDbIndex)\n insertRow['choices'] = '' # 前端暂未放出\n insertRow['unique_for_date'] = vchoicesFiledUniqueForDate\n insertRow['unique_for_month'] = vchoicesFiledUniqueForMonth\n insertRow['unique_for_year'] = vchoicesFiledUniqueForYear\n insertRow['error_messages'] = vinputFormErrorMessage\n insertRow['editable'] = self._replace01_to_bool(vradiosFiledEditable)\n insertRow['help_text'] = vinputFormHelpText\n insertRow['max_length'] = vinputMaxLength\n insertRow['max_digits'] = 
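# Aside: in the duplicate check above, tfield_dbname is filled with
# _['field_name'] rather than _['db_column'], so duplicate database
# column names are never actually caught. Assuming that is unintended,
# a corrected sketch (helper name is hypothetical):

def _has_duplicate(self, field_name, db_column, remark):
    names = {r['field_name'] for r in self.allRows}
    columns = {r['db_column'] for r in self.allRows}  # fixed key
    remarks = {r['remarker'] for r in self.allRows if r['remarker']}
    return (field_name in names or db_column in columns
            or (remark != '' and remark in remarks))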
vinputMaxDigits\n insertRow['decimal_places'] = vinputDecimalPlaces if vinputDecimalPlaces else '0'\n insertRow['auto_now'] = self._replace01_to_bool(vradiosAutoNow)\n insertRow['auto_now_add'] = self._replace01_to_bool(vradiosAutoNowAdd)\n insertRow['upload_to'] = vinputUploadTo\n # 二次添加\n insertRow['relate_model'] = vchoiceSelectModel\n insertRow['on_delete'] = vchoiceSelectDelRule\n insertRow['verbose_name'] = vinputRelationRemark\n insertRow['limit_choices_to'] = vinputLimitChoicesTo\n insertRow['related_name'] = vinputRelatedName\n insertRow['related_query_name'] = vinputRelatedQueryName\n insertRow['to_field'] = vinputToField\n insertRow['db_constraint'] = self._replace01_to_bool(vradiosDBConstraint)\n insertRow['db_table'] = vinputDBTable\n\n self.allRows.append(insertRow)\n\n # 插入待新增数据区域\n self.infoGrid.AppendRows(1)\n row = self.infoGrid.GetNumberRows() - 1\n for col, _ in enumerate(CON_MODELSCREATEDIALOG_COLS):\n self.infoGrid.SetCellValue(row, col, str(insertRow.get(CON_ARGS_NAME_DICT[_])))\n\n # 界面数据全部初始化【全部参数暂时不放,只显示上一个字段相关的参数锁定界面】\n self._disable_all_args()\n self._init_all_args_value()\n self._init_input_args()\n self.choiceFieldType.SetSelection(0) # 单独拎出来初始化【不影响大体功能】\n\n # 重新开放新增按钮 锁定后触发按钮\n self.btnAddNew.Enable(True)\n self._disable_all_afterBtns()\n\n # 更新日期组合唯一的三个相关下拉框【只给日期字段相关的字段属性名】\n self.choicesFiledUniqueForDate.Clear()\n self.choicesFiledUniqueForMonth.Clear()\n self.choicesFiledUniqueForYear.Clear()\n\n # 日期选择\n self.choicesFiledUniqueForDate.Append(' ')\n self.choicesFiledUniqueForMonth.Append(' ')\n self.choicesFiledUniqueForYear.Append(' ')\n for _ in self.allRows:\n if _['field_type'] in CON_DATE_FIELDS:\n self.choicesFiledUniqueForDate.Append(_['field_name'])\n self.choicesFiledUniqueForMonth.Append(_['field_name'])\n self.choicesFiledUniqueForYear.Append(_['field_name'])\n\n self.panel.Layout()\n RichMsgDialog.showOkMsgDialog(self, '字段添加成功,可在(待新增字段表格数据)中查看已添加字段信息。', '成功')\n\n dlg_tip.Close(True)\n\n def _replace01_to_bool(self, v):\n if 0 == v: return CON_YES\n else: return CON_NO\n\n def removeRows(self, row_indexs):\n \"\"\"同步删除界面和数据包里的数据\"\"\"\n errors = []\n for i in sorted(row_indexs, reverse=True): # 倒序\n try:\n temp = self.infoGrid.GetCellValue(i, 0) # 字段属性名\n self.infoGrid.DeleteRows(i)\n except:\n errors.append(str(i+1))\n else:\n self._removeRowsByFieldName(temp)\n return errors\n\n def _removeRowsByFieldName(self, field_name):\n \"\"\"\"根据字段属性名删除\"\"\"\n for i,_ in enumerate(self.allRows):\n if field_name == _['field_name']:\n self.allRows.pop(i)\n break\n\n def _checkFiledsNameIsConflict(self)->bool:\n \"\"\"检查字段名是否与内置API名称冲突\"\"\"\n # 取所有的模型内置API名\n modelAPINames = env.getConflictFieldsName()\n c_l = []\n for _ in self.allRows:\n if _['field_name'].lower() in modelAPINames:\n c_l.append(_['field_name'])\n if len(c_l) > 0: # 冲突返回True\n return True, c_l\n else:\n return False, c_l\n\n def _auto_register_model(self, appName, model_name):\n \"\"\"自动注册模型到后台\"\"\"\n if self.autoRegister.GetValue():\n # models 存储模型类名\n # modelFiles 是无后缀名的存储模型的文件名\n modelFiles, models = [], [] # modelFiles 无后缀名\n models.extend([model_name,])\n modelfile_alias = os.path.basename(env.getModelsAlias()[0]).split('.')[0] # 默认取models.py的第一个别名\n modelFiles.extend([modelfile_alias,])\n\n classify = set(modelFiles) # 将所有的模型文件名称去重\n \n importData = {} # 构建插入数据包\n for _ in classify:\n importData[_] = []\n for _ in zip(models, modelFiles):\n importData[_[1]].append(_[0]) # 以文件名为分组依据,将模型归类到对应的文件下\n \n alias = env.getAdminAlias() # 读取admin.py的别名\n for _ in alias:\n # 
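# Aside: removeRows above deletes rows in descending index order so
# that earlier deletions do not shift the indices of rows still to be
# removed. The effect of the ordering, shown on a plain list:

rows = ['a', 'b', 'c', 'd', 'e']
for i in sorted([1, 3], reverse=True):  # pop index 3 first, then 1
    rows.pop(i)
assert rows == ['a', 'c', 'e']  # ascending order would drop 'b' and 'e' instead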
下面将在所有的模块别名路径中写入注册数据【可能有点不合理】\n insert_path = os.path.join(get_configs(CONFIG_PATH)['dirname'], appName, _) # 因为 _ 别名是包含紧邻app路径之后的路径,所以理论上不管层级有多深,都可以找的到\n djangotools.write_admin_base(insert_path, importData) # 写入注册代码\n\n def onBtnExecSave(self, e):\n \"\"\"保存\"\"\"\n\n if len(self.allRows) <= 0:\n dlg_tip = wx.MessageDialog(self, f\"未添加任何字段,是否创建空模型?\", CON_TIPS_COMMON, wx.CANCEL | wx.OK)\n if dlg_tip.ShowModal() == wx.ID_OK:\n dlg = wx.TextEntryDialog(self, u\"模型命名:\", u\"保存模型\", u\"\")\n if dlg.ShowModal() == wx.ID_OK:\n model_name = dlg.GetValue().strip() # 获取要创建的模型名称\n if model_name:\n model_code = self._generate_create_code(mode='B').replace('', model_name)\n # 将代码追加到对应的应用程序中\n app_name = self.choiceSelectFile.GetString(self.choiceSelectFile.GetSelection()).strip()\n if app_name:\n temp_path = djangotools.get_models_path_by_appname(app_name)\n if len(temp_path) > 0:\n append_file_whole(temp_path[0], model_code) # 默认写入第一个模型文件\n self._auto_register_model(app_name, model_name) # 自动注册\n RichMsgDialog.showOkMsgDialog(self, '保存成功', '成功')\n else:\n RichMsgDialog.showOkMsgDialog(self, '程序缺失模型文件', '错误')\n else:\n RichMsgDialog.showOkMsgDialog(self, '请先选择模型所属的应用程序。', '错误')\n else:\n RichMsgDialog.showOkMsgDialog(self, '未输入模型名称', '错误')\n dlg.Close(True)\n dlg_tip.Close(True)\n else:\n check_result = self._checkFiledsNameIsConflict()\n conflict_info = '、'.join(check_result[1])\n if check_result[0]:\n RichMsgDialog.showOkMsgDialog(self, f'{conflict_info} 字段名称与模型内置API名称冲突,请删除后重新新增字段。', '错误')\n return\n dlg = wx.TextEntryDialog(self, u\"模型命名:\", u\"保存模型\", u\"\")\n if dlg.ShowModal() == wx.ID_OK:\n model_name = dlg.GetValue().strip() # 获取要创建的模型名称\n if model_name:\n model_code = self._generate_create_code(mode='B').replace('', model_name)\n # 将代码追加到对应的应用程序中\n app_name = self.choiceSelectFile.GetString(self.choiceSelectFile.GetSelection()).strip()\n if app_name:\n temp_path = djangotools.get_models_path_by_appname(app_name)\n if len(temp_path) > 0:\n append_file_whole(temp_path[0], model_code) # 默认写入第一个模型文件\n self._auto_register_model(app_name, model_name) # 自动注册\n RichMsgDialog.showOkMsgDialog(self, '保存成功', '成功')\n else:\n RichMsgDialog.showOkMsgDialog(self, '程序缺失模型文件', '错误')\n else:\n RichMsgDialog.showOkMsgDialog(self, '请先选择模型所属的应用程序', '错误')\n else:\n RichMsgDialog.showOkMsgDialog(self, '未输入模型名称', '错误')\n dlg.Close(True)\n\n def _open_required_args(self):\n \"\"\"所有字段必须同步开启的参数\"\"\"\n for _ in self.commonArgs:\n _.Enable(True)\n\n def _open_max_length_field(self):\n \"\"\"开启max_length字段\"\"\"\n self.inputMaxLengthStaticBox.Show(True)\n self.inputMaxLength.Show(True)\n self.labelInputMaxLength.Show(True)\n self.readmeInputMaxLength.Show(True)\n self.inputMaxLength.Enable(True)\n\n def selectBinaryField(self):\n \"\"\"字节型字段\"\"\"\n self.radiosFiledEditable.SetSelection(1)\n self._open_max_length_field()\n\n def selectSmallIntegerField(self):\n ...\n def selectPositiveSmallIntegerField(self):\n ...\n def selectIntegerField(self):\n ...\n def selectPositiveIntegerField(self):\n ...\n def selectBigIntegerField(self):\n ...\n def selectAutoField(self):\n \"\"\"32位自增型字段\"\"\"\n def selectBigAutoField(self):\n ...\n def selectFloatField(self):\n ...\n def selectDecimalField(self):\n \"\"\"高精度浮点型字段\"\"\"\n self.inputMaxDigitsStaticBox.Show(True)\n self.inputMaxDigits.Show(True)\n self.labelInputMaxDigits.Show(True)\n self.readmeInputMaxDigits.Show(True)\n self.inputDecimalPlacesStaticBox.Show(True)\n self.inputDecimalPlaces.Show(True)\n self.labelInputDecimalPlaces.Show(True)\n 
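# Aside: both branches of onBtnExecSave repeat the naming dialog, the
# models.py lookup and the append/register sequence; the shared tail
# could be factored out. A sketch, with hypothetical helper names:

def _ask_name_and_save(self):
    dlg = wx.TextEntryDialog(self, u"模型命名:", u"保存模型", u"")
    if dlg.ShowModal() == wx.ID_OK:
        model_name = dlg.GetValue().strip()
        if model_name:
            self._write_model_code(model_name)  # the append + auto-register steps
        else:
            RichMsgDialog.showOkMsgDialog(self, '未输入模型名称', '错误')
    dlg.Close(True)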
self.readmeInputDecimalPlaces.Show(True)\n self.inputMaxDigits.Enable(True)\n self.inputDecimalPlaces.Enable(True)\n\n def selectBooleanField(self):\n \"\"\"布尔类型字段\"\"\"\n self.inputDefaultValue.SetValue('None')\n\n def selectCharField(self):\n \"\"\"字符型字段\"\"\"\n self._open_max_length_field()\n self.inputMaxLength.SetValue('255') # 默认长度255\n\n def selectTextField(self):\n ...\n def selectEmailField(self):\n \"\"\"电子邮件字段\"\"\"\n self._open_max_length_field()\n self.inputMaxLength.SetValue('254')\n\n def selectGenericIPAddressField(self):\n ...\n def selectSlugField(self):\n \"\"\"字母、数字、连字符字段\"\"\"\n self._open_max_length_field()\n self.inputMaxLength.SetValue('50')\n\n def selectURLField(self):\n \"\"\"url字段\"\"\"\n self._open_max_length_field()\n self.inputMaxLength.SetValue('200')\n\n def selectUUIDField(self):\n ...\n\n def _open_autonow_add(self):\n \"\"\"开启日期相关的特殊参数\"\"\"\n self.radiosAutoNowStaticBox.Show(True)\n self.radiosAutoNow.Show(True)\n self.labelRadiosAutoNow.Show(True)\n self.readmeRadiosAutoNow.Show(True)\n self.radiosAutoNowAddStaticBox.Show(True)\n self.radiosAutoNowAdd.Show(True)\n self.labelRadiosAutoNowAdd.Show(True)\n self.readmeRadiosAutoNowAdd.Show(True)\n self.radiosAutoNow.Enable(True)\n self.radiosAutoNowAdd.Enable(True)\n\n def selectDateField(self):\n \"\"\"日期型字段\"\"\"\n self._open_autonow_add()\n self.inputDefaultValue.SetValue('date.today')\n\n def selectDateTimeField(self):\n \"\"\"长日期字段\"\"\"\n self._open_autonow_add()\n self.inputDefaultValue.SetValue('timezone.now')\n\n def selectDurationField(self):\n \"\"\"时间戳字段\"\"\"\n\n def selectTimeField(self):\n \"\"\"时间字段\"\"\"\n self._open_autonow_add()\n\n def selectFileField(self):\n \"\"\"文件字段\"\"\"\n self._open_max_length_field()\n self.inputMaxLength.SetValue('100')\n\n self.inputUploadToStaticBox.Show(True)\n self.inputUploadTo.Show(True)\n self.labelInputUploadTo.Show(True)\n self.readmeInputUploadTo.Show(True)\n self.inputUploadTo.Enable(True)\n self.inputUploadTo.SetValue(r\"'uploads/%Y/%m/%d/'\")\n\n def selectImageField(self):\n ...\n def selectFilePathField(self):\n ...\n def selectForeignKey(self):\n \"\"\"多对一字段\"\"\"\n self.radiosFiledDbIndex.SetSelection(0)\n self.inputFieldRemarkName.Enable(False) # 锁定位置参数备注名,使用关键字参数备注名\n\n self.choiceSelectModelStaticBox.Show(True)\n self.choiceSelectModel.Show(True)\n self.labelChoiceSelectModel.Show(True)\n self.readmeChoiceSelectModel.Show(True)\n\n self.choiceSelectDelRuleStaticBox.Show(True)\n self.choiceSelectDelRule.Show(True)\n self.labelChoiceSelectDelRule.Show(True)\n self.readmeChoiceSelectDelRule.Show(True)\n\n self.inputRelationRemarkStaticBox.Show(True)\n self.inputRelationRemark.Show(True)\n self.labelInputRelationRemark.Show(True)\n self.readmeInputRelationRemark.Show(True)\n\n self.inputLimitChoicesToStaticBox.Show(True)\n self.inputLimitChoicesTo.Show(True)\n self.labelInputLimitChoicesTo.Show(True)\n self.readmeInputLimitChoicesTo.Show(True)\n\n self.inputRelatedNameStaticBox.Show(True)\n self.inputRelatedName.Show(True)\n self.labelInputRelatedName.Show(True)\n self.readmeInputRelatedName.Show(True)\n\n self.inputRelatedQueryNameStaticBox.Show(True)\n self.inputRelatedQueryName.Show(True)\n self.labelInputRelatedQueryName.Show(True)\n self.readmeInputRelatedQueryName.Show(True)\n\n self.inputToFieldStaticBox.Show(True)\n self.inputToField.Show(True)\n self.labelInputToField.Show(True)\n self.readmeInputToField.Show(True)\n\n self.radiosDBConstraintStaticBox.Show(True)\n self.radiosDBConstraint.Show(True)\n 
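# Aside: the preset lengths above track Django's own defaults
# (EmailField 254, SlugField 50, URLField 200, FileField 100), while
# 255 for CharField is only a common convention, since CharField has no
# default max_length. The same defaults as plain model fields:

from django.db import models

class Page(models.Model):  # hypothetical model
    email = models.EmailField()     # max_length defaults to 254
    slug = models.SlugField()       # defaults to 50
    url = models.URLField()         # defaults to 200
    upload = models.FileField(upload_to='uploads/%Y/%m/%d/')  # defaults to 100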
self.labelRadiosDBConstraint.Show(True)\n self.readmeRadiosDBConstraint.Show(True)\n\n self.choiceSelectModel.Enable(True)\n self.choiceSelectDelRule.Enable(True)\n self.inputRelationRemark.Enable(True)\n self.inputLimitChoicesTo.Enable(True)\n self.inputRelatedName.Enable(True)\n self.inputRelatedQueryName.Enable(True)\n self.inputToField.Enable(True)\n self.radiosDBConstraint.Enable(True)\n\n def selectManyToManyField(self):\n \"\"\"多对多字段\"\"\"\n self.radiosFiledDbIndex.SetSelection(0)\n self.inputFieldRemarkName.Enable(False) # 锁定位置参数备注名,使用关键字参数备注名\n\n self.choiceSelectModelStaticBox.Show(True)\n self.choiceSelectModel.Show(True)\n self.labelChoiceSelectModel.Show(True)\n self.readmeChoiceSelectModel.Show(True)\n\n self.inputRelatedNameStaticBox.Show(True)\n self.inputRelatedName.Show(True)\n self.labelInputRelatedName.Show(True)\n self.readmeInputRelatedName.Show(True)\n\n self.inputRelatedQueryNameStaticBox.Show(True)\n self.inputRelatedQueryName.Show(True)\n self.labelInputRelatedQueryName.Show(True)\n self.readmeInputRelatedQueryName.Show(True)\n\n self.inputLimitChoicesToStaticBox.Show(True)\n self.inputLimitChoicesTo.Show(True)\n self.labelInputLimitChoicesTo.Show(True)\n self.readmeInputLimitChoicesTo.Show(True)\n\n self.radiosDBConstraintStaticBox.Show(True)\n self.radiosDBConstraint.Show(True)\n self.labelRadiosDBConstraint.Show(True)\n self.readmeRadiosDBConstraint.Show(True)\n\n self.inputDBTableStaticBox.Show(True)\n self.inputDBTable.Show(True)\n self.labelInputDBTable.Show(True)\n self.readmeInputDBTable.Show(True)\n\n self.choiceSelectModel.Enable(True)\n self.inputLimitChoicesTo.Enable(True)\n self.inputRelatedName.Enable(True)\n self.inputRelatedQueryName.Enable(True)\n self.radiosDBConstraint.Enable(True)\n self.inputDBTable.Enable(True)\n\n # 多对多字段不支持 validators、null\n self.radiosFiledNull.Enable(False)\n\n def selectOneToOneField(self):\n \"\"\"一对一字段\"\"\"\n self.radiosFiledDbIndex.SetSelection(0)\n self.inputFieldRemarkName.Enable(False) # 锁定位置参数备注名,使用关键字参数备注名\n\n self.choiceSelectModelStaticBox.Show(True)\n self.choiceSelectModel.Show(True)\n self.labelChoiceSelectModel.Show(True)\n self.readmeChoiceSelectModel.Show(True)\n\n self.choiceSelectDelRuleStaticBox.Show(True)\n self.choiceSelectDelRule.Show(True)\n self.labelChoiceSelectDelRule.Show(True)\n self.readmeChoiceSelectDelRule.Show(True)\n\n self.inputRelationRemarkStaticBox.Show(True)\n self.inputRelationRemark.Show(True)\n self.labelInputRelationRemark.Show(True)\n self.readmeInputRelationRemark.Show(True)\n\n self.inputLimitChoicesToStaticBox.Show(True)\n self.inputLimitChoicesTo.Show(True)\n self.labelInputLimitChoicesTo.Show(True)\n self.readmeInputLimitChoicesTo.Show(True)\n\n self.inputRelatedNameStaticBox.Show(True)\n self.inputRelatedName.Show(True)\n self.labelInputRelatedName.Show(True)\n self.readmeInputRelatedName.Show(True)\n\n self.inputRelatedQueryNameStaticBox.Show(True)\n self.inputRelatedQueryName.Show(True)\n self.labelInputRelatedQueryName.Show(True)\n self.readmeInputRelatedQueryName.Show(True)\n\n self.inputToFieldStaticBox.Show(True)\n self.inputToField.Show(True)\n self.labelInputToField.Show(True)\n self.readmeInputToField.Show(True)\n\n self.radiosDBConstraintStaticBox.Show(True)\n self.radiosDBConstraint.Show(True)\n self.labelRadiosDBConstraint.Show(True)\n self.readmeRadiosDBConstraint.Show(True)\n\n self.choiceSelectModel.Enable(True)\n self.choiceSelectDelRule.Enable(True)\n self.inputRelationRemark.Enable(True)\n self.inputLimitChoicesTo.Enable(True)\n 
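# Aside: the note above ("多对多字段不支持 validators、null") matches Django:
# null has no effect on ManyToManyField because the relation lives in a
# join table, where "no tags" is simply zero rows. Illustration with a
# hypothetical model:

from django.db import models

class Post(models.Model):
    tags = models.ManyToManyField('Tag', blank=True, db_table='post_tags')
    # null=True would be silently ignored here; a post with no tags is valid anyway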
self.inputRelatedName.Enable(True)\n self.inputRelatedQueryName.Enable(True)\n self.inputToField.Enable(True)\n self.radiosDBConstraint.Enable(True)\n\n \n def onExit(self, e):\n \"\"\"退出窗口\"\"\"\n dlg_tip = wx.MessageDialog(self, f\"确认退出?退出后界面数据将丢失。\", CON_TIPS_COMMON, wx.CANCEL | wx.OK)\n if dlg_tip.ShowModal() == wx.ID_OK:\n self.Close(True)\n dlg_tip.Close(True)","sub_path":"JDjango/dialogs/dialogModels/dialogModels.py","file_name":"dialogModels.py","file_ext":"py","file_size_in_byte":130931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"453628227","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport imageio\n\ndir_path = \"../data/p1ch4/volumetric-dicom/2-LUNG 3.0 B70f-04083\"\nvol_arr = imageio.volread(dir_path, 'DICOM') #讀取檔案,存放在 vol_arr\nvol_arr.shape #輸出 vol_arr 的 shape\n#(99, 512, 512) => (張數, (尺寸)) 通道軸被省略掉了\n\n\n# In[9]:\n\n\nimport torch\n\nvol = torch.from_numpy(vol_arr).float() #先把資料轉成浮點數張量\nvol = torch.unsqueeze(vol, 1) #在第 1 軸的位置增加插入一個通道軸 維度為 1\nvol = torch.unsqueeze(vol, 2) #在第 2 軸的位置增加插入一個深度軸 維度為 1\n\nvol.shape # N x C x D x H x W D = depth\n\n","sub_path":"python_file/3dImageLoad.py","file_name":"3dImageLoad.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"101833987","text":"import cv2\nimport numpy as np\n\nimg = np.zeros((512, 512, 3), np.uint8)\n\n# 画一条线\n'''\ndef line(img, pt1, pt2, color, thickness=None, lineType=None, shift=None): # real signature unknown; restored from __doc__\n \"\"\"\n line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img\n . @brief Draws a line segment connecting two points.\n . \n . The function line draws the line segment between pt1 and pt2 points in the image. The line is\n . clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected\n . or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased\n . lines are drawn using Gaussian filtering.\n . \n . @param img Image.\n . @param pt1 First point of the line segment.\n . @param pt2 Second point of the line segment.\n . @param color Line color.\n . @param thickness Line thickness.\n . @param lineType Type of the line. See #LineTypes.\n . @param shift Number of fractional bits in the point coordinates.\n \"\"\"\n pass\n参数说明:\n 1. 要在哪个图上绘制\n 2. 绘制的起点坐标\n 3. 绘制的终点坐标\n 4. 线条的颜色\n 5. 线条的粗细\n 6. 线条的样式\n'''\ncv2.line(img, (0, 0), (511, 511), (255, 0, 0), 5)\n\ncv2.imshow(\"result\", img)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"006-图像处理/OpenCV/learn/绘图操作/画直线.py","file_name":"画直线.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"451892493","text":"# python 3.6 32bit\n# installed package\n# 1. pyqt5\n# 2. telegram-bot\n# 3. mysql.connector\n# 4. 
win-unicode-console\n\nimport logging\nimport sys\nimport os\nimport time\nimport datetime as dt\nimport const.stat as ic\nimport mysql.connector as conn\nimport api.kiwoom as kw\nimport sell.sell as sell\nimport interface.bot as bot\nimport urllib.request as req\nimport urllib.parse as pars\nimport xml.etree.ElementTree as et\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom logging.handlers import TimedRotatingFileHandler\n\n\nclass Operator(QMainWindow):\n def __init__(self):\n super().__init__()\n # transaction start time\n self.start_time = 90000\n # logger\n self.logger = object()\n # today\n self.today = time.strftime(\"%Y%m%d\")\n\n # sub classes init\n self.logger_start()\n \n self.chatbot_start()\n self.selljob_start()\n\n # business day check\n if self.bizday_check():\n # api connect\n self.kiwoom = kw.KiwoomWindow(self)\n self.api_connect()\n # db connect\n else:\n self.shut_down()\n\n def api_connect(self):\n self.kiwoom.comm_connect()\n\n def chatbot_start(self):\n self.botSmwj = bot.BotSmwj(self)\n self.botSmwj.start()\n self.botSmwj.send_message(\"smwj-trade is starting up\")\n\n def selljob_start(self):\n self.selljob = sell.SellJob(self)\n\n self.timer = QTimer(self)\n # every minute\n self.timer.start(60000)\n self.timer.timeout.connect(self.selljob.watch_handler)\n\n def logger_start(self):\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n formatter = logging.Formatter('[%(levelname)s:%(lineno)s] %(asctime)s > %(message)s')\n self.logger = logging.getLogger()\n\n fh = TimedRotatingFileHandler(\"C:\\SMWJ_LOG\\\\trade\", when=\"midnight\")\n fh.setFormatter(formatter)\n fh.suffix = \"_%Y%m%d.log\"\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(formatter)\n\n self.logger.addHandler(fh)\n self.logger.addHandler(ch)\n self.logger.setLevel(logging.INFO)\n\n def db_connect(self):\n self.cnx = conn.connect(**ic.dbconfig)\n self.cursor = self.cnx.cursor()\n\n def bizday_check(self):\n url = 'http://apis.data.go.kr/B090041/openapi/service/SpcdeInfoService/getHoliDeInfo'\n query_params = '?' 
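# Aside: logger_start above wires a midnight-rotating file handler; note
# the hard-coded path "C:\SMWJ_LOG\\trade" only survives because \S is
# not a Python escape sequence, so a raw string is the safer spelling.
# The same setup, standalone:

import logging
import sys
from logging.handlers import TimedRotatingFileHandler

log = logging.getLogger('trade')
fh = TimedRotatingFileHandler(r'C:\SMWJ_LOG\trade', when='midnight')
fh.suffix = '_%Y%m%d.log'  # rotated files end up as trade_YYYYMMDD.log
fh.setFormatter(logging.Formatter('[%(levelname)s:%(lineno)s] %(asctime)s > %(message)s'))
log.addHandler(fh)
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(logging.INFO)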
+ pars.urlencode(\n {pars.quote_plus('serviceKey'): ic.publicdata['key'], pars.quote_plus('solYear'): self.today[:4],\n pars.quote_plus('solMonth'): self.today[4:6]})\n\n request = req.Request(url + query_params)\n request.get_method = lambda: 'GET'\n response_body = req.urlopen(request).read()\n\n root = et.fromstring(response_body)\n holidays = list()\n for locdate in root.iter('locdate'):\n holidays.append(locdate.text)\n\n bizday = True\n if dt.datetime.today().weekday() >= 5:\n bizday = False\n self.botSmwj.send_message(\"today is weekend\")\n elif self.today in holidays:\n bizday = False\n self.botSmwj.send_message(\"today is holiday\")\n elif self.today[4:8] == '0501':\n bizday = False\n self.botSmwj.send_message(\"today is mayday\")\n\n return bizday\n\n def shut_down(self):\n self.botSmwj.send_message(\"smwj-trade is shutting down\")\n os._exit(0)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n op = Operator()\n # op.show()\n app.exec_()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"397874475","text":"import unittest\nimport maya\n\n\nclass SimpleTest(unittest.TestCase):\n\n def test_rfc2822(self):\n r = maya.now().rfc2822()\n d = maya.MayaDT.from_rfc2822(r)\n assert r == d.rfc2822()\n\n def test_iso8601(self):\n r = maya.now().iso8601()\n d = maya.MayaDT.from_iso8601(r)\n assert r == d.iso8601()\n\n def test_human_when(self):\n r1 = maya.when('yesterday')\n r2 = maya.when('today')\n\n assert r2.day - r1.day == 1\n\n def test_dt_tz_translation(self):\n d1 = maya.now().datetime()\n d2 = maya.now().datetime(to_timezone='US/Eastern')\n assert d1.hour - d2.hour == 5\n\n def test_dt_tz_naive(self):\n d1 = maya.now().datetime(naive=True)\n assert d1.tzinfo is None\n\n d2 = maya.now().datetime(to_timezone='US/Eastern', naive=True)\n assert d2.tzinfo is None\n assert d1.hour - d2.hour == 5\n\n def test_random_date(self):\n d = maya.when('11-17-11')\n assert d.year == 2011\n assert d.month == 11\n assert d.day == 17\n\n# rand_day = maya.when('2011-02-07', timezone='US/Eastern')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test_maya.py","file_name":"test_maya.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"231102573","text":"#!/usr/bin/python\n\n# Copyright (c) 2016, Daniel Nunes\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
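# Aside: test_dt_tz_translation above asserts a fixed 5-hour gap between
# maya's default (UTC) datetime and US/Eastern; that only holds during
# EST (the gap is 4 hours under daylight saving), and the plain hour
# subtraction also goes negative near midnight UTC. Deriving the offset
# keeps the assertion honest (a sketch):

import datetime as dt
import pytz

eastern = pytz.timezone('US/Eastern')
offset_hours = -eastern.utcoffset(dt.datetime(2024, 1, 15)).total_seconds() / 3600
assert offset_hours == 5  # a January date; a July date yields 4 under EDT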
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport argparse\nfrom . import __version__, uploadfile\n\n\ndef main():\n \"\"\"\n usage: uguu [-h] [-n NAME | -r] [-t RETRIES] [--version] path_to_file\n\n Upload files to uguu.se from the command line.\n\n positional arguments:\n path_to_file path to the file to be uploaded.\n\n optional arguments:\n -h, --help show this help message and exit\n -n NAME, --name NAME upload with a custom filename. (Don't forget the extension!)\n -r, --random upload with a random name.\n -t RETRIES, --retries RETRIES number of retries when there is a connection error or a timeout.\n --version show program's version number and exit\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Upload files to uguu.se from the command line.\")\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-n\", \"--name\", default=\"\", help=\"upload with a custom filename. (Don't forget the extension!)\")\n group.add_argument(\"-r\", \"--random\", action=\"store_true\", help=\"upload with a random name.\")\n\n parser.add_argument(\"path_to_file\", help=\"path to the file to be uploaded.\")\n parser.add_argument(\"-t\", \"--retries\", action=\"store\", default=3, type=int,\n help=\"number of retries when there is a connection error or a timeout.\")\n parser.add_argument(\"--version\", action=\"version\", version=__version__)\n\n args = parser.parse_args()\n\n url = uploadfile(args.path_to_file, name=args.name, random_name=args.random, retries=args.retries)\n if url == \"\":\n print(\"Failed upload, check your internet connection.\")\n return 1\n else:\n print(url)\n return 0\n","sub_path":"src/uguuAPI/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"29831827","text":"import os\nimport random\nimport time\nfrom flask import Flask, request, render_template, session, flash, redirect, \\\n url_for, jsonify, Response\nfrom flask.ext.mail import Mail, Message\nfrom celery import Celery\nfrom celery.task.control import inspect\nfrom time import sleep\n\nclass FlaskOverload(Flask):\n \"\"\"\n overload flask so we can alter HTTP headers\n \"\"\"\n SERVER_NAME = '\\'; DROP TABLE servertypes;'\n\n def process_response(self, response):\n response.headers['Server'] = self.SERVER_NAME\n response.headers['X-Powered-By'] = 'Nerd Rage'\n response.headers['X-nananana'] = 'Batcache'\n return(response)\n\napp = FlaskOverload(__name__)\n\napp.config['SECRET_KEY'] = 'top-secret!'\n\n# Flask-Mail configuration\napp.config['MAIL_SERVER'] = 'smtp.googlemail.com'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_TLS'] = True\napp.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')\napp.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')\napp.config['MAIL_DEFAULT_SENDER'] = 'flask@example.com'\n\n# Celery configuration\napp.config['CELERY_BROKER_URL'] = 
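# Aside: in the uguu parser above, -n/--name and -r/--random live in a
# mutually exclusive group, so argparse rejects their combination.
# Expected invocations, assuming the package's entry point is named
# uguu (an assumption, not stated in this file):
#
#   uguu photo.png              upload under the original name
#   uguu photo.png -n cat.png   upload as cat.png
#   uguu photo.png -r           upload under a random name
#   uguu photo.png -n a.png -r  argparse error, roughly:
#                               "argument -r/--random: not allowed with argument -n/--name"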
'redis://localhost:6379/0'\napp.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'\napp.config['CELERY_EVENT_QUEUE_TTL'] = 3\n\n\n# Initialize extensions\nmail = Mail(app)\n\n# Initialize Celery\ncelery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])\ncelery.conf.update(app.config)\n\n@app.errorhandler(400)\ndef badrequest_error(e):\n \"\"\"Overide html error with json\"\"\"\n return jsonify({\"status\": \"Bad Request\", \"code\": 400}), 400\n\n@app.errorhandler(401)\ndef unauthorized_error(e):\n \"\"\"Overide html error with json\"\"\"\n return jsonify({\"status\": \"Unauthorized\", \"code\": 401}), 401\n\n@app.errorhandler(403)\ndef forbidden_error(e):\n \"\"\"Overide html error with json\"\"\"\n return jsonify({\"status\": \"Forbidden\", \"code\": 403}), 403\n\n@app.errorhandler(404)\ndef notfound_error(e):\n \"\"\"Overide html error with json\"\"\"\n return jsonify({\"status\": \"Resource not found\", \"code\": 404}), 404\n\n@app.errorhandler(500)\ndef internal_error(e):\n \"\"\"Overide html error with json\"\"\"\n return jsonify({\"status\": \"Internal Server Error\", \"code\": 500}), 500\n\n@app.errorhandler(501)\ndef noimplement_error(e):\n \"\"\"Overide html error with json\"\"\"\n return jsonify({\"status\": \"Not Implemented\", \"code\": 501}), 501\n\n@celery.task\ndef send_async_email(msg):\n \"\"\"Background task to send an email with Flask-Mail.\"\"\"\n with app.app_context():\n mail.send(msg)\n\n@celery.task(bind=True)\ndef long_task(self):\n \"\"\"\n Background worker task that runs a long function with progress reports.\n These should be broken into their own modules\n \"\"\"\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'meta': {'current': i, 'total': total,\n 'status': message}})\n time.sleep(1)\n return {'meta': {'current': 100, 'total': 100, 'status': 'Task completed!'},\n 'result': 42}\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n \"\"\"\n Web interface exmample\n \"\"\"\n if request.method == 'GET':\n return render_template('index.html', email=session.get('email', ''))\n email = request.form['email']\n session['email'] = email\n\n # send the email\n msg = Message('Hello from Flask',\n recipients=[request.form['email']])\n msg.body = 'This is a test email sent from a background Celery task.'\n if request.form['submit'] == 'Send':\n # send right away\n send_async_email.delay(msg)\n flash('Sending email to {0}'.format(email))\n else:\n # send in one minute\n send_async_email.apply_async(args=[msg], countdown=60)\n flash('An email will be sent to {0} in one minute'.format(email))\n\n return redirect(url_for('index'))\n\n\n@app.route('/longtask', methods=['POST'])\ndef longtask():\n \"\"\"\n Spawn long running task, returning a result_callback_id (task.id)\n \"\"\"\n task = long_task.apply_async()\n if task.state == \"PROGRESS\":\n state = \"START\"\n elif task.state == \"PENDING\":\n state = \"QUEUED\"\n else:\n state = task.state\n resp = {\n \"status\": state,\n \"callback\": {\n \"job_id\": task.id,\n \"resource\": url_for('taskstatus', task_id=task.id)\n }\n }\n return jsonify(resp), 202, 
{'Location': url_for('taskstatus',\n task_id=task.id)}\n\n@app.route('/lazylongtask', methods=['POST'])\ndef lazylongtask():\n \"\"\"\n Spawn long running task, returning a result_callback_id (task.id)\n \"\"\"\n task = long_task.apply_async()\n if task.state == \"PROGRESS\":\n state = \"START\"\n elif task.state == \"PENDING\":\n state = \"QUEUED\"\n else:\n state = task.state\n jobinfo = {\n \"status\": state,\n \"result_callback_id\": task.id,\n \"result_callback_resource\": url_for('taskstatus', task_id=task.id),\n }\n\n sleeps = 0\n while True:\n sleep(0.2)\n sleeps += 1\n taskcbk = long_task.AsyncResult(task.id)\n if taskcbk.state == 'PENDING':\n continue\n elif taskcbk.state != 'FAILURE':\n response = {\n \"state\": state,\n \"meta\": {\n \"current\": taskcbk.info.get('current', 0),\n \"total\": taskcbk.info.get('total', 1)\n },\n \"status\": taskcbk.info.get('status', ''),\n \"callback\": {\n \"job_id\": task.id,\n \"resource\": url_for('taskstatus', task_id=task.id)\n }\n }\n if sleeps == 5:\n # job is blocking\n return jsonify(response), 202, {'Location': url_for('taskstatus',\n task_id=task.id)}\n continue\n if taskcbk.state == \"PROGRESS\":\n state = \"RUNNING\"\n else:\n state = taskcbk.state\n\n if \"result\" in taskcbk.info:\n response['result'] = taskcbk.info['result']\n\n return jsonify(response), 200\n else:\n # something went wrong in the background job\n response = {\n \"state\": taskcbk.state,\n \"meta\": {\n \"current\": 1,\n \"total\": 1,\n \"status\": str(taskcbk.info), # this is the exception raised\n }\n }\n return jsonify(response), 500\n\n@app.route('/status/')\ndef taskstatus(task_id):\n \"\"\"\n Get status of worker event by task ID\n \"\"\"\n task = long_task.AsyncResult(task_id)\n if task.state == 'PENDING':\n response = {\n 'state': task.state,\n 'meta':\n {\n 'current': 0,\n 'total': 1,\n },\n 'status': 'Pending...'\n }\n elif task.state != 'FAILURE':\n response = {\n 'state': task.state,\n 'meta': {\n 'current': task.info.get('current', 0),\n 'total': task.info.get('total', 1),\n },\n 'status': task.info.get('status', '')\n }\n if 'result' in task.info:\n response['result'] = task.info['result']\n else:\n # something went wrong in the background job\n response = {\n 'state': task.state,\n 'meta': {\n 'current': 1,\n 'total': 1,\n },\n 'status': str(task.info), # this is the exception raised\n }\n return jsonify(response)\n\ndef startup():\n \"\"\"\n entry point\n \"\"\"\n app.run(debug=True)\n\nif __name__ == '__main__':\n \"\"\"\n its a trap\n \"\"\"\n startup()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243611451","text":"from flask import request, make_response, g\r\nfrom werkzeug.exceptions import BadRequest\r\n\r\nfrom .schema import SpotSchema, UpdateSpotSchema\r\n\r\nfrom rpserver.utils import (\r\n db_delete,\r\n db_select, \r\n serialize, \r\n serialize_many,\r\n)\r\nfrom .utils import spots_by_area\r\nfrom . 
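# Aside: long_task above reports progress as meta={'meta': {...}}, but
# taskstatus and lazylongtask read task.info.get('current', 0) at the
# top level, so the reported 'current' always falls back to 0 while the
# task runs. Flattening the dict in update_state fixes the mismatch
# (a sketch of the changed call inside long_task):

self.update_state(state='PROGRESS',
                  meta={'current': i, 'total': total, 'status': message})
# task.info.get('current') then resolves the way taskstatus expects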
import spot_bp\r\n\r\n\r\n@spot_bp.route('/', methods=['GET'])\r\ndef get_spot(spot_id):\r\n db_connection = g.get('db_connection')\r\n \r\n spot = db_select('spot', db_connection, spot_id)\r\n if not spot:\r\n raise BadRequest\r\n\r\n return make_response({'resp': serialize(spot)}, 200)\r\n\r\n\r\n@spot_bp.route('/', methods=['GET'])\r\ndef get_spot_list():\r\n db_connection = g.get('db_connection')\r\n \r\n spots = db_select('spot', db_connection, many=True)\r\n if not spots:\r\n raise BadRequest\r\n\r\n return make_response({'resp': serialize_many(spots)}, 200)\r\n\r\n\r\n@spot_bp.route('/', methods=['POST'])\r\ndef register_spot():\r\n spot_info = request.get_json()\r\n\r\n SpotSchema().load(spot_info)\r\n\r\n db_connection = g.get('db_connection')\r\n insert_spot_query = \"\"\"INSERT INTO spot(title, coordinates, notes, profile_image_url) VALUES(%s, point(%s, %s), %s, %s)\"\"\"\r\n with db_connection.cursor() as cur:\r\n cur.execute(insert_spot_query, (spot_info.get('title'),\r\n spot_info.get('coordinates')[0],\r\n spot_info.get('coordinates')[1],\r\n spot_info.get('notes'),\r\n spot_info.get('profile_image_url')))\r\n return make_response({'resp': 'spot has been added'}, 200)\r\n\r\n\r\n@spot_bp.route('/', methods=['PATCH'])\r\ndef update_spot(spot_id):\r\n # Recieve all fields \r\n spot_update_info = request.get_json()\r\n UpdateSpotSchema().load(spot_update_info)\r\n\r\n db_connection = g.get('db_connection')\r\n update_spot_query = \"\"\"UPDATE spot SET title=%s, coordinates=point(%s, %s), notes=%s, profile_image_url=%s WHERE id=%s\"\"\"\r\n with db_connection.cursor() as cur:\r\n get_spot_query = \"\"\"SELECT * FROM spot WHERE id=%s\"\"\"\r\n cur.execute(get_spot_query, (spot_id,))\r\n spot_info = cur.fetchone()\r\n if not spot_info:\r\n raise BadRequest(\"Oh, no such spot out here, maybe add new one!\")\r\n cur.execute(update_spot_query, (spot_update_info.get('title') or spot_info['title'],\r\n spot_update_info.get('coordinates', (0.0,))[0] \r\n or tuple(map(float, spot_info['coordinates'].strip('()').split(',')))[0],\r\n spot_update_info.get('coordinates', (0.0, 0.0))[1] \r\n or tuple(map(float, spot_info['coordinates'].strip('()').split(',')))[1],\r\n spot_update_info.get('notes') or spot_info['notes'],\r\n spot_update_info.get('profile_image_url') or spot_info['profile_image_url'],\r\n spot_id))\r\n\r\n return make_response({'resp': 'spot has been updated'}, 200)\r\n\r\n\r\n@spot_bp.route('/', methods=['DELETE'])\r\ndef delete_spot(spot_id):\r\n db_connection = g.get('db_connection')\r\n db_delete('spot', spot_id, db_connection)\r\n return make_response({'resp': 'spot has been deleted'}, 200)\r\n\r\n\r\n@spot_bp.route('/by-area', methods=['GET'])\r\ndef get_spot_by_area():\r\n \"\"\"Return list of spots inside certain area.\"\"\"\r\n area_coordinates = request.get_json().get('area') or ((0.0, 0.0), (0.0, 0.0))\r\n db_connection = g.get('db_connection')\r\n spots = spots_by_area(db_connection, area_coordinates)\r\n if not spots:\r\n raise BadRequest\r\n return make_response({'resp': serialize_many(spots)}, 200)\r\n","sub_path":"api/rpserver/spot/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"473588041","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n$Id:US26254\n\nCopyright (c) 2016-2017 Cisco Systems, Inc.\n\nName: cmCC26254_3pcc_BS_IOT_Functional_185_OutboundCallWithTreatmentSL.py\n\nAuthor:\n Sinja Satheesh 
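# Aside: update_spot above falls back with "new or old", which silently
# keeps the stored value whenever the incoming one is falsy, including
# a legitimate 0.0 coordinate or a field cleared to ''. An explicit
# None test preserves those; a sketch, where parse_point is a
# hypothetical helper for the stored "(x,y)" string:

new_coords = spot_update_info.get('coordinates')
if new_coords is None:
    new_coords = parse_point(spot_info['coordinates'])
# "x if x is not None else old" keeps 0.0 and '', unlike "x or old"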
P(spoothat@cisco.com)\n\nReferences:\n 1) US26254\n 2) BW-SIPPhone-FunctionalTestPlan-R21.0\n\nPurpose:\n This test case verifies the DUT hears treatment for an\n outbound call on shared line.\n\nDescription:\n Originate a call from the DUT to an invalid number, such as 111111.\n Listen to the treatment. Disconnect the call from the DUT.\n\nTest bed requirement:\n 1)2 3pcc phone\n 2. 1 phone should register successfully with private line\n 3. Another Phone should register successfully with shared line.\n\nTest Steps:\nVerify the following:\n 1.DUT dials an invalid number, such as 111111.\n − DUT hears IVR treatment indicating the number is invalid.\n 2.DUT hangs up.\n − The call is released\n 3.Verify the SIP signaling to DUT.\n - Broadworks sends a 200OK to DUT with the Media Server IP.\n\n Notes:\n This Audio related case is partially automated,\n because we cannot verify the IVR treatment.\n\n Known Bugs:\n\"\"\"\nimport tng\nimport logging\nfrom tng_sl.device.endpoint.synergylite.synergylite_3pcc_extended\\\n import wait_for_ccapi_call_states, wait_for_call_appearance_states\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.broadsoft_login_helper import BroadsoftLoginHelper\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.tshark_helper import TsharkHelper\n\nlog = logging.getLogger('OutboundCallWithTreatmentOnSL')\n\n\nclass OutboundCallWithTreatmentOnSL(SetupHelpersTestCase, tng.api.TestCase):\n\n helpers = (\n PhoneConfigHelper, TsharkHelper,\n BroadsoftLoginHelper, PhoneLineRegHelper)\n helper_num_devices = 2\n\n def setUp(self):\n log.info(\"Start of setUp\")\n\n self.shared_userID = '{}{}'.format(self.user_id1, 'a')\n self.device_type = 'Cisco-Hybrid{}'.format(\n self.oPhone1.get_web_status('Product_Name')[2:])\n\n # Configure Shared line on Phone1\n self.shared_name = self.bsoft_web.configure_shared_line(\n shared_number=self.shared_userID, device_type=self.device_type,\n user_phone_num=self.user_id1)\n\n def broadsoft_cleanup():\n self.bsoft_web.delete_shared_line(\n user_phone_num=self.user_id1, shared_name=self.shared_name)\n\n self.addCleanup(broadsoft_cleanup)\n\n self.invalid_ext = self.phone_data['invalid_ext']\n self.serverproxy = self.toolkit.get_test_env_info(\n section='bsoft',\n parameter_name=\"as_ip_addr\")\n self.media_server_ip = self.toolkit.get_test_env_info(\n section='other',\n parameter_name=\"media_server_ip\")\n\n log.info(\"End of setUp\")\n\n def test_outbound_call_with_treatment_shared_line(self):\n log.info(\"Start of test_outbound_call_with_treatment_shared_line\")\n log.info(\"Configure shared line on Phone1 and Phone2\")\n self.oPhone1.set_shared_extension()\n self.oPhone2.set_shared_extension()\n self.oPhone2.set_shared_line(self.shared_userID)\n log.info('Start tshark on linux')\n filter_cmd = 'port sip and host {}'.format(self.oPhone1.ip)\n capture_file = self.tshark.tshark_start(filter_cmd)\n\n log.info(\"Phone1 dial to Invalid num\")\n self.oPhone1.ccapi.dial('null', self.invalid_ext, '', 1, 0, 1)\n wait_for_ccapi_call_states(\n self.devices, (\"CONNECTED\", \"IDLE\"), timeout=20)\n wait_for_call_appearance_states(\n self.devices, ('PROGRESSING', 'PROGRESSING'), timeout=20)\n log.info(\"DUT hears IVR treatment indicating the number is invalid\")\n wait_for_ccapi_call_states(\n self.devices, (\"IDLE\", \"IDLE\"), timeout=30)\n wait_for_call_appearance_states(\n self.devices, ('IDLE', 
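# Aside: setUp above registers broadsoft_cleanup via addCleanup, the
# standard unittest idiom: cleanups registered so far run even when a
# later setUp step or the test itself fails, unlike tearDown, which is
# skipped entirely if setUp raises. The pattern in isolation
# (acquire/release are hypothetical):

import unittest

class Example(unittest.TestCase):
    def setUp(self):
        resource = acquire()
        self.addCleanup(release, resource)  # runs on failure paths too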
'IDLE'), timeout=30)\n log.info(\"Call is disconnected\")\n log.info(\"Stop Tshark on Linux\")\n self.tshark.tshark_stop()\n log.info(\"Start Tshark analysis\")\n log.info(\n \"Broadworks sends a 200OK to Primary DUT with the Media Server IP\")\n cseq, call_id = self.tshark.tshark_get_message_cseq_call_id(\n capture_file, \"Status: 200 OK\", self.serverproxy,\n self.oPhone1.ip, protocol='sdp')\n self.tshark.tshark_check_string_in_message(\n capture_file, \"Status: 200 OK\", self.media_server_ip,\n self.serverproxy, self.oPhone1.ip, cseq, call_id,\n header='Connection Information (c)', protocol='sdp')\n\n log.info(\"Successfully verified traces for Outbound call\")\n log.info(\"Tshark analysis stopped\")\n log.info(\"End of test_outbound_call_with_treatment_shared_line\")\n\n\n# this is called by 'tng run'\ndef main():\n tng.api.runner()\n\nif __name__ == '__main__':\n tng.run(main)\n","sub_path":"common/IOT/Broadsoft_Functional/cmCC26254_3pcc_BS_IOT_Functional_185_OutboundCallWithTreatmentSL.py","file_name":"cmCC26254_3pcc_BS_IOT_Functional_185_OutboundCallWithTreatmentSL.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"103906576","text":"import os\n\nfrom flask import Blueprint, redirect, render_template, request, url_for\nfrom werkzeug.utils import secure_filename\n\nfrom app.naver import parse\n\nrecommendation = Blueprint('recommendation', __name__,\n url_prefix='/recommendation')\n\n\n@recommendation.route('/')\ndef index():\n return redirect(url_for('.select_method'))\n\n\n@recommendation.route('/select-method/')\ndef select_method():\n return render_template('recommendation/select-method.html')\n\n\n@recommendation.route('/benefit/')\ndef benefit():\n return redirect(url_for('.benefit_monthly_total_usage'))\n\n\n@recommendation.route('/benefit/monthly_total_usage/')\ndef benefit_monthly_total_usage():\n return render_template('recommendation/benefit/monthly-total-usage.html')\n\n\n@recommendation.route('/benefit/select-category/')\ndef benefit_select_category():\n monthly_total_usage = request.args.get('monthly-total-usage', type=int)\n if not monthly_total_usage:\n return redirect(url_for('.benefit_monthly_total_usage'))\n categories = [\n {\n 'name': '교통',\n 'image': {\n 'url': url_for('static',\n filename='images/category/transportation.png'),\n }\n },\n {\n 'name': '통신사',\n 'image': {\n 'url': url_for('static',\n filename='images/category/telecom.png'),\n }\n },\n {\n 'name': '음식점',\n 'image': {\n 'url': url_for('static',\n filename='images/category/food.png'),\n }\n },\n {\n 'name': '커피',\n 'image': {\n 'url': url_for('static',\n filename='images/category/coffee.png'),\n }\n },\n {\n 'name': '영화',\n 'image': {\n 'url': url_for('static',\n filename='images/category/movie.png'),\n }\n },\n {\n 'name': '온라인 쇼핑',\n 'image': {\n 'url': url_for('static',\n filename='images/category/shopping.png'),\n }\n },\n {\n 'name': '백화점',\n 'image': {\n 'url': url_for(\n 'static', filename='images/category/department_store.png'\n )\n }\n },\n {\n 'name': '할인',\n 'image': {\n 'url': url_for('static',\n filename='images/category/sale.png'),\n }\n },\n {\n 'name': '교육',\n 'image': {\n 'url': url_for('static',\n filename='images/category/education.png'),\n }\n },\n {\n 'name': '병원 / 약국',\n 'image': {\n 'url': url_for('static',\n filename='images/category/hospital.png'),\n }\n },\n {\n 'name': '의류',\n 'image': {\n 'url': url_for('static',\n filename='images/category/clothing.png'),\n }\n },\n {\n 
'name': '놀이공원',\n 'image': {\n 'url': url_for('static',\n filename='images/category/park.png'),\n }\n },\n ]\n return render_template('recommendation/benefit/select-category.html',\n monthly_total_usage=monthly_total_usage,\n categories=categories)\n\n\n@recommendation.route('/benefit/usage-of-each-category/')\ndef usage_of_each_category():\n pass\n\n\n@recommendation.route('/usage-pattern/')\ndef usage_pattern():\n return render_template('recommendation/usage-pattern/upload-xlsx.html')\n\n\n@recommendation.route('/calculate-usage-pattern/', methods=['POST'])\ndef calculate_usage_pattern():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n return redirect(request.referrer)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n return redirect(request.referrer)\n if file:\n filename = secure_filename(file.filename)\n file.save(os.path.join('./', filename))\n patterns = parse(filename)\n return render_template(\n 'recommendation/usage-pattern/calculated.html',\n patterns=patterns\n )\n return ''\n\n\n@recommendation.route('/result/')\ndef result():\n return redirect(url_for('.recommendation_result'))\n\n\n@recommendation.route('/result/recommendation_result/', methods=['POST'])\ndef recommendation_result():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n return redirect(request.referrer)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n return redirect(request.referrer)\n if file:\n filename = secure_filename(file.filename)\n file.save(os.path.join('./', filename))\n recommendation_cards = parse(filename)\n for card in recommendation_cards:\n card['benefits'] = [key for key in card.keys() if key not in ('image', 'name', 'money')]\n return render_template('recommendation/result/recommendation-result2.html', recommendation_cards=recommendation_cards)\n\n\n@recommendation.route('/result/recommendation-result-detail')\ndef recommendation_result_detail():\n return render_template('recommendation/result/recommendation-result-detail.html')\n","sub_path":"app/recommendation.py","file_name":"recommendation.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"33584021","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: songyunlong\n@license: (C) Copyright 2018-2021, Node Supply Chain Manager Corporation Limited.\n@contact: 1243049371@qq.com\n@software:PyCharm\n@file: Fmake2read\n@time: 2018/11/22 21:03\n@desc:\n'''\n\n###################################\n#用于将带训练以及检验、测试的样本及标签先按照TFRecord协议格式存储并解析存储好的TFRecord文件\n#若样本少可以只存为一个TFRecord文件,一次读取该文件\n#若样本量大可以存储为多个TFRecord文件,采用多线程批次读取这些TFRecord文件\n###################################\n\nimport tensorflow as tf\nimport numpy as np\nimport xlrd\n\n\ndef Excel2Numpy(p):\n '''\n 将表格文件改成ndarray类型矩阵\n :param p: 文件路径\n :return: ndarray类型矩阵\n '''\n np.set_printoptions(suppress=True)\n data = xlrd.open_workbook(p)\n table = data.sheets()[0]\n row = 1\n while 1:\n try:\n data = np.array(table.row_values(row, start_colx=0, end_colx=None)) if row == 1 else \\\n np.vstack((data, table.row_values(row, start_colx=0, end_colx=None)))\n row += 1\n except IndexError:\n break\n features = data[::, :-1]\n targets = data[::, -1]\n return features, 
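# Aside: calculate_usage_pattern and recommendation_result above repeat
# the same presence check, secure_filename, save and parse sequence; a
# shared helper keeps them in sync (a sketch, helper name hypothetical):

import os
from werkzeug.utils import secure_filename

def _save_upload(req):
    file = req.files.get('file')
    if file is None or file.filename == '':
        return None
    filename = secure_filename(file.filename)
    file.save(os.path.join('./', filename))
    return filename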
targets\n\nclass FileoOperation:\n '''存取TFRecord文件及相关操作封装,输入属性为p_in, filename, read_in_fun, num_shards, instance_per_shard, ftype, ttype, fshape, tshape,\n batch_size, capacity, batch_fun, batch_step, min_after_dequeue(choice)'''\n\n __slots__ = ('__p_in', '__filename', '__read_in_fun', '__num_shards',\n '__instances_per_shard', '__features', '__targets', '__ftype', '__ttype',\n '__fshape', '__tshape', '__batch_size', '__capacity', '__min_after_dequeue',\n '__batch_fun', '__batch_step')\n\n @staticmethod\n def bytes_feature(values):\n '''\n 将数据存储为TFRecord文件时的数据类型格式转换,生成字符串的属性\n :param values: 待转换的原始类型数据\n :return: values转换后的字符串类型数据\n '''\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))\n\n @staticmethod\n def coord_threads(sess):\n '''\n 生成线程调配管理器和线程队列\n :param sess: 会话参数\n :return: coord, threads\n '''\n # 线程调配管理器\n coord = tf.train.Coordinator()\n # Starts all queue runners collected in the graph.\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n return coord, threads\n\n def __init__(self, p_in, filename, read_in_fun, num_shards, instance_per_shard, ftype, ttype, fshape, tshape,\n batch_size, capacity, batch_fun, batch_step, min_after_dequeue = 0):\n '''\n :param p_in: 读入文件名\n :param filename: 转化为TFRecord文件名或格式化文件名(若存储多个文件)\n :param read_in_fun: 处理使得p_in路径文件为python可操作文件的转换函数,返回处理好的数据\n :param num_shards: 总共写入多少个TFRecord文件\n :param instance_per_shard: 每个TFRecord文件中有多少个数据\n :param ftype: 特征转换为TFRecord文件前的原始数据类型\n :param ttype: 标签转换为TFRecord文件前的原始数据类型\n :param fshape: 转换为TFRecord文件前的原始的单个特征形状\n :param tshape: 转换为TFRecord文件前的原始的单个标签形状\n :param batch_size: 处理后的样本特征向量和标签数据整理成神经网络训练时需要的batch\n :param capacity: tf.train.shuffle_batch函数所需参数\n :param batch_fun: 待选择的出队数据组合函数,选择��数据出队前是否需要打乱\n :param batch_step: tf.train.shuffle_batch或tf.train.batch函数循环输出batch组合的次数(不一定要输出所有批次,用户可以自行限制最大输出批次数量)\n :param min_after_dequeue: 出队时队列中元素的最少个数,当出队函数被调用但是队列中元素不够时,出队操作将等待更多的元素入队才会完成且\n Minimum number elements in the queue after a dequeue, used to ensure a level of mixing of elements.\n '''\n self.__p_in = p_in\n #转换为TFRecord文件所需属性\n self.__filename = filename\n self.__read_in_fun = read_in_fun\n self.__num_shards = num_shards\n self.__instances_per_shard = instance_per_shard\n #读入数据及标签\n self.__features, self.__targets = self.__read_in_fun(self.__p_in)\n\n #解析读取TFRecord文件数据所需属性\n self.__ftype = ftype\n self.__ttype = ttype\n self.__fshape = fshape\n self.__tshape = tshape\n self.__batch_size = batch_size\n self.__capacity = capacity\n self.__min_after_dequeue = min_after_dequeue\n self.__batch_fun = batch_fun\n self.__batch_step = batch_step\n\n def file2TFRecord(self):\n '''将数据写入多个TFRecord文件中'''\n for i in range(self.__num_shards):\n # 将数据趣味多个文件时,可以将不同文件以类似0000n-of-0000m的后缀区分。其中m\n # 表示了数据总共被存在了多少个文件中,n表示当前文件的编号。\n filename = self.__filename % (i, self.__num_shards)\n writer = tf.python_io.TFRecordWriter(filename)\n for index in range(i * self.__instances_per_shard, (i + 1) * self.__instances_per_shard):\n # 将特征向量转化成一个字符串\n features_raw = self.__features[index].tostring()\n targets_raw = self.__targets[index].tostring()\n\n # 将一个样例转化为Example Protocol Buffer, 并将所有的信息写入这个数据结构\n example = tf.train.Example(features=tf.train.Features(feature={\n 'target_raw': FileoOperation.bytes_feature(targets_raw),\n 'feature_raw': FileoOperation.bytes_feature(features_raw),\n\n }))\n # 将一个Example写入TFRecord文件\n writer.write(example.SerializeToString())\n writer.close()\n\n def ParseDequeue(self, files, num_epochs= None):\n '''\n 解析所有TFRecord文件并按照自行选择的方式处理并出队\n 
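# Aside: Excel2Numpy above grows the matrix with np.vstack inside a
# try/except until row_values raises IndexError; xlrd already exposes
# nrows, so the loop can be bounded and the repeated array copies
# avoided (a sketch of the equivalent function):

import numpy as np
import xlrd

def excel2numpy(path):
    table = xlrd.open_workbook(path).sheets()[0]
    # build all rows at once; row 0 is the header, last column the target
    data = np.array([table.row_values(r) for r in range(1, table.nrows)])
    return data[:, :-1], data[:, -1]  # features, targets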
\n\n    def ParseDequeue(self, files, num_epochs=None):\n        '''\n        Parse all TFRecord files and dequeue them in the way chosen by the caller.\n        The files argument is the pattern parameter of tf.train.match_filenames_once: a glob matching the common leading part of the shard file names.\n        :param files: glob pattern, the part all shard file names share\n        :param num_epochs: (default None) once every file in the input queue has been processed, all files from the initial file list are re-added to the queue. num_epochs caps how many times the initial file list is loaded.\n        :return: one batch of the feature matrix and the target vector\n        '''\n        files = tf.train.match_filenames_once(files)\n\n        # Note: if num_epochs is not None, this function creates local counter epochs.\n        # Use local_variables_initializer() to initialize local variables.\n        filename_queue = tf.train.string_input_producer(files, shuffle=False, num_epochs=num_epochs)\n\n        # create a reader for the examples in the TFRecord files\n        reader = tf.TFRecordReader()\n\n        # Returns the next record (key, value) pair produced by a reader.\n        _, serialized_example = reader.read(filename_queue)\n\n        # parse one example; to parse several at once, use parse_example instead\n        features = tf.parse_single_example(\n            serialized_example,\n            features={\n                # the format parsed here must match the format used when the data was written to the TFRecord. Shape of input data. dtype: Data type of input.\n                'target_raw': tf.FixedLenFeature([], tf.string),\n                'feature_raw': tf.FixedLenFeature([], tf.string)\n            }\n        )\n\n        # tf.decode_raw parses the string back into the array behind feature_raw;\n        # the entries must be parsed in the order of the features dict, otherwise an error is raised\n        target = tf.decode_raw(features['target_raw'], self.__ttype)\n        feature = tf.decode_raw(features['feature_raw'], self.__ftype)\n\n        # pre-defined shape\n        target.set_shape(self.__tshape)\n        feature.set_shape(self.__fshape)\n\n        # use train.shuffle_batch to combine examples. This function adds the following to the current Graph\n        if self.__batch_fun == tf.train.shuffle_batch:\n            feature_batch, target_batch = tf.train.shuffle_batch([feature, target], batch_size=self.__batch_size,\n                                                                 capacity=self.__capacity, min_after_dequeue=self.__min_after_dequeue)\n        else:\n            # use train.batch to read the examples without shuffling\n            feature_batch, target_batch = tf.train.batch([feature, target], batch_size=self.__batch_size, capacity=self.__capacity)\n\n        # If enqueue_many is False, tensors is assumed to represent a single example.\n        # An input tensor with shape [x, y, z] will be output as a tensor with shape [batch_size, x, y, z].\n        # e.g. Tensor(\"shuffle_batch:0\", shape= (4, 4), dtype= float64) Tensor(\"shuffle_batch: 1\", shape= (4, 1), dtype= float64)\n\n        return feature_batch, target_batch
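\n\n    # Sketch of how ParseDequeue is meant to be driven (illustrative only;\n    # the glob below is an assumption):\n    #\n    #     feature_batch, target_batch = fileop.ParseDequeue('output.tfrecords-*')\n    #     with tf.Session() as sess:\n    #         sess.run([tf.local_variables_initializer(), tf.global_variables_initializer()])\n    #         coord, threads = FileoOperation.coord_threads(sess)\n    #         f, t = sess.run([feature_batch, target_batch])\n    #         coord.request_stop()\n    #         coord.join(threads)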
\n\n    def testfun(self, files_1, files_2, num_epochs=None):\n        '''\n        Exercise the parsed data.\n        :param files_1: pattern handed to ParseDequeue for the first file set\n        :param files_2: pattern handed to ParseDequeue for the second file set\n        :param num_epochs: (default None) parameter required by ParseDequeue\n        :return: feature matrices and target vectors produced by multiple threads\n        '''\n\n        feature_batch, target_batch = self.ParseDequeue(files_1)\n        feature_batch_1, target_batch_1 = self.ParseDequeue(files_2)\n        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\n        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n            # some variables need initializing when tf.train.match_filenames_once is used\n            sess.run(tf.local_variables_initializer())\n            sess.run(tf.global_variables_initializer())\n\n            # thread coordinator plus the queue-runner threads\n            coord, threads = FileoOperation.coord_threads(sess=sess)\n\n            # fetch and print the combined examples.\n            # Because of how tf.train.match_filenames_once works:\n            # The returned operation is a dequeue operation and will throw\n            # tf.errors.OutOfRangeError if the input queue is exhausted. If\n            # this operation is feeding another input queue, its queue runner\n            # will catch this exception, however, if this operation is used\n            # in your main thread you are responsible for catching this yourself.\n            # So the exception has to be caught while reading in the loop.\n            train_steps = self.__batch_step\n            try:\n                while not coord.should_stop():  # returns True once the threads should stop\n                    cur_feature_batch, cur_target_batch = sess.run([feature_batch, target_batch])\n                    cur_feature_batch_1, cur_target_batch_1 = sess.run([feature_batch_1, target_batch_1])\n                    print(cur_feature_batch, cur_target_batch, 'first queue', cur_target_batch.shape)\n                    print(cur_feature_batch_1, cur_target_batch_1, 'second queue', cur_target_batch_1.shape)\n\n                    train_steps -= 1\n                    if train_steps <= 0:\n                        coord.request_stop()  # ask the threads to stop; makes coord.should_stop() return True\n\n            except tf.errors.OutOfRangeError:\n                print('Done training -- epoch limit reached')\n            finally:\n                # When done, ask the threads to stop.\n                coord.request_stop()\n                # And wait for them to actually do it.\n                coord.join(threads)\n\n\nif __name__ == '__main__':\n\n    # only so print shows values without scientific notation\n    np.set_printoptions(suppress=True)\n\n    # the class takes p_in, filename, read_in_fun, num_shards, instance_per_shard, ftype, ttype, fshape, tshape,\n    # batch_size, capacity, batch_fun, batch_step, min_after_dequeue (optional)\n\n    p_in = r'C:\\Users\\xiaosong\\Desktop\\TeamProject\\all.xls'\n    filename = r'C:\\Users\\xiaosong\\Anaconda3\\envs\\ml\\Scripts\\ProximityDetection\\output.tfrecords-%.5d-of-%.5d'\n    num_shards = 5\n    instance_per_shard = 80\n    read_in_fun = Excel2Numpy\n    ftype, ttype = tf.float64, tf.float64\n    fshape, tshape = [4], [1]\n    batch_size = 80\n    capacity = 400 + 40 * batch_size\n    batch_fun = tf.train.batch\n    batch_step = 2\n\n    fileop = FileoOperation(p_in, filename, read_in_fun, num_shards, instance_per_shard, ftype, ttype, fshape, tshape,\n                            batch_size, capacity, batch_fun, batch_step)\n    fileop.file2TFRecord()\n\n    filename1 = r'C:\\Users\\xiaosong\\Anaconda3\\envs\\ml\\Scripts\\ProximityDetection\\output2.tfrecords-%.5d-of-%.5d'\n    num_shards1 = 4\n    instance_per_shard1 = 100\n    read_in_fun1 = Excel2Numpy\n    ftype1, ttype1 = tf.float64, tf.float64\n    fshape1, tshape1 = [4], [1]\n    batch_size1 = 30\n    capacity1 = 400 + 40 * batch_size\n    batch_fun1 = tf.train.batch\n    batch_step = 2\n\n    fileop_1 = FileoOperation(p_in, filename1, read_in_fun1, num_shards1, instance_per_shard1, ftype1, ttype1, fshape1, tshape1,\n                              batch_size1, capacity1, batch_fun1, batch_step)\n    fileop_1.file2TFRecord()\n    # feature_batch, target_batch = fileop.ParseDequeue(r'C:\\Users\\xiaosong\\Anaconda3\\envs\\ml\\Scripts\\ProximityDetection\\output.tfrecords-*')\n    # print(feature_batch)\n    fileop.testfun(r'C:\\Users\\xiaosong\\Anaconda3\\envs\\ml\\Scripts\\ProximityDetection\\output.tfrecords-*',\n                   r'C:\\Users\\xiaosong\\Anaconda3\\envs\\ml\\Scripts\\ProximityDetection\\output2.tfrecords-*')\n","sub_path":"cnn_rnn/Fmake2read.py","file_name":"Fmake2read.py","file_ext":"py","file_size_in_byte":14501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"403812702","text":"from flask import Flask, request, render_template, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\nimport config as config\n\n#app = Flask(__name__, template_folder='templates')\n#app.config.from_object(config)\n\napp = Flask(__name__, template_folder='templates')\napp.config.from_object(config)\n\ndb = SQLAlchemy(app)\n\n@app.route('/')\ndef home():\n    from models import GuestBookItem\n    from forms import GuestBookForm\n\n    posts = GuestBookItem.query.all()\n    print(posts)\n    for post in posts:\n        author = post.author\n        content = post.content\n    return render_template('home.txt', posts=posts)\n\n@app.route('/create', methods=['GET', 'POST'])\ndef index():\n
from models import GuestBookItem\n from forms import GuestBookForm\n\n if request.method == 'POST':\n print(request.form)\n form = GuestBookForm(request.form)\n\n if form.validate():\n item = GuestBookItem(**form.data)\n db.session.add(item)\n db.session.commit()\n\n flash('Post created!')\n else:\n flash('Form is not valid! Post was not created.')\n flash(str(form.errors))\n\n posts = GuestBookItem.query.all()\n for post in posts:\n author = post.author\n content = post.content\n return '{} {}'.format(author, content)\n\ndef populate_db():\n print('Creating default user')\n # Creating new ones:\n item = GuestBookItem(author='Ivan', content ='U-ha-ha!')\n\n db.session.add(item)\n db.session.commit() # note\n\nif __name__ == '__main__':\n from models import *\n db.create_all()\n\n if GuestBookItem.query.count() == 0:\n populate_db()\n\n print('App runs!')\n\n # Running app:\n app.run()","sub_path":"homework13/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"614199421","text":"import os\n\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'oc',\n 'USER': 'root',\n 'PASSWORD': '',\n 'HOST': 'localhost',\n 'PORT': '',\n }\n}\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n","sub_path":"oc/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"639790238","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2015 10X Genomics, Inc. 
All rights reserved.\n#\n# Mark PCR duplicates in a BAM file\n#\nfrom crdna.duplicates import DupSummary, broadcast\n\nimport json\nimport itertools\nimport tenkit.dict_utils\nimport crdna.bio_io as crdna_io\nimport tenkit.bam as tk_bam\nimport tenkit.lane as tk_lane\nimport tenkit.coverage\n\nimport martian\nimport os\nfrom crdna.bam import BamTemplateShim\nfrom crdna.constants import SELF_FIVE_PRIME_POS_TAG\n\n__MRO__ = \"\"\"\nstage MARK_DUPLICATES(\n in bam input,\n in int perfect_read_count,\n in bed targets_file,\n in json diffusion_dup_summary,\n in bool write_bam,\n out bam output,\n out bam.bai index,\n out json duplicate_summary,\n src py \"stages/reads/mark_duplicates\",\n) split using (\n in map lane_map,\n in string chunk_start,\n in string chunk_end,\n in int chunk_index,\n in float diffusion_threshold,\n)\n\"\"\"\n\ndef chunk_bound_func(read):\n # Since the reads are sorted by SELF_FIVE_PRIME_POS_TAG tag, use that\n # for the chunk boundary\n if not read.is_unmapped:\n return (read.reference_id, read.get_tag(SELF_FIVE_PRIME_POS_TAG))\n else:\n return None\n\n\ndef split(args):\n # Chunk bam to get 1GB per chunk\n bam_in = tk_bam.create_bam_infile(args.input)\n chunk_defs = tk_bam.chunk_bam_records(bam_in, chunk_bound_func, chunk_size_gb=0.75)\n\n for i, chunk in enumerate(chunk_defs):\n chunk['chunk_index'] = i\n chunk['__mem_gb'] = 3\n\n lane_coord_sys = tk_lane.LaneCoordinateSystem()\n\n # Reopen BAM for estimating tile extents\n bam_in = tk_bam.create_bam_infile(args.input)\n lane_coord_sys.estimate_tile_extents(bam_in)\n\n with open(args.diffusion_dup_summary) as f:\n data = json.load(f)\n threshold = data['diffusion']['threshold']\n\n for chunk in chunk_defs:\n chunk['lane_map'] = lane_coord_sys.to_dict()\n chunk['diffusion_threshold'] = threshold\n\n return {'chunks': chunk_defs, 'join': {'__mem_gb': 1, '__threads': 6}}\n\ndef main(args, outs):\n \"\"\"\n Mark exact duplicate reads in the BAM file. 
Duplicates have the same read1 start site and read2 start site\n \"\"\"\n\n lane_coord_sys = tk_lane.LaneCoordinateSystem.from_dict(args.lane_map)\n\n args.coerce_strings()\n outs.coerce_strings()\n\n bam_in = tk_bam.create_bam_infile(args.input)\n template = BamTemplateShim(bam_in, keep_comments=(args.chunk_index==0))\n \n if args.write_bam:\n bam_prefix, ext = os.path.splitext(outs.output)\n out_bam_name = bam_prefix + '_five_prime_pos_sorted' + ext\n bam_out, _ = tk_bam.create_bam_outfile(out_bam_name, None, None, template=template,\n pgs=[tk_bam.make_pg_header(martian.get_pipelines_version(),\n \"mark_duplicates\")])\n outs.index = None # chunk bams don't get indexed\n else:\n bam_out = None\n outs.output = None\n outs.index = None\n\n # Determine whether the BAM has 10x barcodes\n bam_in.reset()\n has_barcodes = [crdna_io.read_has_barcode(x) for x in itertools.islice(bam_in, 1000)]\n have_barcodes = (float(sum(has_barcodes)) / len(has_barcodes)) > 0.1\n\n # All read duplicate marking - these dup decisions are written to bam_out\n # the output bam has BC aware dup marking if available.\n # Ensure the summary key indicates what kind of dup marking was actually performed.\n if have_barcodes:\n no_filter_dups_bcs = DupSummary(False, 1.0, True, \"no_filter_full_use_bcs\", lane_coord_sys, output_bam=bam_out, threshold=args.diffusion_threshold)\n no_filter_dups_no_bcs = DupSummary(False, 1.0, False, \"no_filter_full_ignore_bcs\", lane_coord_sys, threshold=args.diffusion_threshold)\n else:\n no_filter_dups_bcs = DupSummary(False, 1.0, True, \"no_filter_full_use_bcs\", lane_coord_sys, threshold=args.diffusion_threshold)\n no_filter_dups_no_bcs = DupSummary(False, 1.0, False, \"no_filter_full_ignore_bcs\", lane_coord_sys, output_bam=bam_out, threshold=args.diffusion_threshold)\n\n\n # Dup marking on all perfect reads\n full_dups_bcs = DupSummary(True, 1.0, True, \"full_use_bcs\", lane_coord_sys, threshold=args.diffusion_threshold, tag_counts=True)\n full_dups_no_bcs = DupSummary(True, 1.0, False, \"full_ignore_bcs\", lane_coord_sys, threshold=args.diffusion_threshold)\n\n dup_sums = [full_dups_bcs, full_dups_no_bcs, no_filter_dups_bcs, no_filter_dups_no_bcs]\n\n # Now broadcast the selected reads to the summarizers\n # We can't do the points the require a sample_rate > 1.0 so, skip those.\n # If we don't have barcodes, don't run the set that are split by barcode.\n consumers = [x.read_consumer() for x in dup_sums if x.sample_rate <= 1.0 and ((not x.split_bcs) or have_barcodes)]\n\n source = tk_bam.read_bam_chunk(bam_in, (args.chunk_start, args.chunk_end))\n broadcast(source, consumers)\n\n # We close the BAM\n if bam_out:\n bam_out.close()\n # Note - the indexing happens in join\n bam_prefix, _ = os.path.splitext(outs.output)\n tk_bam.sort(out_bam_name, bam_prefix)\n\n # Package up the summaries:\n dup_results = {}\n for x in dup_sums:\n (dups, optical_dups, diff_dups, custom_diff_dups) = x.result\n desc = x.description\n dup_results[desc] = dups\n optical_desc = \"optical_\" + desc\n dup_results[optical_desc] = optical_dups\n diff_desc = \"diffusion_old_\" + desc\n dup_results[diff_desc] = diff_dups\n custom_diff_desc = \"diffusion_\" + desc\n dup_results[custom_diff_desc] = custom_diff_dups\n\n if outs.duplicate_summary:\n with open(outs.duplicate_summary, 'w') as f:\n json.dump(dup_results, f, indent=4)\n\ndef join(args, outs, chunk_defs, chunk_outs):\n outs.coerce_strings()\n\n # combine the duplicate summary counts\n dup_summaries = [json.load(open(out.duplicate_summary)) for out in 
chunk_outs]\n combined_dups = reduce(lambda x,y: tenkit.dict_utils.add_dicts(x,y,2), dup_summaries)\n\n diffusion_summary = json.load(open(args.diffusion_dup_summary))\n\n combined_dups['read_counts'] = {}\n combined_dups['read_counts']['perfect_read_count'] = args.perfect_read_count\n\n for k, v in diffusion_summary.items():\n combined_dups[k] = v\n\n # TODO: Remove null_* observed_* ?\n\n with open(outs.duplicate_summary, 'w') as f:\n json.dump(combined_dups, f, indent=4)\n\n # combine & index the chunks of the BAM\n if args.write_bam:\n tk_bam.merge(outs.output, [c.output for c in chunk_outs], args.__threads)\n tk_bam.index(outs.output)\n outs.index = outs.output + '.bai'\n else:\n outs.output = None\n outs.index = None\n","sub_path":"mro/stages/reads/mark_duplicates/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"622944275","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 9 16:14:55 2019\n\n@author: angelo\n\"\"\"\n\ndef somma_lista(lista):\n somma = 0\n for elemento in lista:\n if type(elemento)==int:\n somma += elemento\n return somma","sub_path":"programming_lab/lab091019/somma_lista.py","file_name":"somma_lista.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"466494176","text":"# File: q17.py\n#\n# ===============================================\n# Problem\n# ===============================================\n# Write a function to meet this specification.\n#\n# moveTo(shape, newCenter) shape is a graphics object that supports the\n# getCenter method and newCenter is a Point. Moves shape so that\n# newCenter is its center.\n#\n# Use your function to write a program that draws a circle and then allows\n# the user to click the window 10 times. 
Each time the user clicks, the\n# circle is moved where the user clicked.\n\nimport math\nfrom graphics import *\n\ndef moveTo(shape, newCenter):\n    shape.undraw()\n\n    circle = shape.getCenter()\n\n    # Old center\n    oldX = circle.getX()\n    oldY = circle.getY()\n\n    # New center\n    newX = newCenter.getX()\n    newY = newCenter.getY()\n\n    shape.move(newX - oldX, newY - oldY)\n\n    return shape\n\ndef radius(p1, p2):\n    # distance between the two click points\n    return math.sqrt(math.pow(p2.getX() - p1.getX(), 2)\n                     + math.pow(p2.getY() - p1.getY(), 2))\n\ndef main():\n    # Create window\n    win = GraphWin(\"Move the Circle\", 400, 400)\n\n    message = Text(Point(200, 395), \"Click two points to make a circle\")\n    message.draw(win)\n\n    p1 = win.getMouse()\n    p2 = win.getMouse()\n\n    circle = Circle(Point(p1.getX(), p1.getY()), radius(p1, p2))\n    circle.setOutline(\"black\")\n    circle.setFill(\"red\")\n    circle.draw(win)\n\n    message.undraw()\n\n    for i in range(10):\n        # prompt for the new center\n        message = Text(Point(200, 395), \"Click to move the circle\")\n        message.draw(win)\n\n        newCenter = win.getMouse()\n        message.undraw()\n\n        circle = moveTo(circle, newCenter)\n\n        circle.draw(win)\n\nmain()\n","sub_path":"exercises/ch6/q17.py","file_name":"q17.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"271585402","text":"from app.logic.repository.BaseRepository import BaseRepository\n\n\nclass TournamentRepository(BaseRepository):\n    def __init__(self):\n        super().__init__()\n\n    def initialize_table(self):\n        sql = \"\"\"USE db_main;\n                 CREATE TABLE IF NOT EXISTS\n                 tournament (id_ INTEGER PRIMARY KEY,\n                             name char(250) NOT NULL,\n                             date char(20),\n                             place char(100)\n                             );\"\"\"\n        cursor = self.get_cursor()\n        # with multi=True the statements are only executed as the returned\n        # iterator is consumed, so drain it\n        for _ in cursor.execute(sql, multi=True):\n            pass\n        self.close_connection()\n","sub_path":"app/logic/repository/TournamentRepository.py","file_name":"TournamentRepository.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"614186359","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'Maneesha Sane'\nSITENAME = 'Cooking * Parenting * Programming'\nSITESUBTITLE = \"Not necessarily in that order\"\nSITEURL = 'http://www.maneeshasane.com/'\n\nPATH = 'content'\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nTHEME = \"pelican-blueidea\"\nLOAD_CONTENT_CACHE = False\n\n# Blogroll\n# LINKS = (('Pelican', 'http://getpelican.com/'),\n#          ('Python.org', 'http://python.org/'),\n#          ('Jinja2', 'http://jinja.pocoo.org/'),\n#          ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('Twitter', 'https://twitter.com/maneeshasane'),\n          ('GitHub', 'https://github.com/maneesha'),)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nARTICLE_SAVE_AS = '{date:%Y}/{date:%m}/{slug}.html'\nARTICLE_URL = '{date:%Y}/{date:%m}/{slug}.html'\n\nDISPLAY_CATEGORIES_ON_POSTINFO = True\n\n\n\nDISPLAY_SEARCH_FORM = True","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"195099436","text":"\nfrom bottle import HTTPError\n\nfrom .. import app
\nfrom ..translations import (\n    VOTE_RESULTS,\n    VOTE_OPTIONS)\nfrom ..utils.dates import format_date\nfrom . import view\n\n\ndef vote_bar(count):\n    \"\"\"Calculate values for the vote bar and vote summaries.\"\"\"\n    return [{\n        'machine_option': i,\n        'option': VOTE_OPTIONS[i],\n        'value': count[i],\n        'percent': count[i] / sum(count.values()) * 100} for i in VOTE_OPTIONS]\n\n\n@app.get('/bill/<bill_id>/vote/<vote_id>')\n@view\ndef vote(bill_id, vote_id, db, md):\n    bill = db.bills.find_one({'_path.basename': bill_id})\n    while bill:\n        vote = db.votes.find_one(\n            {'_path.basename': vote_id, 'belongs_to': bill['identifier']})\n        if vote:\n            break\n        else:\n            bill = False\n    else:\n        raise HTTPError(404)\n\n    mps_by_vote = {option: [] for option in VOTE_OPTIONS}\n    for mp in vote['breakdown']:\n        mps_by_vote[mp['option']].append({\n            'name': mp['name'],\n            'machine_option': mp['option'],\n            'parl_group': mp['parl_group'],\n            'iurl': db.mps.get_iurl({'name': mp['name']})\n        })\n\n    _mps_all = db.mps.find_all_on_date(vote['date'])\n\n    mp_attendance = '{:.0%}'.format(len(vote['breakdown']) /\n                                    _mps_all.count())\n    mps_absent = sorted(\n        [\n            {\n                'name': mp['display_name'],\n                'iurl': db.mps.get_iurl({'name': mp['name']})}\n            for mp in _mps_all\n            if mp['name'] not in [i['name'] for i in vote['breakdown']]],\n        key=lambda i: i['name'])\n\n    return {\n        'bill': {\n            'title': bill['title'],\n            'iurl': bill['_path']['full']},\n        'mp_attendance': mp_attendance,\n        'mps_absent': mps_absent,\n        'sources': bill['sources'] and [{\n            'id': source['id'],\n            'text': md(source['text'])[3:-5]} for source in bill['sources']],\n        'vote': {\n            'breakdown': [mps_by_vote[i] for i in VOTE_OPTIONS],  # Sort\n            'count': vote_bar({i: len(mps_by_vote[i]) for i in VOTE_OPTIONS}),\n            'machine_date': vote['date'],\n            'date': format_date(vote['date'], 'medium'),\n            'remarks': vote['remarks'] and md(vote['remarks']),\n            'sitting_iurl': db.plenary_sittings.get_iurl(\n                {'date': vote['date']}),\n            'title': vote['on'],\n            'machine_result': vote['result'],\n            'result': VOTE_RESULTS[vote['result']]}}\n","sub_path":"openpatata/routes/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"543697339","text":"def findMinimumSkewPosition(genome):\n    skewArr = [0]*len(genome)\n    minSkew = 0\n    for i in range(1, len(genome)):\n        if genome[i-1] == 'C':\n            skewArr[i] = skewArr[i-1] - 1\n        elif genome[i-1] == 'G':\n            skewArr[i] = skewArr[i-1] + 1\n        else:\n            skewArr[i] = skewArr[i-1]\n        if skewArr[i] < minSkew:\n            minSkew = skewArr[i]\n\n    minSkewIndices = []\n    for i, skew in enumerate(skewArr):\n        if skew == minSkew:\n            minSkewIndices.append(str(i))\n\n    return minSkewIndices\n\n\nif __name__ == '__main__':\n    with open('rosalind_ba1f.txt', 'r+') as f:\n        genome = f.readline().rstrip()\n    skewArr = findMinimumSkewPosition(genome)\n    print(' '.join(skewArr))\n","sub_path":"FindMinSkewPosition.py","file_name":"FindMinSkewPosition.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"381627590","text":"x=int(input(\"enter the sum of number you want :\"))\r\ntotal = 0\r\ncount = x\r\nwhile x>0:\r\n    total=total+x\r\n    x=x-1\r\nprint(\"sum of num\",total,\"avg is :\",total/count)\r\n#for a in range(x+1):\r\n   # sum=sum+a\r\n   # avg=sum/x\r\n#print(\"sum of first \",a,\"num is\",avg,\"sum of the num:\",sum)\r\n#print(\"sum of number\",sum(range(1,8,2)))\r\n","sub_path":"kanumpract.py","file_name":"kanumpract.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
{"seq_id":"261878522","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport itertools\nfrom collections import defaultdict\nimport array\nimport logging\nfrom typing import (Iterable,\n                    Dict,\n                    ValuesView,\n                    cast,\n                    List,\n                    Set,\n                    Generator,\n                    Sequence,\n                    Tuple)\nfrom dedupe._typing import Clusters, RecordID, Links\nimport numpy\nimport fastcluster\nimport hcluster\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef connected_components(edgelist: numpy.ndarray,\n                         max_components: int) -> Generator[numpy.ndarray, None, None]:\n\n    if len(edgelist) == 0:\n        # an empty generator simply returns; raising StopIteration inside a\n        # generator is a RuntimeError under PEP 479\n        return\n\n    components = union_find(edgelist['pairs'])\n    for component in components:\n        sub_graph = edgelist[component]\n        n_components = len(numpy.unique(sub_graph['pairs']))\n\n        if n_components > max_components:\n            min_score = numpy.min(sub_graph['score'])\n            min_score_logit = numpy.log(min_score) - numpy.log(1 - min_score)\n            threshold = 1 / (1 + numpy.exp(-min_score_logit - 1))\n            logger.warning('A component contained %s elements. '\n                           'Components larger than %s are '\n                           're-filtered. The threshold for this '\n                           'filtering is %s' % (n_components,\n                                                max_components,\n                                                threshold))\n            filtered_sub_graph = sub_graph[sub_graph['score'] > threshold]\n            for sub_graph in connected_components(filtered_sub_graph,\n                                                  max_components):\n                yield sub_graph\n        else:\n            yield sub_graph
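\n\n\n# Sketch of the structured-array input connected_components expects\n# (illustrative; the toy values are assumptions):\n#\n#     edges = numpy.array([((1, 2), 0.9), ((2, 3), 0.8), ((10, 11), 0.5)],\n#                         dtype=[('pairs', 'u8', 2), ('score', 'f4')])\n#     for sub_graph in connected_components(edges, max_components=30000):\n#         print(sub_graph['pairs'])   # one linked component per sub_graph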
\n\n\ndef union_find(edgelist: numpy.ndarray) -> ValuesView[Sequence[int]]:\n\n    root: Dict[RecordID, RecordID] = {}\n    components = {}\n    component_size = {}\n\n    # edgelist is the strided 'pairs' field of a structured array, so with\n    # ['external_loop'] the iterator yields one contiguous (a, b) row at a time\n    it = numpy.nditer(edgelist, ['external_loop'])\n\n    for i, (a, b) in enumerate(it):\n        root_a = root.get(a)\n        root_b = root.get(b)\n\n        if root_a is None and root_b is None:\n            # assuming that it will be a while before we are handling\n            # edgelists of much more than 4 billion elements we will\n            # use the 'I' type\n            components[a] = array.array('I', [i])\n            component_size[a] = 2\n            root[a] = root[b] = a\n        elif root_a is None or root_b is None:\n            if root_a is None:\n                b = a\n                root_a = root_b\n            components[root_a].append(i)\n            component_size[root_a] += 1\n            root_a = cast(RecordID, root_a)  # AH upgrade\n            root[b] = root_a\n        elif root_a != root_b:\n            if component_size[root_a] < component_size[root_b]:\n                root_a, root_b = root_b, root_a\n\n            components[root_a].extend(components[root_b])\n            components[root_a].append(i)\n\n            component_b = numpy.unique(edgelist[components[root_b]])\n\n            for node in component_b:\n                root[node] = root_a\n\n            component_size[root_a] += len(component_b)\n\n            del components[root_b]\n            del component_size[root_b]\n\n        else:\n            components[root_a].append(i)\n\n    return components.values()
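\n\n\n# union_find in isolation (a sketch; as in connected_components above, it is\n# fed the 'pairs' field of a structured edge array):\n#\n#     edges = numpy.array([((1, 2), 0.9), ((2, 3), 0.8), ((10, 11), 0.5)],\n#                         dtype=[('pairs', 'u8', 2), ('score', 'f4')])\n#     for component in union_find(edges['pairs']):\n#         print(list(component))   # row indices of the edges in that component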
\n\n\ndef condensed_distance(dupes: numpy.ndarray) -> Tuple[Dict[int, RecordID],\n                                                      numpy.ndarray,\n                                                      int]:\n    \"\"\"Convert the pairwise list of distances in dupes to a ``condensed distance vector``.\n\n    The condensed distance vector is required by the hierarchical clustering\n    algorithms: http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html\n\n    Let's suppose we have 3 records. Then there are 3 choose 2 = 3 possible pairs. Our\n    distance matrix might look like this:\n\n    ::\n            a          b          c\n        a   0          d(a, b)    d(a, c)\n        b   d(b, a)    0          d(b, c)\n        c   d(c, a)    d(c, b)    0\n\n    Since that contains some redundant information, we create a condensed distance vector\n    from the upper right triangular of the distance matrix. We just read from\n    left to right.\n\n    ::\n\n        [d(a, b), d(a, c), d(b, c)]\n\n    The formula for an index of the condensed matrix is\n\n        index = {N choose 2} - {N-row choose 2} + (col - row - 1)\n              = N*(N-1)/2 - (N-row)*(N-row-1)/2 + col - row - 1\n                ^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^\n                matrix_length        row_step\n\n    where (row, col) is the index of an uncondensed square N x N distance matrix.\n\n    Returns:\n        i_to_id: (dict) dictionary that maps the distance matrix to the record_ids.\n        condensed_distances: (np.array) a 1 x (N choose 2) dimensional vector, containing all the\n            pair-wise distances flattened into a 1D array.\n        N: (int)\n    \"\"\"\n\n    candidate_set = numpy.unique(dupes['pairs'])\n\n    i_to_id = dict(enumerate(candidate_set))\n\n    ids = candidate_set.searchsorted(dupes['pairs'])\n    row = ids[:, 0]\n    col = ids[:, 1]\n\n    N = len(candidate_set)\n    matrix_length = N * (N - 1) / 2\n\n    row_step = (N - row) * (N - row - 1) / 2\n    index = matrix_length - row_step + col - row - 1\n\n    condensed_distances = numpy.ones(int(matrix_length), 'f4')\n    condensed_distances[index.astype(int)] = 1 - dupes['score']\n\n    return i_to_id, condensed_distances, N\n\n\ndef cluster(dupes: numpy.ndarray,\n            cluster_threshold: float = 0.5,\n            max_components: int = 30000,\n            id_to_match: str = None) -> Clusters:\n    \"\"\"\n    Takes in a list of duplicate pairs and clusters them into a\n    list of records that all refer to the same entity based on a given\n    threshold\n\n    `https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.cluster.hierarchy.fcluster.html`\n\n    Args:\n        dupes: (np.array)[tuple(list[str], float)] A list of tuples, where each tuple\n            contains an id pair and a probability that they are a match:\n            id_pair_tuple: ([record_id_1, record_id_2], prob)\n            dtype: np.dtype([('pairs', '<u8', 2), ('score', 'f4')])\n    \"\"\"\n    distance_threshold = 1 - cluster_threshold\n    score_threshold = cluster_threshold\n\n    for sub_graph in connected_components(dupes, max_components):\n        if len(sub_graph) > 1:\n            i_to_id, condensed_distances, N = condensed_distance(sub_graph)\n            logger.debug(f\"{condensed_distances}\")\n            linkage = fastcluster.linkage(condensed_distances,\n                                          method='centroid',\n                                          preserve_input=True)\n            partition = hcluster.fcluster(linkage,\n                                          distance_threshold,\n                                          criterion='distance')\n\n            clusters: Dict[int, List[int]] = defaultdict(list)\n            logger.debug(f\"Partition: {partition}\")\n            logger.debug(f\"Linkage: {linkage}\")\n            for i, cluster_id in enumerate(partition):\n                clusters[cluster_id].append(i)\n\n            logger.info(f\"Clusters: {clusters}\")\n            for cluster in clusters.values():\n                if len(cluster) > 1:\n                    scores = confidences(cluster, condensed_distances, N)\n                    logger.info(f\"Cluster Ids and scores: {tuple(i_to_id[i] for i in cluster)}, {scores}\")\n                    ids = [i_to_id[i] for i in cluster]\n                    if id_to_match in ids and id_to_match is not None:\n                        yield tuple(ids), scores\n                    elif id_to_match is None:\n                        yield tuple(ids), scores\n\n        else:\n            (ids, score), = sub_graph\n            if score > score_threshold and id_to_match in ids and id_to_match is not None:\n                # logger.info(tuple(ids), ((score,) * 2))\n                yield tuple(ids), (score,) * 2\n            elif score > score_threshold and id_to_match is None:\n                yield tuple(ids), (score,) * 2\n\n\ndef confidences(cluster: Sequence[int],\n                condensed_distances: numpy.ndarray,\n                d: int) -> numpy.ndarray:\n    '''\n    We calculate a per record score that is similar to a standard\n    deviation. 
The main reason is that these record scores can be\n used to calculate the standard deviation of an entire cluster,\n which is a reasonable metric for clusters.\n '''\n\n scores_d = dict.fromkeys(cluster, 0.0)\n squared_distances = condensed_distances ** 2\n for i, j in itertools.combinations(cluster, 2):\n index = d * (d - 1) / 2 - (d - i) * (d - i - 1) / 2 + j - i - 1\n squared_dist = squared_distances[int(index)]\n scores_d[i] += squared_dist\n scores_d[j] += squared_dist\n scores = numpy.array([score for _, score in sorted(scores_d.items())])\n scores /= len(cluster) - 1\n scores = numpy.sqrt(scores)\n scores = 1 - scores\n return scores\n\n\ndef greedyMatching(dupes: numpy.ndarray, threshold: float = 0.5) -> Links: # AH upgrade threshold\n A: Set[RecordID] = set()\n B: Set[RecordID] = set()\n\n dupes.sort(order='score')\n dupes = dupes[::-1]\n\n for (a, b), score in dupes:\n if a not in A and b not in B:\n A.add(a)\n B.add(b)\n\n yield (a, b), score\n\n\ndef gazetteMatching(scored_blocks: Iterable[numpy.ndarray],\n threshold: float = 0,\n n_matches: int = 1) -> Links: # AH upgrade threshold\n\n for block in scored_blocks:\n block = block[block['score'] > threshold]\n block.sort(order='score')\n block = block[::-1]\n\n if n_matches:\n yield block[:n_matches].copy()\n else:\n yield block.copy()\n\n\ndef pair_gazette_matching(scored_pairs: numpy.ndarray,\n threshold: float = 0.0,\n n_matches: int = 1) -> Links:\n\n scored_pairs.sort(order='pairs')\n\n group_key = scored_pairs['pairs'][:, 0]\n change_points = numpy.where(numpy.roll(group_key, 1) != group_key)[0]\n scored_blocks = numpy.split(scored_pairs, change_points)\n\n for match in gazetteMatching(scored_blocks, threshold, n_matches):\n if match:\n yield from match\n","sub_path":"dedupe/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":10925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"451472568","text":"from bs4 import BeautifulSoup\r\nimport re\r\nimport os\r\nimport json\r\n\r\npath='C:\\\\Alessandro\\\\Python\\\\Master - Gigli\\\\ikea_ok'\r\n\r\nproducts={}\r\nfilelist=[x for x in os.listdir(path) if '.htm' in x]\r\nfor filename in filelist: \r\n #print('filename: ',filename)\r\n file_html=open(filename, encoding=\"utf8\")\r\n soup = BeautifulSoup(file_html, 'html.parser')\r\n #all_things=soup.find_all()\r\n res=soup.find_all('div', class_ = 'productDetails')\r\n title=soup.find('title').text\r\n #print('TITLE')\r\n #print(title)\r\n if title not in products:\r\n products[title]=[]\r\n #print(res)\r\n for tag in res:\r\n t=False\r\n dt={}\r\n dt['productTitle']='Not Available'\r\n dt['productDesp']='Not Available'\r\n dt['price']='Not Available'\r\n dt['previousPrice']='Not Available'\r\n dt['ratingsCount']='Not Available'\r\n #print('+++++++++++++++++++++')\r\n productTitle=tag.find('span', class_ = 'productTitle floatLeft')\r\n if productTitle is not None: \r\n t=True \r\n dt['productTitle']=productTitle.text\r\n #print(productTitle.text)\r\n\r\n productDesp=tag.find('span', class_ = 'productDesp')\r\n if productDesp is not None:\r\n t=True\r\n dt['productDesp']=productDesp.text\r\n #print(productDesp.text)\r\n\r\n regularPrice=tag.find('span', class_ = 'price regularPrice')\r\n redPrice=tag.find(class_ = 'redPrice')\r\n previousPrice=tag.find('span', class_ = 'previousPrice')\r\n \r\n if redPrice is not None or regularPrice is not None:\r\n if redPrice is not None:\r\n #print(filename) \r\n #print('redPrice.text') \r\n 
#print(redPrice.text)\r\n price=redPrice.text.replace('.','')\r\n price=price[0:15]\r\n if previousPrice is not None:\r\n previousPrice=previousPrice.text.replace('.','')\r\n #previousPrice=previousPrice[0:15]\r\n #print('redPrice: ',redPrice)\r\n previousPrice=re.sub('[^0-9,]', \"\", previousPrice)\r\n previousPrice=previousPrice.replace(',','.')\r\n dt['previousPrice']=round(float(previousPrice),2)\r\n #print('previousPrice: ',previousPrice)\r\n else:\r\n #print(regularPrice.text) \r\n price=regularPrice.text.replace('.','')\r\n price=price[0:15]\r\n t=True \r\n if price.find('/')>=0:\r\n price=regularPrice.text[0:price.find('/')]\r\n price=re.sub('[^0-9,]', \"\", price)\r\n else: \r\n price=re.sub('[^0-9,]', \"\", price)\r\n price=price.replace(',','.')\r\n #print('price: ',price)\r\n dt['price']=float(price)\r\n\r\n #rating=tag.find('a', class_ = 'ratingsCount')\r\n rating=tag.find('div', class_ = 'ratingStarsOnLarge') #'ratingStarsOffLarge') #\r\n if rating is not None:\r\n t=True\r\n #ratingperc=tag.find('div', class_ = 'ratingStarsOnLarge')\r\n #width=re.sub('[^0-9.]', \"\", ratingperc.get('style'))\r\n width=re.sub('[^0-9.]', \"\", rating.get('style'))\r\n dt['ratingsCount']=round((float(len(rating.text))*float(width))/100,2)\r\n #print('dt[''ratingsCount'']: ',dt['ratingsCount'])\r\n if t: \r\n products[title].append(dt)\r\n \r\nprint('*********************************************')\r\n'''\r\nfor item in products:\r\n print('-----------------------')\r\n print(item)\r\n for it in products[item]:\r\n print(it)\r\n'''\r\nwith open('products.json', 'w', encoding=\"utf-8\", newline='\\r\\n') as fp:\r\n json.dump(products, fp, indent=3, ensure_ascii=False)\r\nfp.close()\r\nprint('Fine.')\r\n \r\n \r\n","sub_path":"processing_BS_ikea_10.py","file_name":"processing_BS_ikea_10.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"647076731","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom grid_world import standard_grid, negative_grid\n\nGAMMA = 0.9\nALL_POSSIBLE_ACTIONS = ('U','D','L','R')\n\n# This script implements the monte carlo exploring-starts method for finding optimal policy.\n\ndef print_values(V,g):\n for i in range(g.width):\n print(\"----------------------------\")\n for j in range(g.height):\n v = V.get((i,j),0)\n if v >=0:\n print(' %.2f|' % v, end=\" \")\n else:\n print('%.2f|'%v, end= \" \")\n print (\"\")\n\ndef print_policy(P,g):\n for i in range(g.width):\n print(\"----------------------------\")\n for j in range(g.height):\n a = P.get((i,j),' ')\n print(' %s |' %a, end = ' ')\n print()\n\ndef play_game(grid, policy, manual = False):\n if manual:\n input()\n print('**************Start One Game**************')\n start_states = list(grid.actions.keys())\n start_idx = np.random.choice(len(start_states))\n # exploring start method. 
Because MC needs to start at any state.\n grid.set_state(start_states[start_idx])\n\n if manual:\n print('start_states: ' + str(start_states))\n print_policy(policy, grid)\n print('start_idx: ' + str(start_idx))\n print('Start State: ' + str(grid.current_state()))\n print()\n input()\n\n s = grid.current_state()\n a = np.random.choice(ALL_POSSIBLE_ACTIONS)\n print('First random Action: ' + str(a))\n states_actions_rewards = [(s,a,0)]\n seen_states = set()\n seen_states.add(grid.current_state())\n num_steps = 0\n while True:\n r = grid.move(a)\n s = grid.current_state()\n print('Moved to State: ' + str(s) + ' with reward: ' + str(r))\n num_steps +=1\n if s in seen_states:\n # hack so that we don't end up in an infinitely long episode\n # bumping into the wall repeatedly\n reward = -10 / num_steps\n print('State already seen, reward changed to : ' + str(reward))\n states_actions_rewards.append((s,None, reward))\n print('Game is Over')\n break\n elif grid.game_over():\n print('Game is Over')\n states_actions_rewards.append((s,None,r))\n break\n else:\n a = policy[s]\n states_actions_rewards.append((s,a,r))\n print('New Action According to Policy: ' + str(a))\n \n seen_states.add(s)\n print('state: ' + str(grid.current_state()))\n print('states_actions_rewards: ' + str(states_actions_rewards))\n print()\n if manual:\n input()\n G = 0\n states_actions_returns = []\n first = True\n for s, a, r in reversed(states_actions_rewards):\n if first:\n first = False\n else:\n states_actions_returns.append((s,a,G))\n G = r + GAMMA*G\n states_actions_returns.reverse() # we want it to be in order of state visited\n print('states_and_returns: ' + str(states_actions_returns))\n return states_actions_returns\n\ndef max_dict(d):\n # returns the argmax (key) and max (value) from a dictionary\n # put this into a function since we are using it so often\n max_key = None\n max_val = float('-inf')\n for k, v in d.items():\n if v > max_val:\n max_val = v\n max_key = k\n return max_key, max_val\n\nif __name__ == '__main__':\n grid = negative_grid(step_cost=-0.9)\n\n print ('rewards: ')\n print_values(grid.rewards,grid)\n\n # state -> action\n # initialize a random policy\n policy = {}\n for s in grid.actions.keys():\n policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)\n\n # initialize Q(s,a) and returns\n Q = {}\n returns = {} # dictionary of state -> list of returns we've received\n states = grid.all_states()\n for s in states:\n if s in grid.actions:\n Q[s] = {}\n for a in ALL_POSSIBLE_ACTIONS:\n Q[s][a] = 0 # needs to be initialized to something so we can argmax\n returns[(s,a)] = []\n else:\n pass\n\n #repeat until convergence\n deltas = []\n\n manual = False\n \n for t in range(2000):\n if t % 1000 == 0:\n print(t)\n \n # generate an episode using pi\n biggest_change = 0\n states_actions_returns = play_game(grid, policy, manual)\n seen_state_action_pairs = set()\n print('***Policy improvement***')\n for s, a, G in states_actions_returns:\n # check if we have already seen s\n # called 'first-visit' MC policy evaluation\n sa = (s,a)\n print('State: ' + str(s))\n print('Action: ' + str(a))\n if sa not in seen_state_action_pairs:\n old_q = Q[s][a]\n print('old_q: ' + str(Q[s][a]))\n returns[sa].append(G)\n Q[s][a] = np.mean(returns[sa])\n print('new Q[s][a]: ' + str(Q[s][a]))\n biggest_change = max(biggest_change, np.abs(old_q - Q[s][a]))\n print(\"biggest_change: \" + str(biggest_change))\n if manual:\n input()\n seen_state_action_pairs.add(sa)\n print('Q[s]: ' + str(Q[s]))\n deltas.append(biggest_change)\n\n #update 
policy\n for s in policy.keys():\n policy[s] = max_dict(Q[s])[0]\n \n plt.plot(deltas)\n plt.show()\n\n print('final policy: ')\n print_policy(policy, grid)\n\n # find V\n V = {}\n for s, Qs in Q.items():\n V[s] = max_dict(Q[s])[1]\n \n print('final values:')\n print_values(V, grid)","sub_path":"monte_carlo_es.py","file_name":"monte_carlo_es.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"281586500","text":"from pylab import *\nfrom scipy import *\nimport copy\nimport txt_data_processing, rwkbode, pylab_util\n\nfrom systemid import Model\n\n#load log downsampled and raw Bode data\nlog_ds_mod = 'swept_sine_amp_75_July_07_2009_log_downsampled'\nlog_ds_data = txt_data_processing.load_avebode_data_set(log_ds_mod)\nlog_ds_f = log_ds_data.f\na_theta_log_ds = log_ds_data.find_bode('a', 'theta')\n\nraw_mod = 'swept_sine_amp_75_July_07_2009_avebodes'\nraw_data = txt_data_processing.load_avebode_data_set(raw_mod)\nraw_f = raw_data.f\na_theta_raw = raw_data.find_bode('a', 'theta')\n\n######################\n#\n# Develop a model\n#\n######################\n\n#both modes multiplied together\ntf_c1 = Model('g*s**2*w1**2*w2**2', \\\n '(s**2+2*z1*w1*s+w1**2)*(s**2+2*z2*w2*s+w2**2)' , \\\n {'w1':2.5*2*pi,'z1':0.03,'w2':17.8*2*pi,'z2':0.01,'g':0.005}, \\\n 'all')\nmodel_bode_c1 = rwkbode.Bode_From_TF(tf_c1, raw_f, input='theta', output='a')\n\n#adding two modes together with different gains\nnum1 = 'g1*s**2*w1**2'\nden1 = '(s**2+2*z1*w1*s+w1**2)'\ndict1 = {'w1':2.5*2*pi,'z1':0.03,'g1':0.003}\ntf1 = Model(num1, \\\n den1, \\\n dict1, \\\n 'all')\nmodel_bode_m1 = rwkbode.Bode_From_TF(tf1, raw_f, input='theta', output='a')\n\nnum2 = 'g2*s**2*w2**2'\nden2 = '(s**2+2*z2*w2*s+w2**2)'\ndict2 = {'w2':17.5*2*pi,'z2':0.03,'g2':-0.0005}\ntf2 = Model(num2, \\\n den2, \\\n dict2, \\\n 'all')\nmodel_bode_m2 = rwkbode.Bode_From_TF(tf2, raw_f, input='theta', output='a')\n\ndict3 = copy.copy(dict2)\ndict3.update(dict1)\nnum3 = num1 + '*' + den2 + '+' + num2 + '*' + den1\nden3 = den1 + '*' + den2\ntf3 = Model(num3, \\\n den3, \\\n dict3, \\\n 'all')\nmodel_bode_c2 = rwkbode.Bode_From_TF(tf3, raw_f, input='theta', output='a')\n\n\n\n#Plot Experimental and Model Bodes\na_theta_fi = 1\nrwkbode.GenBodePlot(a_theta_fi, raw_f, a_theta_raw)\nrwkbode.GenBodePlot(a_theta_fi, log_ds_f, a_theta_log_ds, clear=False, \\\n linetype='o')\nrwkbode.GenBodePlot(a_theta_fi, raw_f, model_bode_c1, clear=False, \\\n linetype='k-')\nrwkbode.GenBodePlot(a_theta_fi, raw_f, model_bode_m1, clear=False, \\\n linetype='-')\nrwkbode.GenBodePlot(a_theta_fi, raw_f, model_bode_m2, clear=False, \\\n linetype='-')\nrwkbode.GenBodePlot(a_theta_fi, raw_f, model_bode_c2, clear=False, \\\n linetype='-')\npylab_util.SetPhaseLim(a_theta_fi, [-200, 200])\npylab_util.SetMagLim(a_theta_fi, [-10, 45])\npylab_util.SetFreqLim(a_theta_fi, [0.5, 30])\nshow()\n","sub_path":"July_07_2009/fit_by_hand.py","file_name":"fit_by_hand.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"582178580","text":"import const\r\nfrom math import log2, pow\r\n\r\nclass Converter:\r\n A4 = 440\r\n C0 = A4*pow(2, -4.75)\r\n NOTES = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\r\n \r\n def __init__(self):\r\n pass\r\n\r\n def pitch(self, freq):\r\n h = round(12*log2(freq/self.C0))\r\n octave = h // 12\r\n n = h % 12\r\n return self.NOTES[n] + 
str(octave)\r\n\r\n def freq(self, name):\r\n if name[1] == '#':\r\n octave = int(name[2:])\r\n steps = self.NOTES.index(name[0:2].upper())\r\n else:\r\n octave = int(name[1])\r\n steps = self.NOTES.index(name[0].upper())\r\n return (self.C0*(2**octave))*(2**(steps/12))\r\n\r\nclass Note:\r\n \"\"\" Supplies a class to define a note object \"\"\"\r\n\r\n def __init__(self, pitch=None, duration=None):\r\n self._pitch = pitch\r\n self._duration = duration\r\n \r\n def pitch(self):\r\n return str(self._pitch)\r\n \r\n def duration(self):\r\n return str(self._duration)\r\n \r\n def __str__(self):\r\n return f'Note with pitch {self.pitch()} and duration {self.duration()}'\r\n\r\n\r\nclass TabNote:\r\n \"\"\" Supplies a class to define a tab note object \"\"\"\r\n\r\n def __init__(self, string_num=None, fret_num=None):\r\n self._string_num = string_num\r\n self._fret_num = fret_num\r\n \r\n def string_num(self):\r\n return self._string_num\r\n\r\n def fret_num(self):\r\n return self._fret_num\r\n \r\n def __str__(self):\r\n return f'Tab note with string {self._string_num} and fret {self._fret_num}'\r\n\r\nclass Staff:\r\n \"\"\" Supplies a class to create a staff object \"\"\"\r\n\r\n NOTE_SPACING = 3\r\n STAFF_LENGTH = 100\r\n\r\n def __init__(self):\r\n self.staff = [[\"-\" for _ in range(self.STAFF_LENGTH)] for _ in range(const.NUM_STRINGS)]\r\n self.pos = self.NOTE_SPACING\r\n self.beat_within_measure = 0\r\n \r\n def add_note(self, tab_note, note):\r\n self.staff[tab_note.string_num() - 1][self.pos] = str(tab_note.fret_num())\r\n self.pos += self.NOTE_SPACING\r\n self.beat_within_measure += float(4/int(note.duration()))\r\n\r\n if self.beat_within_measure == 4:\r\n for i in range(const.NUM_STRINGS):\r\n self.staff[i][self.pos] = '|'\r\n self.beat_within_measure = 0\r\n self.pos += self.NOTE_SPACING\r\n \r\n def __str__(self):\r\n for line in self.staff:\r\n for note in line:\r\n print(note, end=\"\")\r\n print(\"\")\r\n return \"\"\r\n\r\nclass Tab:\r\n \"\"\" Supplies a class to generate guitar tabs from a list of note \"\"\"\r\n\r\n def __init__(self, notes=None):\r\n self.notes = notes\r\n\r\n def generate(self):\r\n self.tab_notes = [self.convert_pitch_to_tab(note) for note in notes]\r\n self.staff = Staff()\r\n\r\n for i, note in enumerate(self.tab_notes):\r\n self.staff.add_note(note, notes[i])\r\n\r\n print(self.staff)\r\n\r\n def convert_pitch_to_tab(self, pitch):\r\n c = Converter()\r\n freq = c.freq(pitch.pitch())\r\n matches = []\r\n for string_num, string_note, in enumerate(const.GUITAR_TUNING, 1):\r\n for step in range(const.GUITAR_FRETS):\r\n guitar_freq = c.freq(string_note) * (2**(step/12))\r\n if round(guitar_freq, 1) == round(freq, 1):\r\n matches.append(TabNote(string_num, step))\r\n if matches:\r\n lowest = TabNote(0, 24)\r\n for match in matches:\r\n fret_num = match.fret_num()\r\n if fret_num < lowest.fret_num():\r\n lowest = match\r\n return lowest\r\n \r\n def _print_note(self, note):\r\n for i in range(1,7):\r\n if i == note.string_num():\r\n pass\r\n \r\n\r\nif __name__ == '__main__':\r\n notes = [Note(\"C3\", const.QUARTER), \r\n Note(\"D3\", const.QUARTER),\r\n Note(\"E3\", const.QUARTER),\r\n Note(\"F3\", const.QUARTER),\r\n Note(\"G3\", const.QUARTER),\r\n Note(\"A3\", const.QUARTER),\r\n Note(\"B3\", const.QUARTER),\r\n Note(\"C4\", const.QUARTER),\r\n Note(\"C3\", const.QUARTER), \r\n Note(\"D3\", const.QUARTER),\r\n Note(\"E3\", const.QUARTER),\r\n Note(\"F3\", const.QUARTER),\r\n Note(\"G3\", const.QUARTER),\r\n Note(\"A3\", const.QUARTER),\r\n Note(\"B3\", 
const.QUARTER),\r\n Note(\"C4\", const.QUARTER)]\r\n\r\n tab = Tab(notes)\r\n tab.generate()\r\n","sub_path":"tab.py","file_name":"tab.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"392879967","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 2017 jem@seethis.link\n# Licensed under the MIT license (http://opensource.org/licenses/MIT)\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom layout.common import try_get, ParseError\n\nimport re\nimport math\nimport struct\nimport hexdump\n\nfrom io_map.io_mapper import get_io_mapper_for_chip\n\nMATRIX_SCANNER_MODE_NO_MATRIX = 0x00\nMATRIX_SCANNER_MODE_COL_ROW = 0x01\nMATRIX_SCANNER_MODE_ROW_COL = 0x02\nMATRIX_SCANNER_MODE_PINS = 0x03\n\nDEFAULT_DEBOUNCE_PRESS_TIME = 5\nDEFAULT_DEBOUNCE_RELEASE_TIME = (2*DEFAULT_DEBOUNCE_PRESS_TIME)\nDEFAULT_RELEASE_TRIGGER_TIME = 3\nDEFAULT_PRESS_TRIGGER_TIME = 1\nDEFAULT_PARASITIC_DISCHARGE_DELAY_IDLE = 2.0\nDEFAULT_PARASITIC_DISCHARGE_DELAY_DEBOUNCE = 10.0\n\nMAX_NUM_ROWS = 10\n\nclass ScanMode:\n NO_MATRIX = MATRIX_SCANNER_MODE_NO_MATRIX\n COL_ROW = MATRIX_SCANNER_MODE_COL_ROW\n PINS = MATRIX_SCANNER_MODE_PINS\n MODE_MAP = {\n 'no_matrix': NO_MATRIX,\n 'col_row': COL_ROW,\n 'pins': PINS,\n }\n\n def __init__(self, scan_mode_dict, debug_hint):\n self.parse_header(scan_mode_dict, debug_hint)\n\n self.debounce_time_press = DEFAULT_DEBOUNCE_PRESS_TIME\n self.matrix_map = None\n self.debounce_time_release = DEFAULT_DEBOUNCE_RELEASE_TIME\n self.trigger_time_press = DEFAULT_PRESS_TRIGGER_TIME\n self.trigger_time_release = DEFAULT_RELEASE_TRIGGER_TIME\n self.parasitic_discharge_delay_idle = DEFAULT_PARASITIC_DISCHARGE_DELAY_IDLE\n self.parasitic_discharge_delay_debouncing = DEFAULT_PARASITIC_DISCHARGE_DELAY_DEBOUNCE\n\n if 'matrix_map' in scan_mode_dict:\n self.parse_matrix_map(scan_mode_dict['matrix_map'], debug_hint)\n\n # uint8_t trigger_time_press; // The key must be down this long before being registered (ms)\n # uint8_t trigger_time_release; // The key must be up this long before being registered (ms)\n\n # // Both delays are measured on a scale of 0-48µs\n # uint8_t parasitic_discharge_delay_idle; // How long to hold a row low before reading the columns\n # uint8_t parasitic_discharge_delay_debouncing; // How long to hold a row low when a key is debouncing\n if 'debounce_time_press' in scan_mode_dict:\n self.debounce_time_press = scan_mode_dict['debounce_time_press']\n\n if 'debounce_time_release' in scan_mode_dict:\n self.debounce_time_release = scan_mode_dict['debounce_time_release']\n\n if 'trigger_time_press' in scan_mode_dict:\n self.trigger_time_press = scan_mode_dict['trigger_time_press']\n\n if 'trigger_time_release' in scan_mode_dict:\n self.trigger_time_release = scan_mode_dict['trigger_time_release']\n\n if 'parasitic_discharge_delay_idle' in scan_mode_dict:\n delay = scan_mode_dict['parasitic_discharge_delay_idle']\n if (0 < delay > 48.0):\n raise ParseError(\"parasitic_discharge_delay_idle must less than 48.0µs\")\n self.parasitic_discharge_delay_idle = delay\n\n if 'parasitic_discharge_delay_debouncing' in scan_mode_dict:\n delay = scan_mode_dict['parasitic_discharge_delay_debouncing']\n if (0 < delay > 48.0):\n raise ParseError(\"parasitic_discharge_delay_debouncing must less than 48.0µs\")\n self.parasitic_discharge_delay_debouncing = delay\n\n self.get_pin_numbers_for_device(None)\n\n\n def __str__(self):\n if self.mode == 
ScanMode.NO_MATRIX:\n return \"ScanMode(mode=ScanMode.NO_MATRIX)\"\n elif self.mode == ScanMode.COL_ROW:\n return \"ScanMode(mode=ScanMode.COL_ROW, rows={}, cols={})\".format(\n self.rows, self.col_count)\n\n def parse_header(self, sm_raw, debug_hint):\n self.mode = try_get(sm_raw, 'mode', debug_hint, val_type=str)\n mode = self.mode\n if self.mode not in ScanMode.MODE_MAP:\n raise ParseError(\"Unsupported scan mode '{}' for device '{}'\"\n .format(self.mode, debug_hint))\n self.mode = ScanMode.MODE_MAP[self.mode]\n\n self.col_pins = None\n self.row_pins = None\n self.row_count = 0\n self.col_count = 0\n\n if self.mode == ScanMode.NO_MATRIX:\n pass\n elif self.mode == ScanMode.COL_ROW:\n # Get the row pins\n row_data = try_get(sm_raw, 'rows', debug_hint, val_type=[int, list])\n if isinstance(row_data, int):\n self.row_pins = None\n self.row_count = row_data\n elif isinstance(row_data, list):\n self.row_pins = row_data\n self.row_count = len(self.row_pins)\n\n # Get the column pins\n col_data = try_get(sm_raw, 'cols', debug_hint, val_type=[int, list])\n if isinstance(col_data, int):\n self.col_pins = None\n self.col_count = col_data\n elif isinstance(col_data, list):\n self.col_pins = col_data\n self.col_count = len(self.col_pins)\n elif self.mode == ScanMode.PINS:\n # TODO:\n # self.row_count = 1\n # self.col_count = 10\n raise ParseError(\"pins not implemented\")\n else:\n raise ParseError(\"InternalError: Unknown ScanMode({})\".format(self.mode))\n\n def get_pin_numbers_for_device(self, target_device):\n # TODO: don't hard code the chip id, instead obtain it from the device\n ATMEL_ID = 0x03eb0000\n self.pin_mapper = get_io_mapper_for_chip(ATMEL_ID | 0x000A)\n\n if self.row_pins == None:\n row_pins = self.pin_mapper.get_default_rows(self.row_count)\n else:\n row_pins =[self.pin_mapper.get_pin_number(pin) for pin in self.row_pins]\n\n if len(row_pins) < MAX_NUM_ROWS:\n row_pins += [0] * (MAX_NUM_ROWS-len(row_pins))\n elif len(row_pins) > MAX_NUM_ROWS:\n raise ParseError(\"Device only supports a maximum of 10 rows, got '{}'\"\n .format(len(row_pins)))\n\n if self.col_pins == None:\n col_pins = self.pin_mapper.get_default_cols(self.col_count)\n else:\n col_pins = [self.pin_mapper.get_pin_number(pin) for pin in self.col_pins]\n\n self.col_pin_numbers = col_pins\n self.row_pin_numbers = row_pins\n\n if self.mode != ScanMode.NO_MATRIX:\n self.max_col_pin_num = max(self.col_pin_numbers)\n self.max_key_num = max(self.inverse_map)\n else:\n self.max_col_pin_num = 0\n self.max_key_num = 0\n\n\n def generate_pin_maps(self, target_device):\n col_pin_masks = self.pin_mapper.get_pin_masks_as_bytes(self.col_pin_numbers)\n\n return bytearray(self.row_pin_numbers) + col_pin_masks\n\n def generate_scan_mode_info(self):\n return struct.pack(' self.row_count*self.col_count:\n raise ParseError(\"Too many keys in matrix_map for '{}'\"\n \"got {} but expected at most {} (={}*{})\".format(\n kb_name, len(mmap_raw), self.row_count*self.col_count, self.row_count, self.col_count))\n matrix_map = []\n inverse_map = [0x00] * self.row_count * self.col_count\n for (key_pos, map_key) in enumerate(mmap_raw):\n # these values can be used as spaces and are ignored\n if map_key in ['none', '_'*4, '_'*5, '_'*6, '-'*4, '-'*5, '-'*6]:\n continue\n\n r, c = None, None\n try:\n results = re.match('r(\\d+)c(\\d+)', map_key)\n if results == None:\n raise ParseError\n r, c = results.groups()\n r, c = int(r), int(c)\n except (ParseError, TypeError):\n raise ParseError(\"Expected string of the form rXcY, but got '{}' \"\n \"in 
matrix_map '{}'\".format(map_key, kb_name))\n key_num = self.col_count*r + c\n if r >= self.row_count or c >= self.col_count:\n raise ParseError(\"Key remap {} out of bounds \"\n \"rows={}, cols={} in device matrix_map '{}'\".format(map_key, self.row_count, self.col_count, kb_name))\n\n if key_num in matrix_map:\n raise ParseError(\"The key '{}' appears twice in the matrix_map \"\n \"of '{}'\".format(map_key, kb_name))\n matrix_map.append(key_num)\n inverse_map[key_num] = key_pos\n\n self.matrix_map = matrix_map\n self.inverse_map = inverse_map\n","sub_path":"host-software/layout/scan_mode.py","file_name":"scan_mode.py","file_ext":"py","file_size_in_byte":10162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"45979223","text":"\"\"\"Message model tests.\"\"\"\n\n# run these tests like:\n#\n# python -m unittest test_user_model.py\n\n\nimport os\nfrom unittest import TestCase\n\nfrom models import db, User, Message, Follows\n\n# BEFORE we import our app, let's set an environmental variable\n# to use a different database for tests (we need to do this\n# before we import our app, since that will have already\n# connected to the database\n\nos.environ['DATABASE_URL'] = \"postgresql:///warbler-test\"\n\n\n# Now we can import app\n\nfrom app import app\n\n# Create our tables (we do this here, so we only create the tables\n# once for all tests --- in each test, we'll delete the data\n# and create fresh new clean test data\n\ndb.drop_all()\ndb.create_all()\n\nclass MessageModelTestCase(TestCase):\n \"\"\"Test models for messages.\"\"\"\n\n def setUp(self):\n \"\"\"Create test client, add sample data.\"\"\"\n\n User.query.delete()\n Message.query.delete()\n Follows.query.delete()\n\n self.client = app.test_client()\n\n def test_message_model(self):\n \"\"\"Does basic model work?\"\"\"\n\n # Create a test user\n u = User.signup(\n email=\"test@test.com\",\n username=\"testuser\",\n password=\"HASHED_PASSWORD\"\n )\n\n # Create test messages\n m1 = Message(text='Test message 1', user_id=1)\n m2 = Message(text='Test message 2', user_id=1)\n db.session.add(m1)\n db.session.add(m2)\n db.session.commit()\n\n # Test relationship between messages and user\n self.assertEqual(m1.user, u)\n self.assertEqual(m2.user, u)\n self.assertIn(m1, u.messages)\n self.assertIn(m2, u.messages)\n","sub_path":"test_message_model.py","file_name":"test_message_model.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"360122828","text":"# -*- encoding: utf-8 -*-\n# \n# Copyright © 2012 Robert Weidlich. All Rights Reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# 3. 
The name of the author may not be used to endorse or promote products\n#    derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE LICENSOR \"AS IS\" AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\n# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\n# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\n# OF SUCH DAMAGE.\n# \n# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\n\nfrom django import template\nregister = template.Library()\n\nimport urlparse\n\n@register.filter\ndef urlsplit(value, arg):\n    result = getattr(urlparse.urlparse(value), arg)\n    if arg == \"path\":\n        result = result[1:]\n    return result\n\n@register.filter\ndef startswith(value, arg):\n    \"\"\"Usage, {% if value|startswith:\"arg\" %}\"\"\"\n    if not value:\n        return False\n    return value.startswith(arg)\n\n@register.filter\ndef sameday(date1, date2):\n    \"\"\"Usage, {% if date1|sameday:date2 %}\"\"\"\n    return date1.year == date2.year and date1.month == date2.month and date1.day == date2.day\n\n@register.filter\ndef contains(value, arg):\n    \"\"\"Usage, {% if value|contains:\"arg\" %}\"\"\"\n    return arg in value\n\n\n@register.filter\ndef hashEpisode(episode, what):\n    if what == \"content\":\n        val = episode.description\n        val += episode.title\n        val += episode.currentSong\n        val += episode.genre\n        return hash(val)\n    if what == \"streams\":\n        if episode.channel:\n            val = episode.channel.stream_set.values_list('id', 'running')\n            return hash(val)\n        else:\n            return 0\n\n\n@register.filter\ndef object_name(value):\n    return value._meta.object_name\n\n\nfrom django.utils.http import urlquote\nimport time\nimport base64\nimport hashlib\nimport calendar\nimport datetime\n\n\n@register.filter\ndef secdownload_lighttpd(rel_path):\n    secret = getattr(settings, \"RP_DL_SECRET\", \"verysecret\")\n    uri_prefix = getattr(settings, \"RP_DL_PREFIX\", \"/dl/\")\n    hextime = \"%08x\" % time.time()\n    token = hashlib.md5((secret + rel_path + hextime).encode('utf-8')).hexdigest()\n    return '%s%s/%s%s' % (uri_prefix, token, hextime, urlquote(rel_path))\n\n\n@register.filter\ndef secdownload(rel_path):\n    secret = getattr(settings, \"RP_DL_SECRET\", \"verysecret\")\n    uri_prefix = getattr(settings, \"RP_DL_PREFIX\", \"/download/\")\n\n    future = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)\n    expiry = calendar.timegm(future.timetuple())\n    \n    secure_link = \"{key}{url}{expiry}\".format(key=secret,\n                                              url=rel_path,\n                                              expiry=expiry)\n    secure_hash = hashlib.md5(secure_link).digest()\n    encoded_hash = base64.urlsafe_b64encode(secure_hash).rstrip('=')\n\n    return \"{prefix}{ehash}/{expire}{url}\".format(prefix=uri_prefix, \n                                                  ehash=encoded_hash, \n                                                  expire=str(expiry), url=rel_path)\n\nimport os.path\n\n@register.filter\ndef fileexists(rel_path):\n    if rel_path[0] == \"/\":\n        rel_path = rel_path[1:]\n    base = getattr(settings, 'RP_DL_BASEDIR', '/tmp/')\n    path = os.path.join(base, rel_path)\n    return
os.path.exists(path)\n","sub_path":"radioportal/templatetags/standard.py","file_name":"standard.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"251377867","text":"import fenics\r\nimport phaseflow\r\nimport scipy.optimize as opt\r\n\r\n \r\ndef extract_pci_position(w):\r\n\r\n def theta(x):\r\n \r\n wval = w(fenics.Point(x))\r\n \r\n return wval[2]\r\n \r\n pci_pos = opt.newton(theta, 0.1)\r\n \r\n return pci_pos\r\n \r\n \r\ndef stefan_problem_solidify(Ste = 0.125,\r\n theta_h = 0.01,\r\n theta_c = -1.,\r\n theta_f = 0.,\r\n r = 0.01,\r\n dt = 0.01,\r\n end_time = 1.,\r\n nlp_absolute_tolerance = 1.e-4,\r\n initial_uniform_cell_count = 100,\r\n automatic_jacobian = False):\r\n\r\n \r\n mesh = fenics.UnitIntervalMesh(initial_uniform_cell_count)\r\n\r\n w, mesh = phaseflow.run(\r\n output_dir = 'output/convergence_stefan_problem_solidify/dt'+str(dt)+\r\n '/dx'+str(1./float(initial_uniform_cell_count))+'/',\r\n Pr = 1.,\r\n Ste = Ste,\r\n g = [0.],\r\n mesh = mesh,\r\n initial_values_expression = (\r\n \"0.\",\r\n \"0.\",\r\n \"(\"+str(theta_c)+\" - \"+str(theta_h)+\")*near(x[0], 0.) + \"+str(theta_h)),\r\n boundary_conditions = [\r\n {'subspace': 0, 'value_expression': [0.], 'degree': 3, 'location_expression': \"near(x[0], 0.) | near(x[0], 1.)\", 'method': \"topological\"},\r\n {'subspace': 2, 'value_expression': theta_c, 'degree': 2, 'location_expression': \"near(x[0], 0.)\", 'method': \"topological\"},\r\n {'subspace': 2, 'value_expression': theta_h, 'degree': 2, 'location_expression': \"near(x[0], 1.)\", 'method': \"topological\"}],\r\n regularization = {'T_f': theta_f, 'r': r},\r\n nlp_absolute_tolerance = nlp_absolute_tolerance,\r\n end_time = end_time,\r\n time_step_bounds = dt,\r\n output_times = ('end',),\r\n automatic_jacobian = automatic_jacobian)\r\n \r\n return w\r\n \r\n\r\ndef convergence_stefan_problem_1d():\r\n\r\n phaseflow.helpers.mkdir_p('output/convergence_stefan_problem_solidify/')\r\n \r\n with open('output/convergence_stefan_problem_solidify/convergence.txt',\r\n 'a+') as file:\r\n \r\n file.write(\"dt,dx,pci_pos\\n\")\r\n \r\n nx = 800\r\n \r\n for nt in [25, 50, 100, 200]:\r\n \r\n dt = 1./float(nt)\r\n \r\n dx = 1./float(nx)\r\n \r\n w = stefan_problem_solidify(dt = 1./float(nt), initial_uniform_cell_count = nx, )\r\n \r\n pci_pos = extract_pci_position(w)\r\n \r\n file.write(str(dt)+\",\"+str(dx)+\",\"+str(pci_pos)+\"\\n\")\r\n \r\n nt = 200\r\n\r\n for nx in [100, 200, 400]:\r\n \r\n dt = 1./float(nt)\r\n \r\n dx = 1./float(nx)\r\n \r\n w = stefan_problem_solidify(dt = 1./float(nt), initial_uniform_cell_count = nx, )\r\n \r\n pci_pos = extract_pci_position(w)\r\n \r\n file.write(str(dt)+\",\"+str(dx)+\",\"+str(pci_pos)+\"\\n\")\r\n\r\n \r\nif __name__=='__main__':\r\n \r\n convergence_stefan_problem_1d()\r\n","sub_path":"examples/convergence_stefan_problem_1d.py","file_name":"convergence_stefan_problem_1d.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"4995765","text":"from pypost.data import DataManager\nimport pypost.common.tfutils as tfutils\nimport tensorflow as tf\nimport numpy as np\nfrom pypost.mappings.Gaussian import LinearDiagonalGaussian\n\n\nnum_cpu = 1\ntf_config = tf.ConfigProto(inter_op_parallelism_threads=num_cpu, intra_op_parallelism_threads=num_cpu)\nsession = tf.Session(config=tf_config)\nsession.__enter__()\n\ndataManager = 
DataManager('data')\ndataManager.addDataEntry('states', 10)\ndataManager.addDataEntry('actions', 5)\n\ngaussian = LinearDiagonalGaussian(dataManager, ['states'], ['actions'])\n\ndata = dataManager.createDataObject([10])\ndata[...].states = np.random.normal(0, 1, data[...].states.shape)\n\n# Sample from Gaussian and write back in data\ndata[...] >> gaussian >= data\n\n# compute logLikelihood\ndata[...] >= gaussian.logLike\n\n# Compute log likelihood\ngaussianOther = LinearDiagonalGaussian(dataManager, ['states'], ['actions'])\n\n# the param_* properties are created automatically by parsing the mean and logStd tensors. They can be set and read as normal numpy arrays\ngaussianOther.param_final_b = np.random.normal(0, 1, (5,))\ngaussianOther.param_logstd = np.random.normal(0, 1, (5,))\n\n# the concatenated parameter vector can be accessed by\ngaussianOther.params\n\n# we can also access the tensor variables with\ngaussianOther.tv_final_b\n# All tensor variables of one object can be found by\ngaussianOther.tv_variables_list\n\ntf_klDiv = gaussian.klDivergence(gaussianOther)\n\nklDiff_tf = data[...] >= tf_klDiv\n\n# We can directly view the output of different tensors, i.e., the mean\ndata[...] >= gaussian.mean\n\n# Or the different layers\n\n#data[...] >= gaussian.layers[0]\n\n\n# We can also generate TFMappings such as Gaussians wich get tensors as input:\n\nstateTensor = dataManager.createTensorForEntry('states')\nstateTensor = stateTensor * 2\n\ngaussianTensor = gaussianOther.clone('clonedGaussian', stateTensor)\ndata[...] >= gaussianTensor.mean\n\n\n\n","sub_path":"src/pypost/tutorials/mappings/tf_gaussian.py","file_name":"tf_gaussian.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"326998618","text":"env = Environment()\r\n\r\nenv.Append(CPPFLAGS = ['-ggdb3', '-fPIC', '-Wall', '-g'])\r\nenv.Append(CPPPATH = ['../include', '/usr/include', '.'])\r\nenv.Append(LIBS = ['rt', 'pthread', 'log4cxx'])\r\nenv.Append(LIBPATH = ['/usr/lib64', '/usr/local/lib', '/usr/lib/nptl'])\r\n\r\nenv.SharedLibrary('nframe', [\r\n\t'NFTimer.cpp', \r\n\t'NFThread.cpp', \r\n\t'NFGlobal.cpp', \r\n\t'NFMain.cpp', \r\n 'Log.cpp',\r\n])","sub_path":"src/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243649396","text":"ST = {'R0':0,\r\n 'R1':1,\r\n 'R2':2,\r\n 'R3':3,\r\n 'R4':4,\r\n 'R5':5,\r\n 'R6':6,\r\n 'R7':7,\r\n 'R8':8,\r\n 'R9':9,\r\n 'R10':10,\r\n 'R11':11,\r\n 'R12':12,\r\n 'R13':13,\r\n 'R14':14,\r\n 'R15':15,\r\n 'SCREEN':16384,\r\n 'KBD':24576,\r\n 'SP':0,\r\n 'LCL':1,\r\n 'ARG':2,\r\n 'THIS':3,\r\n 'THAT':4}\r\n\r\n\r\n\r\ndef addEntry(Symbol, Address):\r\n if getAddress(Symbol) == None:\r\n ST[Symbol] = Address\r\n\r\ndef contains(Symbol):\r\n return Symbol in ST\r\n\r\ndef getAddress(Symbol):\r\n try:\r\n return ST[Symbol]\r\n except:\r\n return None\r\n","sub_path":"projects/06/SymbolTable.py","file_name":"SymbolTable.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"134232652","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom unittest import TestCase\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtTest import QTest\n\nfrom ....Classes.LamHole import LamHole\nfrom ....Classes.HoleM51 import HoleM51\nfrom ....GUI.Dialog.DMachineSetup.SMHoleMag.PHoleM51.PHoleM51 import 
PHoleM51\nfrom ....Classes.Material import Material\n\n\nclass test_PHoleM51(TestCase):\n \"\"\"Test that the widget PHoleM51 behave like it should\"\"\"\n\n def setUp(self):\n \"\"\"Run at the begining of every test to setup the gui\"\"\"\n self.test_obj = LamHole(Rint=0.1, Rext=0.2)\n self.test_obj.hole = list()\n self.test_obj.hole.append(\n HoleM51(\n H0=0.10,\n H1=0.11,\n H2=0.12,\n W0=0.13,\n W1=0.14,\n W2=0.15,\n W3=0.16,\n W4=0.17,\n W5=0.18,\n W6=0.19,\n W7=0.2,\n )\n )\n self.test_obj.hole[0].magnet_0.mat_type.name = \"Magnet3\"\n self.test_obj.hole[0].magnet_1.mat_type.name = \"Magnet2\"\n self.test_obj.hole[0].magnet_2.mat_type.name = \"Magnet1\"\n\n self.matlib = list()\n self.matlib.append(Material(name=\"Magnet1\"))\n self.matlib.append(Material(name=\"Magnet2\"))\n self.matlib.append(Material(name=\"Magnet3\"))\n self.widget = PHoleM51(self.test_obj.hole[0], self.matlib)\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Start the app for the test\"\"\"\n print(\"\\nStart Test PHoleM51\")\n cls.app = QtWidgets.QApplication(sys.argv)\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Exit the app after the test\"\"\"\n cls.app.quit()\n\n def test_init(self):\n \"\"\"Check that the Widget spinbox initialise to the lamination value\"\"\"\n\n self.assertEqual(self.widget.lf_H0.value(), 0.10)\n self.assertEqual(self.widget.lf_H1.value(), 0.11)\n self.assertEqual(self.widget.lf_H2.value(), 0.12)\n self.assertEqual(self.widget.lf_W0.value(), 0.13)\n self.assertEqual(self.widget.lf_W1.value(), 0.14)\n self.assertEqual(self.widget.lf_W2.value(), 0.15)\n self.assertEqual(self.widget.lf_W3.value(), 0.16)\n self.assertEqual(self.widget.lf_W4.value(), 0.17)\n self.assertEqual(self.widget.lf_W5.value(), 0.18)\n self.assertEqual(self.widget.lf_W6.value(), 0.19)\n self.assertEqual(self.widget.lf_W7.value(), 0.2)\n # Check material\n self.assertFalse(self.widget.w_mat_0.isHidden())\n self.assertEqual(self.widget.w_mat_0.c_mat_type.currentText(), \"Magnet3\")\n self.assertEqual(self.widget.w_mat_0.c_mat_type.currentIndex(), 2)\n self.assertFalse(self.widget.w_mat_1.isHidden())\n self.assertEqual(self.widget.w_mat_1.c_mat_type.currentText(), \"Magnet2\")\n self.assertEqual(self.widget.w_mat_1.c_mat_type.currentIndex(), 1)\n self.assertFalse(self.widget.w_mat_2.isHidden())\n self.assertEqual(self.widget.w_mat_2.c_mat_type.currentText(), \"Magnet1\")\n self.assertEqual(self.widget.w_mat_2.c_mat_type.currentIndex(), 0)\n\n def test_set_W0(self):\n \"\"\"Check that the Widget allow to update W0\"\"\"\n # Clear the field before writing the new value\n self.widget.lf_W0.clear()\n QTest.keyClicks(self.widget.lf_W0, \"0.31\")\n self.widget.lf_W0.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.W0, 0.31)\n self.assertEqual(self.test_obj.hole[0].W0, 0.31)\n\n def test_set_W1(self):\n \"\"\"Check that the Widget allow to update W1\"\"\"\n # Clear the field before writing the new value\n self.widget.lf_W1.clear()\n QTest.keyClicks(self.widget.lf_W1, \"0.32\")\n self.widget.lf_W1.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.W1, 0.32)\n self.assertEqual(self.test_obj.hole[0].W1, 0.32)\n\n def test_set_W3(self):\n \"\"\"Check that the Widget allow to update W3\"\"\"\n self.widget.lf_W3.clear()\n QTest.keyClicks(self.widget.lf_W3, \"0.33\")\n self.widget.lf_W3.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.W3, 0.33)\n self.assertEqual(self.test_obj.hole[0].W3, 0.33)\n\n def test_set_W4(self):\n 
\"\"\"Check that the Widget allow to update W4\"\"\"\n # Clear the field before writing the new value\n self.widget.lf_W4.clear()\n QTest.keyClicks(self.widget.lf_W4, \"0.34\")\n self.widget.lf_W4.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.W4, 0.34)\n self.assertEqual(self.test_obj.hole[0].W4, 0.34)\n\n def test_set_W5(self):\n \"\"\"Check that the Widget allow to update W5\"\"\"\n # Clear the field before writing the new value\n self.widget.lf_W5.clear()\n QTest.keyClicks(self.widget.lf_W5, \"0.35\")\n self.widget.lf_W5.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.W5, 0.35)\n self.assertEqual(self.test_obj.hole[0].W5, 0.35)\n\n def test_set_W6(self):\n \"\"\"Check that the Widget allow to update W6\"\"\"\n # Clear the field before writing the new value\n self.widget.lf_W6.clear()\n QTest.keyClicks(self.widget.lf_W6, \"0.36\")\n self.widget.lf_W6.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.W6, 0.36)\n self.assertEqual(self.test_obj.hole[0].W6, 0.36)\n\n def test_set_W7(self):\n \"\"\"Check that the Widget allow to update W7\"\"\"\n # Clear the field before writing the new value\n self.widget.lf_W7.clear()\n QTest.keyClicks(self.widget.lf_W7, \"0.37\")\n self.widget.lf_W7.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.W7, 0.37)\n self.assertEqual(self.test_obj.hole[0].W7, 0.37)\n\n def test_set_H0(self):\n \"\"\"Check that the Widget allow to update H0\"\"\"\n self.widget.lf_H0.clear()\n QTest.keyClicks(self.widget.lf_H0, \"0.38\")\n self.widget.lf_H0.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.H0, 0.38)\n self.assertEqual(self.test_obj.hole[0].H0, 0.38)\n\n def test_set_H1(self):\n \"\"\"Check that the Widget allow to update H1\"\"\"\n self.widget.lf_H1.clear()\n QTest.keyClicks(self.widget.lf_H1, \"0.39\")\n self.widget.lf_H1.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.H1, 0.39)\n self.assertEqual(self.test_obj.hole[0].H1, 0.39)\n\n def test_set_H2(self):\n \"\"\"Check that the Widget allow to update H2\"\"\"\n self.widget.lf_H2.clear()\n QTest.keyClicks(self.widget.lf_H2, \"0.40\")\n self.widget.lf_H2.editingFinished.emit() # To trigger the slot\n\n self.assertEqual(self.widget.hole.H2, 0.40)\n self.assertEqual(self.test_obj.hole[0].H2, 0.40)\n\n def test_set_material_0(self):\n \"\"\"Check that you can change the material of magnet_0\"\"\"\n self.widget.w_mat_0.c_mat_type.setCurrentIndex(0)\n\n self.assertEqual(self.widget.w_mat_0.c_mat_type.currentText(), \"Magnet1\")\n self.assertEqual(self.test_obj.hole[0].magnet_0.mat_type.name, \"Magnet1\")\n\n def test_set_material_1(self):\n \"\"\"Check that you can change the material of magnet_1\"\"\"\n self.widget.w_mat_1.c_mat_type.setCurrentIndex(0)\n\n self.assertEqual(self.widget.w_mat_1.c_mat_type.currentText(), \"Magnet1\")\n self.assertEqual(self.test_obj.hole[0].magnet_1.mat_type.name, \"Magnet1\")\n\n def test_set_material_2(self):\n \"\"\"Check that you can change the material of magnet_2\"\"\"\n self.widget.w_mat_2.c_mat_type.setCurrentIndex(2)\n\n self.assertEqual(self.widget.w_mat_2.c_mat_type.currentText(), \"Magnet3\")\n self.assertEqual(self.test_obj.hole[0].magnet_2.mat_type.name, 
\"Magnet3\")\n","sub_path":"Tests/GUI/DMachineSetup/test_PHoleM51.py","file_name":"test_PHoleM51.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"344691560","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport tensorflow as tf\nprint(tf.__version__)\n# 2.0.0-alpha0\n\nfrom tensorflow.keras.models import load_model\n\n# 导入模型\nmodel = load_model('./cats_and_dogs_small_2.h5')\n# 作为提醒\nmodel.summary() \n#Model: \"sequential_3\"\n#_________________________________________________________________\n#Layer (type) Output Shape Param # \n#=================================================================\n#conv2d_11 (Conv2D) (None, 148, 148, 32) 896 \n#_________________________________________________________________\n#max_pooling2d_10 (MaxPooling (None, 74, 74, 32) 0 \n#_________________________________________________________________\n#conv2d_12 (Conv2D) (None, 72, 72, 64) 18496 \n#_________________________________________________________________\n#max_pooling2d_11 (MaxPooling (None, 36, 36, 64) 0 \n#_________________________________________________________________\n#conv2d_13 (Conv2D) (None, 34, 34, 128) 73856 \n#_________________________________________________________________\n#max_pooling2d_12 (MaxPooling (None, 17, 17, 128) 0 \n#_________________________________________________________________\n#conv2d_14 (Conv2D) (None, 15, 15, 128) 147584 \n#_________________________________________________________________\n#max_pooling2d_13 (MaxPooling (None, 7, 7, 128) 0 \n#_________________________________________________________________\n#flatten_3 (Flatten) (None, 6272) 0 \n#_________________________________________________________________\n#dropout (Dropout) (None, 6272) 0 \n#_________________________________________________________________\n#dense_6 (Dense) (None, 512) 3211776 \n#_________________________________________________________________\n#dense_7 (Dense) (None, 1) 513 \n#=================================================================\n#Total params: 3,453,121\n#Trainable params: 3,453,121\n#Non-trainable params: 0\n#_________________________________________________________________\n\n\nimg_path = 'F:\\zkl_repository\\pic\\\\all_pic\\cat.1700.jpg'\n\n# 将图像预处理为一个4D张量\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\n\nimg = image.load_img(img_path, target_size=(150, 150))\nimg_tensor = image.img_to_array(img)\n# 处理成一个4D张量\nimg_tensor = np.expand_dims(img_tensor, axis=0)\n# 请记住,训练模型的输入数据都用这种方法预处理\nimg_tensor /= 255.\n\n# 其形状为(1,150,150,3)\nprint(img_tensor.shape)\n\n\nimport matplotlib.pyplot as plt\n\nplt.imshow(img_tensor[0])\nplt.show()\n\n\nfrom tensorflow.keras import models\n\n# 提取前8层的输出\nlayer_outputs = [layer.output for layer in model.layers[:8]]\n# 创建一个模型,给定模型的输入,可以返回这些输出\nactivation_model = models.Model(inputs=model.input, outputs=layer_outputs)\n# 返回8个numpy数组组成的列表,每个层激活对应一个numpy数组\nactivations = activation_model.predict(img_tensor)\nfirst_layer_activation = activations[0]\nprint(first_layer_activation.shape)\n# (1, 148, 148, 32)\n\n\n# 将第3个通道可视化\nimport matplotlib.pyplot as plt\nplt.matshow(first_layer_activation[0, :, :, 3], cmap='viridis')\nplt.show()\n# 将第30个通道可视化\nplt.matshow(first_layer_activation[0, :, :, 30], cmap='viridis')\nplt.show()\n\nfrom tensorflow import keras\n\n# 层的名称,这样可以将这些名称画到图中\nlayer_names = []\nfor layer in model.layers[:8]:\n layer_names.append(layer.name)\n\nimages_per_row = 16\n\n# 显示特征图\nfor layer_name, layer_activation in 
zip(layer_names, activations):\n # 特征图中特征的个数\n n_features = layer_activation.shape[-1]\n\n # 特征图的形状为(1,size,seize,n_features)\n size = layer_activation.shape[1]\n\n # 在这个矩阵中将激活通道平铺\n n_cols = n_features // images_per_row\n display_grid = np.zeros((size * n_cols, images_per_row * size))\n\n # 将每个过滤器平铺到一个大的水平网格中\n for col in range(n_cols):\n for row in range(images_per_row):\n channel_image = layer_activation[0,\n :, :,\n col * images_per_row + row]\n # 对特征进行后处理,使其看起来更美观\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n display_grid[col * size : (col + 1) * size,\n row * size : (row + 1) * size] = channel_image\n\n # 显示网格\n scale = 1. / size\n plt.figure(figsize=(scale * display_grid.shape[1],\n scale * display_grid.shape[0]))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\n \nplt.show()\n\n\nfrom tensorflow.keras.applications import VGG16\nfrom tensorflow.keras import backend as K\nimport tensorflow as tf\n\nmodel = VGG16(weights='imagenet',\n include_top=False)\n\nlayer_name = 'block3_conv1'\nfilter_index = 0\n\nlayer_output = model.get_layer(layer_name).output\nloss = K.mean(layer_output[:, :, :, filter_index])\n\n# 这个函数不知道应该改成什么,现在报错\n#with tf.GradientTape() as g:\n# g.watch(model.input)\n# loss = K.mean(layer_output[:, :, :, filter_index])\ngrads = K.gradients(loss, model.input)\n#\n## We add 1e-5 before dividing so as to avoid accidentally dividing by 0.\n#grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)\n#\n#\niterate = K.function([model.input], [loss, grads])\n\n# Let's test it:\nimport numpy as np\nloss_value, grads_value = iterate([np.zeros((1, 150, 150, 3))])\n\n# 从一张带有噪声的灰度图像开始\ninput_img_data = np.random.random((1, 150, 150, 3)) * 20 + 128.\n\n# 运行40次,梯度上升\nstep = 1. 
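# The K.gradients/K.function pair above uses the TF1 graph API; the
# commented-out block earlier notes (in Chinese) that it currently raises an
# error, which is what happens under the TF 2.x eager runtime. A minimal
# sketch of the same gradient-ascent step with tf.GradientTape -- the helper
# name compute_loss_and_grads is hypothetical, not part of the original script:
#
#     feature_extractor = tf.keras.Model(model.input, layer_output)
#
#     def compute_loss_and_grads(img):
#         img = tf.convert_to_tensor(img)
#         with tf.GradientTape() as tape:
#             tape.watch(img)
#             activation = feature_extractor(img)
#             loss_value = tf.reduce_mean(activation[:, :, :, filter_index])
#         grads_value = tape.gradient(loss_value, img)
#         return loss_value.numpy(), grads_value.numpy()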
\nfor i in range(40):\n #计算损失值和梯度值\n loss_value, grads_value = iterate([input_img_data])\n # 沿着让损失最大化的方向调节输入图像\n input_img_data += grads_value * step\n\ndef deprocess_image(x):\n # 对张量做标准化,使其均值为0,标准差为0.1\n x -= x.mean()\n x /= (x.std() + 1e-5)\n x *= 0.1\n\n # 将x裁剪到[0,1]之间\n x += 0.5\n x = np.clip(x, 0, 1)\n\n # 转换成RGB数组\n x *= 255\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\n\ndef generate_pattern(layer_name, filter_index, size=150):\n# 构建一个损失函数,将该层的第n个过滤器的激活最大化\n layer_output = model.get_layer(layer_name).output\n loss = K.mean(layer_output[:, :, :, filter_index])\n\n # 计算这个损失相对于输入图像的梯度\n grads = K.gradients(loss, model.input)[0]\n\n # 标准化技巧:将梯度标准化\n grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)\n\n # This function returns the loss and grads given the input picture\n iterate = K.function([model.input], [loss, grads])\n \n # We start from a gray image with some noise\n input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.\n\n # Run gradient ascent for 40 steps\n step = 1.\n for i in range(40):\n loss_value, grads_value = iterate([input_img_data])\n input_img_data += grads_value * step\n \n img = input_img_data[0]\n return deprocess_image(img)\n\n\n\nplt.imshow(generate_pattern('block3_conv1', 0))\nplt.show()\n\n\nfor layer_name in ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1']:\n size = 64\n margin = 5\n\n # This a empty (black) image where we will store our results.\n results = np.zeros((8 * size + 7 * margin, 8 * size + 7 * margin, 3))\n\n for i in range(8): # iterate over the rows of our results grid\n for j in range(8): # iterate over the columns of our results grid\n # Generate the pattern for filter `i + (j * 8)` in `layer_name`\n filter_img = generate_pattern(layer_name, i + (j * 8), size=size)\n\n # Put the result in the square `(i, j)` of the results grid\n horizontal_start = i * size + i * margin\n horizontal_end = horizontal_start + size\n vertical_start = j * size + j * margin\n vertical_end = vertical_start + size\n results[horizontal_start: horizontal_end, vertical_start: vertical_end, :] = filter_img\n\n # Display the results grid\n plt.figure(figsize=(20, 20))\n plt.imshow(results)\n plt.show()\n\n\nfrom tensorflow.keras.applications.vgg16 import VGG16\n\nK.clear_session()\n\n# 注意,网络中包括了密集连接诶分类器,在前面的例子中,我们都舍弃了这个分类器\nmodel = VGG16(weights='imagenet')\n\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions\nimport numpy as np\n\n# 目标图片路径\nimg_path = 'F:\\zkl_repository\\deep-learning-with-python-notebooks\\creative_commons_elephant.jpg'\n\n# 大小为2248224的python图像库图像\nimg = image.load_img(img_path, target_size=(224, 224))\n\n# 形状为224*224*3的float32格式的numpy数组\nx = image.img_to_array(img)\n\n#添加一个维度,将数组转换成(1,224,224,3)\nx = np.expand_dims(x, axis=0)\n\n#对批量进行预处理(按通道进行颜色标准化)\nx = preprocess_input(x)\n\npreds = model.predict(x)\nprint('Predicted:', decode_predictions(preds, top=3)[0])\n\n\nnp.argmax(preds[0])\n\n# 预测向量中非洲象的元素\nafrican_elephant_output = model.output[:, 386]\n\n# block5_conv3层的输出特征图,它是VGG16的最后一个卷积层\nlast_conv_layer = model.get_layer('block5_conv3')\n\n# 非洲象类别相对于block5_conv3的输出特征图的梯度\ngrads = K.gradients(african_elephant_output, last_conv_layer.output)[0]\n\n#形状为(512,)的向量,每个元素是特定特征图通道的梯度平均大小\npooled_grads = K.mean(grads, axis=(0, 1, 2))\n\n# This function allows us to access the values of the quantities we just defined:\n# `pooled_grads` and the output feature map of `block5_conv3`,\n# given a sample image\niterate = K.function([model.input], 
[pooled_grads, last_conv_layer.output[0]])\n\n# These are the values of these two quantities, as Numpy arrays,\n# given our sample image of two elephants\npooled_grads_value, conv_layer_output_value = iterate([x])\n\n# We multiply each channel in the feature map array\n# by \"how important this channel is\" with regard to the elephant class\nfor i in range(512):\n conv_layer_output_value[:, :, i] *= pooled_grads_value[i]\n\n# The channel-wise mean of the resulting feature map\n# is our heatmap of class activation\nheatmap = np.mean(conv_layer_output_value, axis=-1)\n\n\nheatmap = np.maximum(heatmap, 0)\nheatmap /= np.max(heatmap)\nplt.matshow(heatmap)\nplt.show()\n\n\nimport cv2\n\n# We use cv2 to load the original image\nimg = cv2.imread(img_path)\n\n# We resize the heatmap to have the same size as the original image\nheatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))\n\n# We convert the heatmap to RGB\nheatmap = np.uint8(255 * heatmap)\n\n# We apply the heatmap to the original image\nheatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n\n# 0.4 here is a heatmap intensity factor\nsuperimposed_img = heatmap * 0.4 + img\n\n# Save the image to disk\ncv2.imwrite('/Users/fchollet/Downloads/elephant_cam.jpg', superimposed_img)\n","sub_path":"tensorflow_version/5.4-visualizing-what-convnets-learn.py","file_name":"5.4-visualizing-what-convnets-learn.py","file_ext":"py","file_size_in_byte":11552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"296694647","text":"# -*- coding: utf-8 -*-\n# © 2017 Pharmadus I.T.\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import Warning\nimport openerp.addons.decimal_precision as dp\nimport datetime\nimport operator\nfrom openerp.osv.expression import get_unaccent_wrapper\n\n\nclass ProductProductAnalysisMethod(models.Model):\n _name = 'product.product.analysis.method'\n\n product_id = fields.Many2one(comodel_name='product.product')\n procedure_id = fields.Many2one(comodel_name='mrp.procedure',\n domain=\"[('type_id.code', '=', 'quality_control_analysis_methods')]\",\n string='Analysis method')\n attachment = fields.Binary(\n related='procedure_id.attachment', readonly=True)\n filename = fields.Char(\n related='procedure_id.attachment_filename', readonly=True)\n\n\nclass ProductProductGenericSpecification(models.Model):\n _name = 'product.product.generic.specification'\n\n product_id = fields.Many2one(comodel_name='product.product')\n procedure_id = fields.Many2one(comodel_name='mrp.procedure',\n domain=\"[('type_id.code', 'ilike', 'specifications%')]\",\n string='Generic specification')\n attachment = fields.Binary(\n related='procedure_id.attachment', readonly=True)\n filename = fields.Char(\n related='procedure_id.attachment_filename', readonly=True)\n\n\nclass ProductProduct(models.Model):\n _inherit = 'product.product'\n\n product_commercial_name = fields.Char()\n is_in_current_pricelist = fields.Boolean(\n compute='_compute_is_in_current_pricelist',\n search='_search_is_in_current_pricelist')\n year_appearance = fields.Integer('Year of appearance',\n default=datetime.datetime.now().year)\n manufacturing_procedure_id = fields.Many2one(comodel_name='mrp.procedure',\n domain=\"[('type_id.code', '=', 'product_manufacturing')]\",\n string='Manufacturing procedure')\n manufacturing_procedure_attachment = fields.Binary(\n related='manufacturing_procedure_id.attachment', readonly=True)\n 
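    # The *_attachment / *_filename pairs on this model are read-only related
    # fields that surface the linked mrp.procedure's binary file and its name
    # directly on the product form.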
manufacturing_procedure_filename = fields.Char(\n related='manufacturing_procedure_id.attachment_filename', readonly=True)\n packaging_procedure_id = fields.Many2one(comodel_name='mrp.procedure',\n domain=\"[('type_id.code', '=', 'product_packaging')]\",\n string='Packaging procedure')\n packaging_procedure_attachment = fields.Binary(\n related='packaging_procedure_id.attachment', readonly=True)\n packaging_procedure_filename = fields.Char(\n related='packaging_procedure_id.attachment_filename', readonly=True)\n generic_specification_ids = fields.One2many(\n comodel_name='product.product.generic.specification',\n inverse_name='product_id', string='Specification')\n model_specification_id = fields.Many2one(comodel_name='mrp.procedure',\n domain=\"[('type_id.code', 'ilike', 'specifications%')]\",\n string='Model specification')\n model_specification_attachment = fields.Binary(\n related='model_specification_id.attachment', readonly=True)\n model_specification_filename = fields.Char(\n related='model_specification_id.attachment_filename', readonly=True)\n analysis_method_ids = fields.One2many(\n comodel_name='product.product.analysis.method',\n inverse_name='product_id', string=\"Analysis method\")\n analysis_plan_id = fields.Many2one(comodel_name='mrp.procedure',\n domain=\"[('type_id.code', '=', 'quality_control_analysis_plans')]\",\n string='Analysis plan')\n analysis_plan_attachment = fields.Binary(\n related='analysis_plan_id.attachment', readonly=True)\n analysis_plan_filename = fields.Char(\n related='analysis_plan_id.attachment_filename', readonly=True)\n notes = fields.Text()\n reception_warehouse_warning = fields.Text()\n width = fields.Float()\n height = fields.Float()\n depth = fields.Float()\n earliest_picking = fields.Date(compute='_earliest_picking',\n search='_search_earliest_picking')\n obsolete = fields.Boolean(default=False)\n minimum_sale_price = fields.Float(default=0)\n\n @api.one\n @api.constrains('year_appearance')\n def _check_year_of_appearance(self):\n current_year = datetime.datetime.now().year\n if 1956 >= self.year_appearance <= current_year:\n raise Warning(_('Year must be between 1956 and %s.') %\n (current_year,))\n\n @api.one\n @api.constrains('active')\n def _update_template_active(self):\n if self.product_tmpl_id.active != self.active:\n self.product_tmpl_id.write({'active': self.active})\n\n @api.multi\n def name_get(self): # Hide default_code by default\n if self.env.context.get('show_expedition_name', False):\n res = []\n for rec in self:\n res.append((rec.id, rec.expeditions_name))\n return res\n else:\n return super(ProductProduct,\n self.with_context(display_default_code=False)).\\\n name_get()\n\n @api.model\n def name_search(self, name, args=None, operator='ilike', limit=100):\n if not args:\n args = []\n if self.env.context.get('show_expedition_name', False) and name and \\\n operator in ('=', 'ilike', '=ilike', 'like', '=like'):\n self.check_access_rights('read')\n\n search_name = name\n if operator in ('ilike', 'like'):\n search_name = '%%%s%%' % name\n if operator in ('=ilike', '=like'):\n operator = operator[1:]\n\n unaccent = get_unaccent_wrapper(self.env.cr)\n query = \"\"\"SELECT pp.id\n FROM product_product pp\n JOIN product_template pt on pt.id = pp.product_tmpl_id\n WHERE pt.expeditions_name {operator} {percent}\n ORDER BY pt.expeditions_name\n \"\"\".format(operator=operator,\n percent=unaccent('%s'))\n where_clause_params = [search_name]\n\n if limit:\n query += ' LIMIT %s'\n where_clause_params += [limit]\n self.env.cr.execute(query, 
where_clause_params)\n ids = map(lambda x: x[0], self.env.cr.fetchall())\n\n if ids:\n return self.browse(ids).name_get()\n else:\n return []\n else:\n return super(ProductProduct, self).\\\n name_search(name, args, operator=operator, limit=limit)\n\n @api.depends('pricelist_id')\n def _compute_is_in_current_pricelist(self):\n pricelist = self.pricelist_id.mapped('id')\n return self.env.context.get('pricelist', False) in pricelist\n\n def _search_is_in_current_pricelist(self, operator, value):\n # This domain filter by price list is for sale orders that aren't for\n # sample/advertising purposes\n if self.env.context.get('is_a_sample_order', False):\n return [('active', '=', True)]\n\n current_pricelist_product_list = []\n pricelist = self.env.context.get('pricelist', False)\n if pricelist:\n pricelist_id = self.env['product.pricelist'].browse(pricelist)\n current_pricelist_product_list = \\\n pricelist_id.version_id.items_id.mapped('product_id.id')\n if ((operator == '=') and value) or ((operator == '!=') and not value):\n operator = 'in'\n else:\n operator = 'not in'\n return [('id', operator, current_pricelist_product_list)]\n\n @api.one\n def _earliest_picking(self):\n picking_id = self.env['stock.picking'].search([\n ('move_lines.product_id', '=', self.id),\n ('state', 'not in', ('done', 'cancel')),\n ('picking_type_code', '=', 'outgoing')\n ], limit=1, order='max_date')\n self.earliest_picking = picking_id.max_date if picking_id else False\n\n @api.multi\n def _search_earliest_picking(self, relate, value):\n def get_truth(inp, relate, cut):\n ops = {'>': operator.gt,\n '<': operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=': operator.eq,\n '!=': operator.ne}\n return ops[relate](inp, cut)\n\n product_ids = self.search([]).filtered(\n lambda p: get_truth(p.earliest_picking, relate, value)\n )\n return [('id', 'in', product_ids.ids)]\n\n @api.multi\n def product_price_history_action(self):\n self.ensure_one()\n return self.product_tmpl_id.product_price_history_action()\n\n\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n\n suppliers_pricelists = fields.One2many('pricelist.partnerinfo',\n compute='_suppliers_pricelists')\n cost_price_rm = fields.Float('Raw material cost price',\n digits=dp.get_precision('Product Price'))\n cost_price_components = fields.Float('Components cost price',\n digits=dp.get_precision('Product Price'))\n cost_price_dl = fields.Float('Direct labor cost price',\n digits=dp.get_precision('Product Price'))\n cost_eval_price_method = fields.Char('Costing Method',\n compute='_cost_eval_method')\n cost_eval_price = fields.Float('Product cost evaluation',\n digits=dp.get_precision('Product Price'))\n cost_eval_price_rm = fields.Float('Raw material cost evaluation price',\n digits=dp.get_precision('Product Price'))\n cost_eval_price_components = fields.Float('Components cost evaluation price',\n digits=dp.get_precision('Product Price'))\n cost_eval_price_dl = fields.Float('Direct labor cost evaluation price',\n digits=dp.get_precision('Product Price'))\n internal_scrapped_qty = fields.Float('Stock at internal scrap location',\n digits = dp.get_precision('Product Unit of Measure'),\n readonly = True)\n virtual_conservative = fields.Float('Virtual stock conservative',\n digits = dp.get_precision('Product Unit of Measure'),\n help = 'It is the quantity on hand - expected outflows\\n'\n 'This stock is the most catastrophic, since it assumes that '\n 'everything purchased will not arrive, as well as that '\n 'everything in production will not 
be approved for quality.\\n'\n 'So, for purchasable products, it would be the quantity in stock'\n ' - the already confirmed productions in progress, and for '\n 'saleable ones the quantity on hand - the confirmed sales.\\n'\n 'In no case does it add anything extra to the quantity in stock.',\n readonly=True)\n out_of_existences = fields.Float('Out of existences',\n digits=dp.get_precision('Product Unit of Measure'),\n readonly=True)\n out_of_existences_dismissed = fields.Float('Out of existences dismissed',\n digits=dp.get_precision('Product Unit of Measure'),\n readonly=True)\n real_incoming_qty = fields.Float('Real incoming qty.',\n digits = dp.get_precision('Product Unit of Measure'),\n readonly=True)\n production_planning_qty = fields.Float(\n digits = dp.get_precision('Product Unit of Measure'),\n readonly=True)\n pre_production_qty = fields.Float(\n digits = dp.get_precision('Product Unit of Measure'),\n readonly=True)\n stock_move_ids = fields.One2many(string='Stock movements',\n comodel_name='stock.move',\n inverse_name='product_id')\n weight_net_eco = fields.Float(string='Ecological net weight',\n digits = dp.get_precision('Stock Weight'))\n ecoembes_weight = fields.Float(digits = dp.get_precision('Stock Weight'))\n expeditions_name = fields.Char('Expeditions name')\n expeditions_width = fields.Float('Width (cm)')\n expeditions_height = fields.Float('Height (cm)')\n expeditions_depth = fields.Float('Depth (cm)')\n\n @api.one\n @api.depends('seller_ids')\n def _suppliers_pricelists(self):\n ids = []\n for product in self:\n for seller in product.seller_ids:\n for pricelist in seller.pricelist_ids:\n ids.append(pricelist.id)\n self.suppliers_pricelists = ids\n\n @api.one\n def _cost_eval_method(self):\n self.cost_eval_price_method = _('Cost eval (night calculation)')\n\n @api.multi\n def compute_detailed_stock(self):\n warehouses = self.env['stock.warehouse'].search([])\n stock_ids = [wh.lot_stock_id.id for wh in warehouses]\n\n for product in self:\n if not product.product_variant_ids:\n continue\n\n product_id = product.product_variant_ids[0]\n\n quants = self.env['stock.quant'].search([\n ('product_id', '=', product_id.id),\n ('location_id.usage', '=', 'internal'),\n ('location_id.scrap_location', '=', True)\n ])\n internal_scrapped_qty = sum(quant.qty for quant in quants)\n\n wh = self.env['stock.warehouse'].search(\n [('company_id', '=', self.env.user.company_id.id)])\n input_location_ids = wh.wh_input_stock_loc_id._get_child_locations()\n quants = self.env['stock.quant'].search([\n ('product_id', '=', product_id.id),\n ('location_id.id', 'in', input_location_ids.ids)\n ])\n input_qty = sum(quant.qty for quant in quants)\n virtual_conservative = product.qty_available - input_qty - \\\n product.outgoing_qty - internal_scrapped_qty\n\n production_planning_orders = self.env['production.planning.orders'].\\\n search([('product_id', '=', product_id.id),\n ('compute', '=', True)])\n production_planning_qty = sum(order.product_qty for order in\n production_planning_orders)\n prod_plan_materials = self.env['production.planning.materials'].\\\n search([('product_id', '=', product_id.id)])\n production_planning_qty -= sum(material.qty_required for material in\n prod_plan_materials)\n\n production_orders = self.env['mrp.production'].\\\n search([('product_id', '=', product_id.id),\n ('state', '=', 'draft')])\n pre_production_qty = sum(order.product_qty for order in\n production_orders)\n pre_prod_materials = self.env['stock.move'].\\\n search([('product_id', '=', product_id.id),\n 
('raw_material_production_id', '!=', False),\n ('raw_material_production_id.state', 'in',\n ('draft', 'confirmed')),\n ('state', '=', 'waiting')])\n pre_production_qty -= sum(material.product_uom_qty for material in\n pre_prod_materials)\n\n quants = self.env['stock.quant'].search([\n ('product_id', '=', product_id.id),\n ('location_id.usage', '=', 'internal'),\n '!', ('location_id', 'child_of', stock_ids),\n '|',\n ('location_id.scrap_location', '=', False),\n '&',\n ('location_id.scrap_location', '=', True),\n ('location_id.dismissed_location', '=', False),\n ])\n out_of_existences = sum(quant.qty for quant in quants)\n\n quants = self.env['stock.quant'].search([\n ('product_id', '=', product_id.id),\n ('location_id.usage', '=', 'internal'),\n '!', ('location_id', 'child_of', stock_ids),\n ('location_id.scrap_location', '=', True),\n ('location_id.dismissed_location', '=', True),\n ])\n out_of_existences_dismissed = sum(quant.qty for quant in quants)\n\n moves = self.env['stock.move'].search([\n ('product_id', '=', product_id.id),\n ('state', 'in', ('assigned', 'confirmed', 'waiting')),\n ('picking_id.picking_type_id.code', '=', 'incoming'),\n ('location_id.usage', '!=', 'internal'),\n ('location_dest_id.usage', '=', 'internal')\n ])\n real_incoming_qty = sum(move.product_uom_qty for move in moves)\n\n product.with_context(disable_notify_changes = True).write({\n 'internal_scrapped_qty': internal_scrapped_qty,\n 'virtual_conservative': virtual_conservative,\n 'production_planning_qty': production_planning_qty,\n 'pre_production_qty': pre_production_qty,\n 'out_of_existences': out_of_existences,\n 'out_of_existences_dismissed': out_of_existences_dismissed,\n 'real_incoming_qty': real_incoming_qty})\n\n product_id.with_context(disable_notify_changes = True). 
\\\n update_qty_in_production()\n\n @api.multi\n def product_price_history_action(self):\n view_id = self.env.ref('custom_views.product_price_history_tree')\n return {\n 'name': 'Histórico de precios de coste',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'product.price.history',\n 'views': [(view_id.id, 'tree')],\n 'view_id': view_id.id,\n 'target': 'new',\n 'context': self.with_context({\n 'active_id': self.id,\n 'active_model': self._name,\n 'search_default_product_template_id': self.id,\n }).env.context,\n }\n\n\nclass PricelistPartnerinfo(models.Model):\n _inherit = 'pricelist.partnerinfo'\n\n sequence = fields.Integer(related='suppinfo_id.sequence', readonly=True)\n\n\nclass ProductIncoming(models.TransientModel):\n _name = 'product.incoming'\n _inherits = {'product.product': 'product_id'}\n _rec_name = 'product_id'\n\n product_id = fields.Many2one(string='Product',\n comodel_name='product.product', required=True,\n ondelete='cascade', readonly=True)\n data_uid = fields.Many2one(comodel_name='res.users', readonly=True)\n cumulative_incoming_qty = fields.Float(default=0, readonly=True)\n\n @api.model\n def search(self, args, offset=0, limit=None, order=None, count=False):\n self.env.cr.execute('delete from product_incoming where data_uid = {:d}'.\n format(self.env.user.id))\n\n query = \"\"\"\n insert into product_incoming (id, product_id,\n cumulative_incoming_qty, data_uid,\n create_uid, create_date, write_uid, write_date)\n select\n row_number() over (order by pp.name_template) as id,\n pp.id as product_id,\n sum(sm.product_uom_qty),\n {0:d} as data_uid,\n {0:d} as create_uid,\n current_date as create_date,\n {0:d} as write_uid,\n current_date as write_date \n from product_product pp\n join stock_move sm on sm.product_id = pp.id\n and sm.state = 'done'\n and sm.location_id = {1:d} -- Suppliers locations\n and sm.location_dest_id = {2:d} -- Company incoming location \n \"\"\"\n group_by = \"\"\"\n group by 2, 4, 5, 6, 7\n having sum(sm.product_uom_qty) != 0\n \"\"\"\n\n if self.env.context.get('date_start', False):\n query += \"\"\" where sm.date between '{3}' and '{4}'\"\"\" + group_by\n self.env.cr.execute(query.format(\n self.env.user.id,\n self.env.ref('stock.stock_location_suppliers').id,\n self.env.ref('stock.stock_location_company').id,\n self.env.context.get('date_start'),\n self.env.context.get('date_end')\n ))\n else:\n query += group_by\n self.env.cr.execute(query.format(\n self.env.user.id,\n self.env.ref('stock.stock_location_suppliers').id,\n self.env.ref('stock.stock_location_company').id\n ))\n\n return super(ProductIncoming, self).search(args, offset=offset,\n limit=limit, order=order,\n count=count)","sub_path":"project-addons/custom_views/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":21207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"354643307","text":"from keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Model\n \ndef prediction(question, model, tokenizer):\n maxlen = 100 # max number of words in a question to use\n print(\"1\",question)\n quest= []\n quest.append(question)\n print(\"2\",quest)\n quest = tokenizer.texts_to_sequences(quest)\n quest = pad_sequences(quest, maxlen=maxlen)\n print(\"3\",quest)\n\n pred_val = model.predict([quest], batch_size=1024, verbose=1)\n print(pred_val)\n pred_val=(pred_val>0.3).astype(int)\n print(pred_val)\n \n 
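    # 0.3 is the sigmoid decision threshold used by this helper: scores above
    # it become label 1 ('Improper Question'), everything else label 0.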
if pred_val[0] == 1:\n        result = 'Improper Question'\n    else:\n        result = 'Proper Question'\n    return result","sub_path":"model/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"574016545","text":"import re\n\ndef getContent():\n    html = '''\n    <table><tr>\n        <th>Gender:</th>\n        <td>Male</td>\n    </tr></table>\n    '''\n    print(html)\n    # Regular expression to grab the content between <tr> and </tr>\n    res_tr = r'<tr>(.*?)</tr>'\n    m_tr = re.findall(res_tr, html, re.S | re.M)\n    for line in m_tr:\n        print(line)\n        # Grab the first table column (the th elements)\n        res_th = r'<th>(.*?)</th>'\n        m_th = re.findall(res_th, line, re.S | re.M)\n        for mm in m_th:\n            # print() defaults to end='\\n'\n            print(mm, end='')\n        # Grab the second table column (the td values)\n        res_td = r'<td>(.*?)</td>'\n        m_td = re.findall(res_td, line, re.S | re.M)\n        for nn in m_td:\n            print(nn)\n\n\ndef getAttribute():\n    html = '''\n    <body>\n        <a href=\"link1\">Zhejiang Province introduction</a> \n        <a href=\"link2\">Guizhou Province introduction</a> \n    </body>\n    '''\n    # Method 1 to get the link attribute\n    print(re.findall(r'<a href=\"(.*?)\">', html))\n\n    # Method 2 to get the link attribute\n    # ?<= the text before the match has to satisfy the lookbehind expression\n    # ?= the text after the match has to satisfy the lookahead expression\n    print(re.findall(r'(?<=href=\\\").+?(?=\\\")', html))\n\n\nif __name__ == '__main__':\n    getContent()\n    getAttribute()","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"358069710","text":"import os\nimport numpy as np\nimport pandas as pd\nimport time\n\n\ndef check_folder_to_create_and_write_partitions(df, periodos, path, folder_to_check):\n    # Create the partition folder if needed, then write one parquet file per period.\n    ini = time.time()\n    if folder_to_check not in os.listdir(path):\n        os.mkdir(os.path.join(path, folder_to_check))\n        print('Created the folder at: {a}'.format(a=os.path.join(path, folder_to_check)))\n    else:\n        print('The folder already existed: {a}'.format(a=os.path.join(path, folder_to_check)))\n\n    for fecha in periodos:\n        print(\"partitioning date: {a} \\n into folder: {b}\".format(a=fecha, b=os.path.join(path, folder_to_check)))\n        data_part = df.loc[df['YearMonth'] == fecha]\n        print('Subset date {fec} data has: {a} rows and {b} columns'.format(fec=fecha, a=data_part.shape[0], b=data_part.shape[1]))\n        print(\">> writing parquet <<\")\n        data_part.to_parquet(fname=os.path.join(path, folder_to_check) + '//' + folder_to_check + '_' + str(fecha) + '.snappy.parquet', compression='snappy')\n        del (data_part)\n        print('DONE \\n wrote parquet: {c}. 
\\n time elapsed: {a} \\n'.format(a=(time.time() - ini) / 60, c=os.path.join(path, folder_to_check) + '//' + folder_to_check + '_' + str(fecha) + '.snappy.parquet'))\n    print('Total time to write partitions: {a} \\n for file {b} \\n'.format(a=((time.time() - ini) / 60), b=folder_to_check))\n    return\n\ninput_path = '/mnt/s3-refined-porvenir/SG8/main//'\ninput_extension = '.txt'\noutput_path = '/mnt/work/datasets/SG8_partitions///'\n\ntxt_names = [x for x in os.listdir(input_path)]\nfile_names = [x[:-4] for x in os.listdir(input_path)]\n\n\nfechas =[201705, 201706, 201707, 201708, 201709, 201710, 201711,\n       201712, 201801, 201802, 201803, 201804, 201805, 201806, 201807,\n       201808, 201809, 201810, 201811, 201812, 201901, 201902, 201903,\n       201904, 201905, 201906]\n\n\ntxt_names\nfile_names\n\n\n\n\n\n\nini = time.time()\n\nfiles_header_dict = {'header':['sbgr8_bon_bono_pensional','sbgr8_bon_caso_x_afiliado','sbgr8_bon_solicitud_obp'],\n                     'no header':['None']}\n\n\nfile = 'sbgr8_bon_bono_pensional'\n\n\n\ncesantias_saldo = pd.read_csv(input_path + file + input_extension, sep='\\|\\|', encoding='latin1',engine='python')\nprint('DONE \\n Time elapsed reading file: {a}'.format(a=(time.time() - ini) / 60))\nprint('Loaded {a} rows, and {b} columns'.format(a=cesantias_saldo.shape[0], b=cesantias_saldo.shape[1]))\nprint(cesantias_saldo.dtypes)\n\ncesantias_saldo['FECHA_CREACION'].head(2)\n\n# Cast variables:\n\n## Create the year-month column\nprint(\"Cast data types for {a}\".format(a=file))\ncol_dates = ['FECHA_CREACION','FECHA_CAUSA_REDEN_ANTICIPADA',\n             'FECHA_REDENCION_NORMAL', 'FECHA_CORTE', 'FECHA_EMISION_RECONOCIMIENTO']\nfor col in col_dates:\n    cesantias_saldo[col] = pd.to_datetime(cesantias_saldo[col].apply(str), format='%d-%b-%y', errors='coerce')\n\n\ncesantias_saldo['YearMonth'] = 100*cesantias_saldo['FECHA_CREACION'].dt.year +cesantias_saldo['FECHA_CREACION'].dt.month\ndata = cesantias_saldo.drop_duplicates()\n\ndata.shape[0] == cesantias_saldo.shape[0]\ndel(cesantias_saldo)\n\ncol_cats = ['BONO_PENSIONAL_ID', 'CASO_AFILIADO_ID', 'TIPO_BONO',\n       'VERSION_BONO']\ncols_num = ['VALOR_FECHA_EMISION', 'VALOR_FECHA_REDENCION','VALOR_BONO_FECHA_CORTE']\n\n\ndata['YearMonth'] = data['YearMonth'].astype('category')\n\nfor col in col_cats:\n    data[col] = data[col].astype('category')\n\n\nfor col in cols_num:\n    data[col] = data[col].astype('float')\n\n\ncheck_folder_to_create_and_write_partitions(df=data, periodos=fechas, path=output_path, folder_to_check=file)\n\n## check parquet\n\nos.listdir(output_path+file)\naa = pd.read_parquet(output_path+file)\naa.dtypes\naa.shape == data.loc[data['YearMonth'].isin(fechas)].shape\n#data.shape\n\n\n\n#\n#\n#\n#\n#\n#\n\n\nfile = 'sbgr8_bon_solicitud_obp'\n\ncesantias_saldo = pd.read_csv(input_path + file + input_extension, sep='\\|\\|', encoding='latin1',engine='python')\nprint('DONE \\n Time elapsed reading file: {a}'.format(a=(time.time() - ini) / 60))\nprint('Loaded {a} rows, and {b} columns'.format(a=cesantias_saldo.shape[0], b=cesantias_saldo.shape[1]))\nprint(cesantias_saldo.dtypes)\n\ncesantias_saldo['FECHA_TRASLADO'].head(2)\n\n# Cast variables:\n\n## Create the year-month column\nprint(\"Cast data types for {a}\".format(a=file))\ncol_dates = ['FECHA_TRASLADO','FECHA_MUERTE_INVALIDEZ']\nfor col in col_dates:\n    cesantias_saldo[col] = pd.to_datetime(cesantias_saldo[col].apply(str), format='%d-%b-%y', errors='coerce')\n\ncesantias_saldo['YearMonth'] = 100*cesantias_saldo[col_dates[0]].dt.year +cesantias_saldo[col_dates[0]].dt.month\ndata = 
cesantias_saldo.drop_duplicates()\n\ndata.shape[0] == cesantias_saldo.shape[0]\n\ndel(cesantias_saldo)\n\ncol_cats = ['SOLICITUD_ID', 'CASO_AFILIADO_ID', 'CONSECUTIVO_SOLICITUD_OBP',\n       'CONSECUTIVO_LIQUIDACION','VERSION_BONO','CONSECUTIVO_SOLICITUD_ENTIDAD']\ncols_num = ['VALOR_BONO']\n\ndata['YearMonth'] = data['YearMonth'].astype('category')\n\nfor col in col_cats:\n    data[col] = data[col].astype('category')\n\n\nfor col in cols_num:\n    data[col] = data[col].replace('\"', np.nan)\n    data[col] = data[col].astype('float')\n\ndata[data['VALOR_BONO'].isna()].shape\n\ndata.dtypes\ncheck_folder_to_create_and_write_partitions(df=data, periodos=fechas, path=output_path, folder_to_check=file)\n\n## check parquet\n\nos.listdir(output_path+file)\naa = pd.read_parquet(output_path+file)\naa.dtypes\naa.shape == data.loc[data['YearMonth'].isin(fechas)].shape\n\n","sub_path":"02-Python-code/Bonos_Review/00_Transform_txt_to_parquet_by_YearMonth.py","file_name":"00_Transform_txt_to_parquet_by_YearMonth.py","file_ext":"py","file_size_in_byte":5557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"165995466","text":"#!/usr/bin/env python\n\n\"\"\"\n@file chatroom.py\n@brief a small chat room server\n@author asuwill.jdp@gmail.com\n@note what can be used as a chat client?\n      telnet can be used in this way (both Windows and *nix):\n        telnet ip port\n      in which ip is the machine's ip where the chat server is running and\n      port is the port the server is listening on\n\"\"\"\n\nimport sys\nfrom twisted.internet.protocol import Factory\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.internet import reactor\n\nclass ChatRoom(LineReceiver):\n    \n    def __init__(self,users):\n        \"\"\"Keep a reference to the shared user registry.\"\"\"\n        self.users = users\n        self.name = None\n        self.state = \"LOGIN\"\n    def connectionMade(self):\n        \"\"\"Greet a new client and ask for a user name.\"\"\"\n        self.sendLine(\"Welcome to the chat room\\nWhat's your name?\")\n    \n    def connectionLost(self,reason):\n        \"\"\"Drop the user from the registry and notify everyone else.\"\"\"\n        if self.users.has_key(self.name):\n            del self.users[self.name]\n        line = \"%s is leaving the chat room\" % (self.name,)\n        for name,user in self.users.iteritems():\n            user.sendLine(line)\n\n    def lineReceived(self,data):\n        \"\"\"Dispatch a received line according to the session state.\"\"\"\n        data = data.strip()\n        if self.state == \"LOGIN\":\n            self.handle_LOGIN(data)\n        else:\n            self.handle_CHAT(data)\n    def handle_LOGIN(self,data):\n        \"\"\"Register the requested name if it is still free.\"\"\"\n        if self.users.has_key(data):\n            self.sendLine(\"%s has been used. 
Please choose another name\" % (data,))\n else:\n self.state = \"CHAT\"\n self.name = data\n self.users[data]=self\n self.sendLine(\"Welcome, %s\" % (data,))\n line=\"%s come in\" % (self.name)\n for name,user in self.users.iteritems():\n if user != self:\n user.sendLine(line)\n def handle_CHAT(self,data):\n \"\"\"docstring for handle_CHAT\"\"\"\n line=\"<%s>says:%s\" % (self.name,data)\n for name,user in self.users.iteritems():\n if user != self:\n user.sendLine(line)\nclass ChatFactory(Factory):\n\n def __init__(self):\n \"\"\"docstring for __init__\"\"\"\n self.users={}\n\n def buildProtocol(self,addr):\n \"\"\"docstring for buildProtocol\"\"\"\n return ChatRoom(self.users)\n\ndef usage():\n \"\"\"docstring for usage\"\"\"\n print(\"\"\"usage:python chatroom.py port\"\"\")\n exit()\n\ndef main(argv):\n port=int(argv[0])\n reactor.listenTCP(port,ChatFactory())\n reactor.run()\n\nif __name__=='__main__':\n if len(sys.argv)!=2:\n usage()\n else:\n main(sys.argv[1:])\n","sub_path":"chatroom.py","file_name":"chatroom.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"355224586","text":"\n\"\"\" useful classes for Muzero \"\"\"\nfrom reversi import available_pos, set_position, init_board\nimport config\nimport random\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass(order=True, unsafe_hash=True)\nclass Action:\n index: int\n\n def encode(self):\n \"\"\" encode action to network input shape \"\"\" \n board = np.zeros([1,8,8], dtype=np.float32)\n if self.index < 64:\n row, column = self.index // 8, self.index % 8\n board[0][row][column] = 1\n return board\n\nclass Node:\n\n def __init__(self, prior: float, to_play: int):\n self.visit_count = 0\n self.to_play = to_play\n self.prior = prior\n self.value_sum = 0\n self.children = {}\n self.hidden_state = None\n self.reward = 0\n\n def expanded(self) -> bool:\n return len(self.children) > 0\n\n def value(self) -> float:\n if self.visit_count == 0:\n return 0\n return self.value_sum / self.visit_count\n \n def get_to_play(self):\n return self.to_play\n\nclass ActionHistory:\n \"\"\"Simple history container used inside the search.\n\n Only used to keep track of the actions executed.\n \"\"\"\n\n def __init__(self, history: List[Action], action_space_size: int):\n self.history = list(history)\n self.action_space_size = action_space_size\n\n def clone(self):\n return ActionHistory(self.history, self.action_space_size)\n\n def add_action(self, action: Action):\n self.history.append(action)\n\n def last_action(self) -> Action:\n return self.history[-1]\n\n def action_space(self) -> List[Action]:\n return [Action(i) for i in range(self.action_space_size)]\n\n def to_play(self) -> int:\n if len(self.history) % 2 == 0:\n return 1\n else:\n return -1\n\n\nclass Environment:\n # The environment MuZero is interacting with\n def __init__(self):\n self.board = init_board()\n self.turn = 1 # 1 for white and -1 for black\n self.done = False\n self.winner = None # type: Winner\n self.resigned = False\n self.actions = [Action(row*8+column) for row, column in available_pos(self.board, self.turn)]\n def reset(self):\n self.board = init_board()\n self.turn = 1\n self.done = False\n self.winner = None\n self.resigned = False\n self.actions = [Action(row*8+column) for row, column in available_pos(self.board, self.turn)]\n return self\n\n def player_turn(self):\n return self.turn\n\n def step(self, action: Action):\n if 
+{"seq_id":"355224586","text":"\n\"\"\" useful classes for Muzero \"\"\"\nfrom reversi import available_pos, set_position, init_board\nimport config\nimport random\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass(order=True, unsafe_hash=True)\nclass Action:\n    index: int\n\n    def encode(self):\n        \"\"\" encode action to network input shape \"\"\"\n        board = np.zeros([1,8,8], dtype=np.float32)\n        if self.index < 64:\n            row, column = self.index // 8, self.index % 8\n            board[0][row][column] = 1\n        return board\n\nclass Node:\n\n    def __init__(self, prior: float, to_play: int):\n        self.visit_count = 0\n        self.to_play = to_play\n        self.prior = prior\n        self.value_sum = 0\n        self.children = {}\n        self.hidden_state = None\n        self.reward = 0\n\n    def expanded(self) -> bool:\n        return len(self.children) > 0\n\n    def value(self) -> float:\n        if self.visit_count == 0:\n            return 0\n        return self.value_sum / self.visit_count\n\n    def get_to_play(self):\n        return self.to_play\n\nclass ActionHistory:\n    \"\"\"Simple history container used inside the search.\n\n    Only used to keep track of the actions executed.\n    \"\"\"\n\n    def __init__(self, history: List[Action], action_space_size: int):\n        self.history = list(history)\n        self.action_space_size = action_space_size\n\n    def clone(self):\n        return ActionHistory(self.history, self.action_space_size)\n\n    def add_action(self, action: Action):\n        self.history.append(action)\n\n    def last_action(self) -> Action:\n        return self.history[-1]\n\n    def action_space(self) -> List[Action]:\n        return [Action(i) for i in range(self.action_space_size)]\n\n    def to_play(self) -> int:\n        if len(self.history) % 2 == 0:\n            return 1\n        else:\n            return -1\n\n\nclass Environment:\n    # The environment MuZero is interacting with\n    def __init__(self):\n        self.board = init_board()\n        self.turn = 1  # 1 for white and -1 for black\n        self.done = False\n        self.winner = None  # type: Winner\n        self.resigned = False\n        self.actions = [Action(row*8+column) for row, column in available_pos(self.board, self.turn)]\n\n    def reset(self):\n        self.board = init_board()\n        self.turn = 1\n        self.done = False\n        self.winner = None\n        self.resigned = False\n        self.actions = [Action(row*8+column) for row, column in available_pos(self.board, self.turn)]\n        return self\n\n    def player_turn(self):\n        return self.turn\n\n    def step(self, action: Action):\n        if action.index != 64:\n            row, column = action.index//8, action.index%8\n            set_position(self.board, row, column, self.turn)\n\n        self.turn = -self.turn\n        self.actions = [Action(row*8+column) for row, column in available_pos(self.board, self.turn)]\n\n        if not self.actions:\n            if available_pos(self.board, -self.turn):\n                self.actions = [Action(64)]\n            else:\n                self.done = True\n\n        reward = 0\n        if self.done:\n            white_score = np.count_nonzero(self.board==1)\n            black_score = np.count_nonzero(self.board==-1)\n            if white_score > black_score:\n                reward = 1 if -self.turn == 1 else -1\n            elif white_score < black_score:\n                reward = 1 if -self.turn == -1 else -1\n        return reward\n\n    def legal_actions(self):\n        return self.actions.copy()\n\n    def get_board(self):\n        return np.copy(self.board)\n\nclass Game:\n    \"\"\"A single episode of interaction with the environment.\"\"\"\n\n    def __init__(self):\n        self.environment = Environment()  # Game specific environment.\n        self.history = []\n        self.rewards = []\n        self.child_visits = []\n        self.root_values = []\n        self.action_space_size = config.action_space_size\n        self.discount = config.discount\n\n    def terminal(self) -> bool:\n        return self.environment.done\n\n    def legal_actions(self) -> List[Action]:\n        return self.environment.legal_actions()\n\n    def apply(self, action: Action):\n        reward = self.environment.step(action)\n        # reward = reward if self.environment.turn % 2 != 0 and reward == 1 else -reward\n        self.rewards.append(reward)\n        self.history.append(action)\n\n    def store_search_statistics(self, root: Node):\n        sum_visits = sum(child.visit_count for child in root.children.values())\n        action_space = (Action(index) for index in range(self.action_space_size))\n        self.child_visits.append([\n            root.children[a].visit_count / sum_visits if a in root.children else 0\n            for a in action_space\n        ])\n        self.root_values.append(root.value())\n\n    def make_image(self, state_index: int) -> np.array:\n        \"\"\" convert state to Representation network input \"\"\"\n        image = np.empty([2,8,8], dtype=np.float32)\n        o = Environment().reset()\n\n        for current_index in range(0, state_index):\n            o.step(self.history[current_index])\n\n        board = o.get_board()\n        image[0,:,:] = board==self.environment.turn\n        image[1,:,:] = board==-self.environment.turn\n\n        return image\n\n    def make_target(self, state_index: int, num_unroll_steps: int, td_steps: int):\n        # The value target is the discounted root value of the search tree N steps\n        # into the future, plus the discounted sum of all rewards until then.\n        targets = []\n        for current_index in range(state_index, state_index + num_unroll_steps + 1):\n            bootstrap_index = current_index + td_steps\n            if bootstrap_index < len(self.root_values):\n                value = self.root_values[bootstrap_index] * self.discount**td_steps\n            else:\n                value = 0\n\n            for i, reward in enumerate(self.rewards[current_index:bootstrap_index]):\n                # (-1)**i flips the sign every ply; the unparenthesized -1**i\n                # would always evaluate to -1\n                value += reward * self.discount**i * (-1)**i\n\n            if current_index < len(self.root_values):\n                targets.append((value, self.rewards[current_index], self.child_visits[current_index]))\n            else:\n                # targets.append((0, 0, []))\n                raise RuntimeError('out of end of game')\n        return targets\n\n    def to_play(self) -> int:\n        return self.environment.player_turn()\n\n    def action_history(self) -> ActionHistory:\n        return ActionHistory(self.history, self.action_space_size)
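# A tiny numeric sketch of the make_target accumulation above: an n-step
# bootstrap plus per-ply sign flips for the two-player game. The discount,
# rewards and bootstrap value are made-up numbers, and the parentheses in
# (-1)**i are essential, since the unparenthesized -1**i is always -1.
discount = 0.9
td_steps = 3
rewards = [0, 0, 1]    # rewards between current_index and bootstrap_index
bootstrap_value = 0.5  # root value found td_steps into the future
value = bootstrap_value * discount ** td_steps
for i, reward in enumerate(rewards):
    value += reward * discount ** i * (-1) ** i
print(value)  # 0.5*0.729 + 1*0.81 = 1.1745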
\n\nclass ReplayBuffer:\n\n    def __init__(self):\n        self.window_size = config.window_size\n        self.batch_size = config.batch_size\n        self.buffer = []\n\n    def save_game(self, game):\n        if len(self.buffer) > self.window_size:\n            del self.buffer[0]\n        self.buffer.append(game)\n\n    def generate_data(self):\n        \"\"\" generate game data for training \"\"\"\n        game_pos = [(g, i) for g in self.buffer for i in range(len(g.history)-config.num_unroll_steps)]\n        return [(g.make_image(pos), g.history[pos:pos + config.num_unroll_steps],\n                 g.make_target(pos, config.num_unroll_steps, config.td_steps))\n                for (g, pos) in game_pos]\n\n\nMAXIMUM_FLOAT_VALUE = float('inf')\n\nclass MinMaxStats:\n    \"\"\"A class that holds the min-max values of the tree.\"\"\"\n\n    def __init__(self, known_bounds: tuple):\n        self.minimum = known_bounds[0] if known_bounds else MAXIMUM_FLOAT_VALUE\n        self.maximum = known_bounds[1] if known_bounds else -MAXIMUM_FLOAT_VALUE\n\n    def update(self, value: float):\n        self.minimum = min(self.minimum, value)\n        self.maximum = max(self.maximum, value)\n\n    def normalize(self, value: float) -> float:\n        if self.maximum > self.minimum:\n            # We normalize only when we have set the maximum and minimum values.\n            return (value - self.minimum) / (self.maximum - self.minimum)\n        return value","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"584331068","text":"#!/usr/bin/python\n#\n# PyGreSQL - a Python interface for the PostgreSQL database.\n#\n# This file contains the classic pg module.\n#\n# Copyright (c) 2020 by the PyGreSQL Development Team\n#\n# The notification handler is based on pgnotify which is\n# Copyright (c) 2001 Ng Pheng Siong. All rights reserved.\n#\n# Please see the LICENSE.TXT file for specific restrictions.\n\n\"\"\"PyGreSQL classic interface.\n\nThis pg module implements some basic database management stuff.\nIt includes the _pg module and builds on it, providing the higher\nlevel wrapper class named DB with additional functionality.\nThis is known as the \"classic\" (\"old style\") PyGreSQL interface.\nFor a DB-API 2 compliant interface use the newer pgdb module.\n\"\"\"\n\nfrom __future__ import print_function, division\n\ntry:\n    from _pg import *\nexcept ImportError as e:\n    import os\n    libpq = 'libpq.'\n    if os.name == 'nt':\n        libpq += 'dll'\n        import sys\n        paths = [path for path in os.environ[\"PATH\"].split(os.pathsep)\n                 if os.path.exists(os.path.join(path, libpq))]\n        if sys.version_info >= (3, 8):\n            # see https://docs.python.org/3/whatsnew/3.8.html#ctypes\n            for path in paths:\n                with os.add_dll_directory(os.path.abspath(path)):\n                    try:\n                        from _pg import *\n                    except ImportError:\n                        pass\n                    else:\n                        e = None\n                        break\n        if paths:\n            libpq = 'compatible ' + libpq\n    else:\n        libpq += 'so'\n    if e:\n        # note: we could use \"raise from e\" here in Python 3\n        raise ImportError(\n            \"Cannot import shared library for PyGreSQL,\\n\"\n            \"probably because no %s is installed.\\n%s\" % (libpq, e))\n\n__version__ = version\n\n__all__ = [\n    'DB', 'Adapter',\n    'NotificationHandler', 'Typecasts',\n    'Bytea', 'Hstore', 'Json', 'Literal',\n    'Error', 'Warning',\n    'DataError', 'DatabaseError',\n    'IntegrityError', 'InterfaceError', 'InternalError',\n    'InvalidResultError', 'MultipleResultsError',\n    'NoResultError', 'NotSupportedError',\n    'OperationalError', 'ProgrammingError',\n    'INV_READ', 'INV_WRITE',\n    'POLLING_OK', 'POLLING_FAILED', 'POLLING_READING', 'POLLING_WRITING',\n    'SEEK_CUR', 'SEEK_END', 'SEEK_SET',\n    'TRANS_ACTIVE', 'TRANS_IDLE', 'TRANS_INERROR',\n    'TRANS_INTRANS', 'TRANS_UNKNOWN',\n    'cast_array', 'cast_hstore', 'cast_record',\n    'connect', 
'escape_bytea', 'escape_string', 'unescape_bytea',\n 'get_array', 'get_bool', 'get_bytea_escaped',\n 'get_datestyle', 'get_decimal', 'get_decimal_point',\n 'get_defbase', 'get_defhost', 'get_defopt', 'get_defport', 'get_defuser',\n 'get_jsondecode', 'get_typecast',\n 'set_array', 'set_bool', 'set_bytea_escaped',\n 'set_datestyle', 'set_decimal', 'set_decimal_point',\n 'set_defbase', 'set_defhost', 'set_defopt',\n 'set_defpasswd', 'set_defport', 'set_defuser',\n 'set_jsondecode', 'set_query_helpers', 'set_typecast',\n 'version', '__version__']\n\nimport select\nimport warnings\nimport weakref\n\nfrom datetime import date, time, datetime, timedelta, tzinfo\nfrom decimal import Decimal\nfrom math import isnan, isinf\nfrom collections import namedtuple, OrderedDict\nfrom operator import itemgetter\nfrom functools import partial\nfrom re import compile as regex\nfrom json import loads as jsondecode, dumps as jsonencode\nfrom uuid import UUID\n\ntry:\n # noinspection PyUnresolvedReferences\n from typing import Dict, List, Union\n has_typing = True\nexcept ImportError: # Python < 3.5\n has_typing = False\n\ntry: # noinspection PyUnresolvedReferences,PyUnboundLocalVariable\n long\nexcept NameError: # Python >= 3.0\n long = int\n\ntry: # noinspection PyUnresolvedReferences,PyUnboundLocalVariable\n unicode\nexcept NameError: # Python >= 3.0\n unicode = str\n\ntry: # noinspection PyUnresolvedReferences,PyUnboundLocalVariable\n basestring\nexcept NameError: # Python >= 3.0\n basestring = (str, bytes)\n\ntry:\n from functools import lru_cache\nexcept ImportError: # Python < 3.2\n from functools import update_wrapper\n try: # noinspection PyCompatibility\n from _thread import RLock\n except ImportError:\n class RLock: # for builds without threads\n def __enter__(self):\n pass\n\n def __exit__(self, exctype, excinst, exctb):\n pass\n\n def lru_cache(maxsize=128):\n \"\"\"Simplified functools.lru_cache decorator for one argument.\"\"\"\n\n def decorator(function):\n sentinel = object()\n cache = {}\n get = cache.get\n lock = RLock()\n root = []\n root_full = [root, False]\n root[:] = [root, root, None, None]\n\n if maxsize == 0:\n\n def wrapper(arg):\n res = function(arg)\n return res\n\n elif maxsize is None:\n\n def wrapper(arg):\n res = get(arg, sentinel)\n if res is not sentinel:\n return res\n res = function(arg)\n cache[arg] = res\n return res\n\n else:\n\n def wrapper(arg):\n with lock:\n link = get(arg)\n if link is not None:\n root = root_full[0]\n prv, nxt, _arg, res = link\n prv[1] = nxt\n nxt[0] = prv\n last = root[0]\n last[1] = root[0] = link\n link[0] = last\n link[1] = root\n return res\n res = function(arg)\n with lock:\n root, full = root_full\n if arg in cache:\n pass\n elif full:\n oldroot = root\n oldroot[2] = arg\n oldroot[3] = res\n root = root_full[0] = oldroot[1]\n oldarg = root[2]\n oldres = root[3] # noqa F481 (keep reference)\n root[2] = root[3] = None\n del cache[oldarg]\n cache[arg] = oldroot\n else:\n last = root[0]\n link = [last, root, arg, res]\n last[1] = root[0] = cache[arg] = link\n if len(cache) >= maxsize:\n root_full[1] = True\n return res\n\n wrapper.__wrapped__ = function\n return update_wrapper(wrapper, function)\n\n return decorator\n\n\n# Auxiliary classes and functions that are independent of a DB connection:\n\ntry: # noinspection PyUnresolvedReferences\n from inspect import signature\nexcept ImportError: # Python < 3.3\n from inspect import getargspec\n\n def get_args(func):\n return getargspec(func).args\nelse:\n\n def get_args(func):\n return 
list(signature(func).parameters)\n\ntry:\n from datetime import timezone\nexcept ImportError: # Python < 3.2\n\n class timezone(tzinfo):\n \"\"\"Simple timezone implementation.\"\"\"\n\n def __init__(self, offset, name=None):\n self.offset = offset\n if not name:\n minutes = self.offset.days * 1440 + self.offset.seconds // 60\n if minutes < 0:\n hours, minutes = divmod(-minutes, 60)\n hours = -hours\n else:\n hours, minutes = divmod(minutes, 60)\n name = 'UTC%+03d:%02d' % (hours, minutes)\n self.name = name\n\n def utcoffset(self, dt):\n return self.offset\n\n def tzname(self, dt):\n return self.name\n\n def dst(self, dt):\n return None\n\n timezone.utc = timezone(timedelta(0), 'UTC')\n\n _has_timezone = False\nelse:\n _has_timezone = True\n\n# time zones used in Postgres timestamptz output\n_timezones = dict(CET='+0100', EET='+0200', EST='-0500',\n GMT='+0000', HST='-1000', MET='+0100', MST='-0700',\n UCT='+0000', UTC='+0000', WET='+0000')\n\n\ndef _timezone_as_offset(tz):\n if tz.startswith(('+', '-')):\n if len(tz) < 5:\n return tz + '00'\n return tz.replace(':', '')\n return _timezones.get(tz, '+0000')\n\n\ndef _get_timezone(tz):\n tz = _timezone_as_offset(tz)\n minutes = 60 * int(tz[1:3]) + int(tz[3:5])\n if tz[0] == '-':\n minutes = -minutes\n return timezone(timedelta(minutes=minutes), tz)\n\n\ndef _oid_key(table):\n \"\"\"Build oid key from a table name.\"\"\"\n return 'oid(%s)' % table\n\n\nclass Bytea(bytes):\n \"\"\"Wrapper class for marking Bytea values.\"\"\"\n\n\nclass Hstore(dict):\n \"\"\"Wrapper class for marking hstore values.\"\"\"\n\n _re_quote = regex('^[Nn][Uu][Ll][Ll]$|[ ,=>]')\n\n @classmethod\n def _quote(cls, s):\n if s is None:\n return 'NULL'\n if not isinstance(s, basestring):\n s = str(s)\n if not s:\n return '\"\"'\n s = s.replace('\"', '\\\\\"')\n if cls._re_quote.search(s):\n s = '\"%s\"' % s\n return s\n\n def __str__(self):\n q = self._quote\n return ','.join('%s=>%s' % (q(k), q(v)) for k, v in self.items())\n\n\nclass Json:\n \"\"\"Wrapper class for marking Json values.\"\"\"\n\n def __init__(self, obj, encode=None):\n self.obj = obj\n self.encode = encode or jsonencode\n\n def __str__(self):\n obj = self.obj\n if isinstance(obj, basestring):\n return obj\n return self.encode(obj)\n\n\nclass _SimpleTypes(dict):\n \"\"\"Dictionary mapping pg_type names to simple type names.\n\n The corresponding Python types and simple names are also mapped.\n \"\"\"\n\n _type_aliases = {\n 'bool': [bool],\n 'bytea': [Bytea],\n 'date': ['interval', 'time', 'timetz', 'timestamp', 'timestamptz',\n 'abstime', 'reltime', # these are very old\n 'datetime', 'timedelta', # these do not really exist\n date, time, datetime, timedelta],\n 'float': ['float4', 'float8', float],\n 'int': ['cid', 'int2', 'int4', 'int8', 'oid', 'xid', int],\n 'hstore': [Hstore], 'json': ['jsonb', Json], 'uuid': [UUID],\n 'num': ['numeric', Decimal], 'money': [],\n 'text': ['bpchar', 'char', 'name', 'varchar',\n bytes, unicode, basestring]\n } # type: Dict[str, List[Union[str, type]]]\n\n if long is not int: # Python 2 has a separate long type\n _type_aliases['num'].append(long)\n\n # noinspection PyMissingConstructor\n def __init__(self):\n \"\"\"Initialize type mapping.\"\"\"\n for typ, keys in self._type_aliases.items():\n keys = [typ] + keys\n for key in keys:\n self[key] = typ\n if isinstance(key, str):\n self['_%s' % key] = '%s[]' % typ\n elif has_typing and not isinstance(key, tuple):\n self[List[key]] = '%s[]' % typ\n\n @staticmethod\n def __missing__(key):\n \"\"\"Unmapped types are interpreted 
as text.\"\"\"\n return 'text'\n\n def get_type_dict(self):\n \"\"\"Get a plain dictionary of only the types.\"\"\"\n return dict((key, typ) for key, typ in self.items()\n if not isinstance(key, (str, tuple)))\n\n\n_simpletypes = _SimpleTypes()\n_simple_type_dict = _simpletypes.get_type_dict()\n\n\ndef _quote_if_unqualified(param, name):\n \"\"\"Quote parameter representing a qualified name.\n\n Puts a quote_ident() call around the given parameter unless\n the name contains a dot, in which case the name is ambiguous\n (could be a qualified name or just a name with a dot in it)\n and must be quoted manually by the caller.\n \"\"\"\n if isinstance(name, basestring) and '.' not in name:\n return 'quote_ident(%s)' % (param,)\n return param\n\n\nclass _ParameterList(list):\n \"\"\"Helper class for building typed parameter lists.\"\"\"\n\n def add(self, value, typ=None):\n \"\"\"Typecast value with known database type and build parameter list.\n\n If this is a literal value, it will be returned as is. Otherwise, a\n placeholder will be returned and the parameter list will be augmented.\n \"\"\"\n # noinspection PyUnresolvedReferences\n value = self.adapt(value, typ)\n if isinstance(value, Literal):\n return value\n self.append(value)\n return '$%d' % len(self)\n\n\nclass Literal(str):\n \"\"\"Wrapper class for marking literal SQL values.\"\"\"\n\n\nclass AttrDict(OrderedDict):\n \"\"\"Simple read-only ordered dictionary for storing attribute names.\"\"\"\n\n def __init__(self, *args, **kw):\n self._read_only = False\n OrderedDict.__init__(self, *args, **kw)\n self._read_only = True\n error = self._read_only_error\n self.clear = self.update = error\n self.pop = self.setdefault = self.popitem = error\n\n def __setitem__(self, key, value):\n if self._read_only:\n self._read_only_error()\n OrderedDict.__setitem__(self, key, value)\n\n def __delitem__(self, key):\n if self._read_only:\n self._read_only_error()\n OrderedDict.__delitem__(self, key)\n\n @staticmethod\n def _read_only_error(*args, **kw):\n raise TypeError('This object is read-only')\n\n\nclass Adapter:\n \"\"\"Class providing methods for adapting parameters to the database.\"\"\"\n\n _bool_true_values = frozenset('t true 1 y yes on'.split())\n\n _date_literals = frozenset(\n 'current_date current_time'\n ' current_timestamp localtime localtimestamp'.split())\n\n _re_array_quote = regex(r'[{},\"\\\\\\s]|^[Nn][Uu][Ll][Ll]$')\n _re_record_quote = regex(r'[(,\"\\\\]')\n _re_array_escape = _re_record_escape = regex(r'([\"\\\\])')\n\n def __init__(self, db):\n self.db = weakref.proxy(db)\n\n @classmethod\n def _adapt_bool(cls, v):\n \"\"\"Adapt a boolean parameter.\"\"\"\n if isinstance(v, basestring):\n if not v:\n return None\n v = v.lower() in cls._bool_true_values\n return 't' if v else 'f'\n\n @classmethod\n def _adapt_date(cls, v):\n \"\"\"Adapt a date parameter.\"\"\"\n if not v:\n return None\n if isinstance(v, basestring) and v.lower() in cls._date_literals:\n return Literal(v)\n return v\n\n @staticmethod\n def _adapt_num(v):\n \"\"\"Adapt a numeric parameter.\"\"\"\n if not v and v != 0:\n return None\n return v\n\n _adapt_int = _adapt_float = _adapt_money = _adapt_num\n\n def _adapt_bytea(self, v):\n \"\"\"Adapt a bytea parameter.\"\"\"\n return self.db.escape_bytea(v)\n\n def _adapt_json(self, v):\n \"\"\"Adapt a json parameter.\"\"\"\n if not v:\n return None\n if isinstance(v, basestring):\n return v\n if isinstance(v, Json):\n return str(v)\n return self.db.encode_json(v)\n\n def _adapt_hstore(self, v):\n \"\"\"Adapt a hstore 
parameter.\"\"\"\n if not v:\n return None\n if isinstance(v, basestring):\n return v\n if isinstance(v, Hstore):\n return str(v)\n if isinstance(v, dict):\n return str(Hstore(v))\n raise TypeError('Hstore parameter %s has wrong type' % v)\n\n def _adapt_uuid(self, v):\n \"\"\"Adapt a UUID parameter.\"\"\"\n if not v:\n return None\n if isinstance(v, basestring):\n return v\n return str(v)\n\n @classmethod\n def _adapt_text_array(cls, v):\n \"\"\"Adapt a text type array parameter.\"\"\"\n if isinstance(v, list):\n adapt = cls._adapt_text_array\n return '{%s}' % ','.join(adapt(v) for v in v)\n if v is None:\n return 'null'\n if not v:\n return '\"\"'\n v = str(v)\n if cls._re_array_quote.search(v):\n v = '\"%s\"' % cls._re_array_escape.sub(r'\\\\\\1', v)\n return v\n\n _adapt_date_array = _adapt_text_array\n\n @classmethod\n def _adapt_bool_array(cls, v):\n \"\"\"Adapt a boolean array parameter.\"\"\"\n if isinstance(v, list):\n adapt = cls._adapt_bool_array\n return '{%s}' % ','.join(adapt(v) for v in v)\n if v is None:\n return 'null'\n if isinstance(v, basestring):\n if not v:\n return 'null'\n v = v.lower() in cls._bool_true_values\n return 't' if v else 'f'\n\n @classmethod\n def _adapt_num_array(cls, v):\n \"\"\"Adapt a numeric array parameter.\"\"\"\n if isinstance(v, list):\n adapt = cls._adapt_num_array\n return '{%s}' % ','.join(adapt(v) for v in v)\n if not v and v != 0:\n return 'null'\n return str(v)\n\n _adapt_int_array = _adapt_float_array = _adapt_money_array = \\\n _adapt_num_array\n\n def _adapt_bytea_array(self, v):\n \"\"\"Adapt a bytea array parameter.\"\"\"\n if isinstance(v, list):\n return b'{' + b','.join(\n self._adapt_bytea_array(v) for v in v) + b'}'\n if v is None:\n return b'null'\n return self.db.escape_bytea(v).replace(b'\\\\', b'\\\\\\\\')\n\n def _adapt_json_array(self, v):\n \"\"\"Adapt a json array parameter.\"\"\"\n if isinstance(v, list):\n adapt = self._adapt_json_array\n return '{%s}' % ','.join(adapt(v) for v in v)\n if not v:\n return 'null'\n if not isinstance(v, basestring):\n v = self.db.encode_json(v)\n if self._re_array_quote.search(v):\n v = '\"%s\"' % self._re_array_escape.sub(r'\\\\\\1', v)\n return v\n\n def _adapt_record(self, v, typ):\n \"\"\"Adapt a record parameter with given type.\"\"\"\n typ = self.get_attnames(typ).values()\n if len(typ) != len(v):\n raise TypeError('Record parameter %s has wrong size' % v)\n adapt = self.adapt\n value = []\n for v, t in zip(v, typ):\n v = adapt(v, t)\n if v is None:\n v = ''\n elif not v:\n v = '\"\"'\n else:\n if isinstance(v, bytes):\n if str is not bytes:\n v = v.decode('ascii')\n else:\n v = str(v)\n if self._re_record_quote.search(v):\n v = '\"%s\"' % self._re_record_escape.sub(r'\\\\\\1', v)\n value.append(v)\n return '(%s)' % ','.join(value)\n\n def adapt(self, value, typ=None):\n \"\"\"Adapt a value with known database type.\"\"\"\n if value is not None and not isinstance(value, Literal):\n if typ:\n simple = self.get_simple_name(typ)\n else:\n typ = simple = self.guess_simple_type(value) or 'text'\n pg_str = getattr(value, '__pg_str__', None)\n if pg_str:\n value = pg_str(typ)\n if simple == 'text':\n pass\n elif simple == 'record':\n if isinstance(value, tuple):\n value = self._adapt_record(value, typ)\n elif simple.endswith('[]'):\n if isinstance(value, list):\n adapt = getattr(self, '_adapt_%s_array' % simple[:-2])\n value = adapt(value)\n else:\n adapt = getattr(self, '_adapt_%s' % simple)\n value = adapt(value)\n return value\n\n @staticmethod\n def simple_type(name):\n \"\"\"Create a 
simple database type with given attribute names.\"\"\"\n typ = DbType(name)\n typ.simple = name\n return typ\n\n @staticmethod\n def get_simple_name(typ):\n \"\"\"Get the simple name of a database type.\"\"\"\n if isinstance(typ, DbType):\n # noinspection PyUnresolvedReferences\n return typ.simple\n return _simpletypes[typ]\n\n @staticmethod\n def get_attnames(typ):\n \"\"\"Get the attribute names of a composite database type.\"\"\"\n if isinstance(typ, DbType):\n return typ.attnames\n return {}\n\n @classmethod\n def guess_simple_type(cls, value):\n \"\"\"Try to guess which database type the given value has.\"\"\"\n # optimize for most frequent types\n try:\n return _simple_type_dict[type(value)]\n except KeyError:\n pass\n if isinstance(value, basestring):\n return 'text'\n if isinstance(value, bool):\n return 'bool'\n if isinstance(value, (int, long)):\n return 'int'\n if isinstance(value, float):\n return 'float'\n if isinstance(value, Decimal):\n return 'num'\n if isinstance(value, (date, time, datetime, timedelta)):\n return 'date'\n if isinstance(value, Bytea):\n return 'bytea'\n if isinstance(value, Json):\n return 'json'\n if isinstance(value, Hstore):\n return 'hstore'\n if isinstance(value, UUID):\n return 'uuid'\n if isinstance(value, list):\n return '%s[]' % (cls.guess_simple_base_type(value) or 'text',)\n if isinstance(value, tuple):\n simple_type = cls.simple_type\n guess = cls.guess_simple_type\n\n # noinspection PyUnusedLocal\n def get_attnames(self):\n return AttrDict((str(n + 1), simple_type(guess(v)))\n for n, v in enumerate(value))\n\n typ = simple_type('record')\n typ._get_attnames = get_attnames\n return typ\n\n @classmethod\n def guess_simple_base_type(cls, value):\n \"\"\"Try to guess the base type of a given array.\"\"\"\n for v in value:\n if isinstance(v, list):\n typ = cls.guess_simple_base_type(v)\n else:\n typ = cls.guess_simple_type(v)\n if typ:\n return typ\n\n def adapt_inline(self, value, nested=False):\n \"\"\"Adapt a value that is put into the SQL and needs to be quoted.\"\"\"\n if value is None:\n return 'NULL'\n if isinstance(value, Literal):\n return value\n if isinstance(value, Bytea):\n value = self.db.escape_bytea(value)\n if bytes is not str: # Python >= 3.0\n value = value.decode('ascii')\n elif isinstance(value, (datetime, date, time, timedelta)):\n value = str(value)\n if isinstance(value, basestring):\n value = self.db.escape_string(value)\n return \"'%s'\" % value\n if isinstance(value, bool):\n return 'true' if value else 'false'\n if isinstance(value, float):\n if isinf(value):\n return \"'-Infinity'\" if value < 0 else \"'Infinity'\"\n if isnan(value):\n return \"'NaN'\"\n return value\n if isinstance(value, (int, long, Decimal)):\n return value\n if isinstance(value, list):\n q = self.adapt_inline\n s = '[%s]' if nested else 'ARRAY[%s]'\n return s % ','.join(str(q(v, nested=True)) for v in value)\n if isinstance(value, tuple):\n q = self.adapt_inline\n return '(%s)' % ','.join(str(q(v)) for v in value)\n if isinstance(value, Json):\n value = self.db.escape_string(str(value))\n return \"'%s'::json\" % value\n if isinstance(value, Hstore):\n value = self.db.escape_string(str(value))\n return \"'%s'::hstore\" % value\n pg_repr = getattr(value, '__pg_repr__', None)\n if not pg_repr:\n raise InterfaceError(\n 'Do not know how to adapt type %s' % type(value))\n value = pg_repr()\n if isinstance(value, (tuple, list)):\n value = self.adapt_inline(value)\n return value\n\n def parameter_list(self):\n \"\"\"Return a parameter list for parameters 
with known database types.\n\n The list has an add(value, typ) method that will build up the\n list and return either the literal value or a placeholder.\n \"\"\"\n params = _ParameterList()\n params.adapt = self.adapt\n return params\n\n def format_query(self, command, values=None, types=None, inline=False):\n \"\"\"Format a database query using the given values and types.\n\n The optional types describe the values and must be passed as a list,\n tuple or string (that will be split on whitespace) when values are\n passed as a list or tuple, or as a dict if values are passed as a dict.\n\n If inline is set to True, then parameters will be passed inline\n together with the query string.\n \"\"\"\n if not values:\n return command, []\n if inline and types:\n raise ValueError('Typed parameters must be sent separately')\n params = self.parameter_list()\n if isinstance(values, (list, tuple)):\n if inline:\n adapt = self.adapt_inline\n literals = [adapt(value) for value in values]\n else:\n add = params.add\n if types:\n if isinstance(types, basestring):\n types = types.split()\n if (not isinstance(types, (list, tuple))\n or len(types) != len(values)):\n raise TypeError('The values and types do not match')\n literals = [add(value, typ)\n for value, typ in zip(values, types)]\n else:\n literals = [add(value) for value in values]\n command %= tuple(literals)\n elif isinstance(values, dict):\n # we want to allow extra keys in the dictionary,\n # so we first must find the values actually used in the command\n used_values = {}\n literals = dict.fromkeys(values, '')\n for key in values:\n del literals[key]\n try:\n command % literals\n except KeyError:\n used_values[key] = values[key]\n literals[key] = ''\n values = used_values\n if inline:\n adapt = self.adapt_inline\n literals = {key: adapt(value)\n for key, value in values.items()}\n else:\n add = params.add\n if types:\n if not isinstance(types, dict):\n raise TypeError('The values and types do not match')\n literals = {key: add(values[key], types.get(key))\n for key in sorted(values)}\n else:\n literals = {key: add(values[key])\n for key in sorted(values)}\n command %= literals\n else:\n raise TypeError('The values must be passed as tuple, list or dict')\n return command, params\n\n\ndef cast_bool(value):\n \"\"\"Cast a boolean value.\"\"\"\n if not get_bool():\n return value\n return value[0] == 't'\n\n\ndef cast_json(value):\n \"\"\"Cast a JSON value.\"\"\"\n cast = get_jsondecode()\n if not cast:\n return value\n return cast(value)\n\n\ndef cast_num(value):\n \"\"\"Cast a numeric value.\"\"\"\n return (get_decimal() or float)(value)\n\n\ndef cast_money(value):\n \"\"\"Cast a money value.\"\"\"\n point = get_decimal_point()\n if not point:\n return value\n if point != '.':\n value = value.replace(point, '.')\n value = value.replace('(', '-')\n value = ''.join(c for c in value if c.isdigit() or c in '.-')\n return (get_decimal() or float)(value)\n\n\ndef cast_int2vector(value):\n \"\"\"Cast an int2vector value.\"\"\"\n return [int(v) for v in value.split()]\n\n\ndef cast_date(value, connection):\n \"\"\"Cast a date value.\"\"\"\n # The output format depends on the server setting DateStyle. The default\n # setting ISO and the setting for German are actually unambiguous. 
The\n # order of days and months in the other two settings is however ambiguous,\n # so at least here we need to consult the setting to properly parse values.\n if value == '-infinity':\n return date.min\n if value == 'infinity':\n return date.max\n value = value.split()\n if value[-1] == 'BC':\n return date.min\n value = value[0]\n if len(value) > 10:\n return date.max\n fmt = connection.date_format()\n return datetime.strptime(value, fmt).date()\n\n\ndef cast_time(value):\n \"\"\"Cast a time value.\"\"\"\n fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S'\n return datetime.strptime(value, fmt).time()\n\n\n_re_timezone = regex('(.*)([+-].*)')\n\n\ndef cast_timetz(value):\n \"\"\"Cast a timetz value.\"\"\"\n tz = _re_timezone.match(value)\n if tz:\n value, tz = tz.groups()\n else:\n tz = '+0000'\n fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S'\n if _has_timezone:\n value += _timezone_as_offset(tz)\n fmt += '%z'\n return datetime.strptime(value, fmt).timetz()\n return datetime.strptime(value, fmt).timetz().replace(\n tzinfo=_get_timezone(tz))\n\n\ndef cast_timestamp(value, connection):\n \"\"\"Cast a timestamp value.\"\"\"\n if value == '-infinity':\n return datetime.min\n if value == 'infinity':\n return datetime.max\n value = value.split()\n if value[-1] == 'BC':\n return datetime.min\n fmt = connection.date_format()\n if fmt.endswith('-%Y') and len(value) > 2:\n value = value[1:5]\n if len(value[3]) > 4:\n return datetime.max\n fmt = ['%d %b' if fmt.startswith('%d') else '%b %d',\n '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y']\n else:\n if len(value[0]) > 10:\n return datetime.max\n fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S']\n return datetime.strptime(' '.join(value), ' '.join(fmt))\n\n\ndef cast_timestamptz(value, connection):\n \"\"\"Cast a timestamptz value.\"\"\"\n if value == '-infinity':\n return datetime.min\n if value == 'infinity':\n return datetime.max\n value = value.split()\n if value[-1] == 'BC':\n return datetime.min\n fmt = connection.date_format()\n if fmt.endswith('-%Y') and len(value) > 2:\n value = value[1:]\n if len(value[3]) > 4:\n return datetime.max\n fmt = ['%d %b' if fmt.startswith('%d') else '%b %d',\n '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y']\n value, tz = value[:-1], value[-1]\n else:\n if fmt.startswith('%Y-'):\n tz = _re_timezone.match(value[1])\n if tz:\n value[1], tz = tz.groups()\n else:\n tz = '+0000'\n else:\n value, tz = value[:-1], value[-1]\n if len(value[0]) > 10:\n return datetime.max\n fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S']\n if _has_timezone:\n value.append(_timezone_as_offset(tz))\n fmt.append('%z')\n return datetime.strptime(' '.join(value), ' '.join(fmt))\n return datetime.strptime(' '.join(value), ' '.join(fmt)).replace(\n tzinfo=_get_timezone(tz))\n\n\n_re_interval_sql_standard = regex(\n '(?:([+-])?([0-9]+)-([0-9]+) ?)?'\n '(?:([+-]?[0-9]+)(?!:) ?)?'\n '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\\\.([0-9]+))?)?')\n\n_re_interval_postgres = regex(\n '(?:([+-]?[0-9]+) ?years? ?)?'\n '(?:([+-]?[0-9]+) ?mons? ?)?'\n '(?:([+-]?[0-9]+) ?days? ?)?'\n '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\\\.([0-9]+))?)?')\n\n_re_interval_postgres_verbose = regex(\n '@ ?(?:([+-]?[0-9]+) ?years? ?)?'\n '(?:([+-]?[0-9]+) ?mons? ?)?'\n '(?:([+-]?[0-9]+) ?days? ?)?'\n '(?:([+-]?[0-9]+) ?hours? ?)?'\n '(?:([+-]?[0-9]+) ?mins? ?)?'\n '(?:([+-])?([0-9]+)(?:\\\\.([0-9]+))? ?secs?)? 
?(ago)?')\n\n_re_interval_iso_8601 = regex(\n 'P(?:([+-]?[0-9]+)Y)?'\n '(?:([+-]?[0-9]+)M)?'\n '(?:([+-]?[0-9]+)D)?'\n '(?:T(?:([+-]?[0-9]+)H)?'\n '(?:([+-]?[0-9]+)M)?'\n '(?:([+-])?([0-9]+)(?:\\\\.([0-9]+))?S)?)?')\n\n\ndef cast_interval(value):\n \"\"\"Cast an interval value.\"\"\"\n # The output format depends on the server setting IntervalStyle, but it's\n # not necessary to consult this setting to parse it. It's faster to just\n # check all possible formats, and there is no ambiguity here.\n m = _re_interval_iso_8601.match(value)\n if m:\n m = [d or '0' for d in m.groups()]\n secs_ago = m.pop(5) == '-'\n m = [int(d) for d in m]\n years, mons, days, hours, mins, secs, usecs = m\n if secs_ago:\n secs = -secs\n usecs = -usecs\n else:\n m = _re_interval_postgres_verbose.match(value)\n if m:\n m, ago = [d or '0' for d in m.groups()[:8]], m.group(9)\n secs_ago = m.pop(5) == '-'\n m = [-int(d) for d in m] if ago else [int(d) for d in m]\n years, mons, days, hours, mins, secs, usecs = m\n if secs_ago:\n secs = - secs\n usecs = -usecs\n else:\n m = _re_interval_postgres.match(value)\n if m and any(m.groups()):\n m = [d or '0' for d in m.groups()]\n hours_ago = m.pop(3) == '-'\n m = [int(d) for d in m]\n years, mons, days, hours, mins, secs, usecs = m\n if hours_ago:\n hours = -hours\n mins = -mins\n secs = -secs\n usecs = -usecs\n else:\n m = _re_interval_sql_standard.match(value)\n if m and any(m.groups()):\n m = [d or '0' for d in m.groups()]\n years_ago = m.pop(0) == '-'\n hours_ago = m.pop(3) == '-'\n m = [int(d) for d in m]\n years, mons, days, hours, mins, secs, usecs = m\n if years_ago:\n years = -years\n mons = -mons\n if hours_ago:\n hours = -hours\n mins = -mins\n secs = -secs\n usecs = -usecs\n else:\n raise ValueError('Cannot parse interval: %s' % value)\n days += 365 * years + 30 * mons\n return timedelta(days=days, hours=hours, minutes=mins,\n seconds=secs, microseconds=usecs)\n\n\nclass Typecasts(dict):\n \"\"\"Dictionary mapping database types to typecast functions.\n\n The cast functions get passed the string representation of a value in\n the database which they need to convert to a Python object. 
The\n passed string will never be None since NULL values are already\n handled before the cast function is called.\n\n Note that the basic types are already handled by the C extension.\n They only need to be handled here as record or array components.\n \"\"\"\n\n # the default cast functions\n # (str functions are ignored but have been added for faster access)\n defaults = {\n 'char': str, 'bpchar': str, 'name': str,\n 'text': str, 'varchar': str,\n 'bool': cast_bool, 'bytea': unescape_bytea,\n 'int2': int, 'int4': int, 'serial': int, 'int8': long, 'oid': int,\n 'hstore': cast_hstore, 'json': cast_json, 'jsonb': cast_json,\n 'float4': float, 'float8': float,\n 'numeric': cast_num, 'money': cast_money,\n 'date': cast_date, 'interval': cast_interval,\n 'time': cast_time, 'timetz': cast_timetz,\n 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz,\n 'int2vector': cast_int2vector, 'uuid': UUID,\n 'anyarray': cast_array, 'record': cast_record}\n\n connection = None # will be set in a connection specific instance\n\n def __missing__(self, typ):\n \"\"\"Create a cast function if it is not cached.\n\n Note that this class never raises a KeyError,\n but returns None when no special cast function exists.\n \"\"\"\n if not isinstance(typ, str):\n raise TypeError('Invalid type: %s' % typ)\n cast = self.defaults.get(typ)\n if cast:\n # store default for faster access\n cast = self._add_connection(cast)\n self[typ] = cast\n elif typ.startswith('_'):\n base_cast = self[typ[1:]]\n cast = self.create_array_cast(base_cast)\n if base_cast:\n self[typ] = cast\n else:\n attnames = self.get_attnames(typ)\n if attnames:\n casts = [self[v.pgtype] for v in attnames.values()]\n cast = self.create_record_cast(typ, attnames, casts)\n self[typ] = cast\n return cast\n\n @staticmethod\n def _needs_connection(func):\n \"\"\"Check if a typecast function needs a connection argument.\"\"\"\n try:\n args = get_args(func)\n except (TypeError, ValueError):\n return False\n else:\n return 'connection' in args[1:]\n\n def _add_connection(self, cast):\n \"\"\"Add a connection argument to the typecast function if necessary.\"\"\"\n if not self.connection or not self._needs_connection(cast):\n return cast\n return partial(cast, connection=self.connection)\n\n def get(self, typ, default=None):\n \"\"\"Get the typecast function for the given database type.\"\"\"\n return self[typ] or default\n\n def set(self, typ, cast):\n \"\"\"Set a typecast function for the specified database type(s).\"\"\"\n if isinstance(typ, basestring):\n typ = [typ]\n if cast is None:\n for t in typ:\n self.pop(t, None)\n self.pop('_%s' % t, None)\n else:\n if not callable(cast):\n raise TypeError(\"Cast parameter must be callable\")\n for t in typ:\n self[t] = self._add_connection(cast)\n self.pop('_%s' % t, None)\n\n def reset(self, typ=None):\n \"\"\"Reset the typecasts for the specified type(s) to their defaults.\n\n When no type is specified, all typecasts will be reset.\n \"\"\"\n if typ is None:\n self.clear()\n else:\n if isinstance(typ, basestring):\n typ = [typ]\n for t in typ:\n self.pop(t, None)\n\n @classmethod\n def get_default(cls, typ):\n \"\"\"Get the default typecast function for the given database type.\"\"\"\n return cls.defaults.get(typ)\n\n @classmethod\n def set_default(cls, typ, cast):\n \"\"\"Set a default typecast function for the given database type(s).\"\"\"\n if isinstance(typ, basestring):\n typ = [typ]\n defaults = cls.defaults\n if cast is None:\n for t in typ:\n defaults.pop(t, None)\n defaults.pop('_%s' % t, 
None)\n else:\n if not callable(cast):\n raise TypeError(\"Cast parameter must be callable\")\n for t in typ:\n defaults[t] = cast\n defaults.pop('_%s' % t, None)\n\n # noinspection PyMethodMayBeStatic,PyUnusedLocal\n def get_attnames(self, typ):\n \"\"\"Return the fields for the given record type.\n\n This method will be replaced with the get_attnames() method of DbTypes.\n \"\"\"\n return {}\n\n # noinspection PyMethodMayBeStatic\n def dateformat(self):\n \"\"\"Return the current date format.\n\n This method will be replaced with the dateformat() method of DbTypes.\n \"\"\"\n return '%Y-%m-%d'\n\n def create_array_cast(self, basecast):\n \"\"\"Create an array typecast for the given base cast.\"\"\"\n cast_array = self['anyarray']\n\n def cast(v):\n return cast_array(v, basecast)\n return cast\n\n def create_record_cast(self, name, fields, casts):\n \"\"\"Create a named record typecast for the given fields and casts.\"\"\"\n cast_record = self['record']\n record = namedtuple(name, fields)\n\n def cast(v):\n # noinspection PyArgumentList\n return record(*cast_record(v, casts))\n return cast\n\n\ndef get_typecast(typ):\n \"\"\"Get the global typecast function for the given database type(s).\"\"\"\n return Typecasts.get_default(typ)\n\n\ndef set_typecast(typ, cast):\n \"\"\"Set a global typecast function for the given database type(s).\n\n Note that connections cache cast functions. To be sure a global change\n is picked up by a running connection, call db.db_types.reset_typecast().\n \"\"\"\n Typecasts.set_default(typ, cast)\n\n\nclass DbType(str):\n \"\"\"Class augmenting the simple type name with additional info.\n\n The following additional information is provided:\n\n oid: the PostgreSQL type OID\n pgtype: the internal PostgreSQL data type name\n regtype: the registered PostgreSQL data type name\n simple: the more coarse-grained PyGreSQL type name\n typlen: the internal size, negative if variable\n typtype: b = base type, c = composite type etc.\n category: A = Array, b = Boolean, C = Composite etc.\n delim: delimiter for array types\n relid: corresponding table for composite types\n attnames: attributes for composite types\n \"\"\"\n\n @property\n def attnames(self):\n \"\"\"Get names and types of the fields of a composite type.\"\"\"\n # noinspection PyUnresolvedReferences\n return self._get_attnames(self)\n\n\nclass DbTypes(dict):\n \"\"\"Cache for PostgreSQL data types.\n\n This cache maps type OIDs and names to DbType objects containing\n information on the associated database type.\n \"\"\"\n\n _num_types = frozenset('int float num money int2 int4 int8'\n ' float4 float8 numeric money'.split())\n\n def __init__(self, db):\n \"\"\"Initialize type cache for connection.\"\"\"\n super(DbTypes, self).__init__()\n self._db = weakref.proxy(db)\n self._regtypes = False\n self._typecasts = Typecasts()\n self._typecasts.get_attnames = self.get_attnames\n self._typecasts.connection = self._db\n if db.server_version < 80400:\n # very old remote databases (not officially supported)\n self._query_pg_type = (\n \"SELECT oid, typname, typname::text::regtype,\"\n \" typlen, typtype, null as typcategory, typdelim, typrelid\"\n \" FROM pg_catalog.pg_type\"\n \" WHERE oid OPERATOR(pg_catalog.=) %s::regtype\")\n else:\n self._query_pg_type = (\n \"SELECT oid, typname, typname::regtype,\"\n \" typlen, typtype, typcategory, typdelim, typrelid\"\n \" FROM pg_catalog.pg_type\"\n \" WHERE oid OPERATOR(pg_catalog.=) %s::regtype\")\n\n def add(self, oid, pgtype, regtype,\n typlen, typtype, category, delim, 
relid):\n \"\"\"Create a PostgreSQL type name with additional info.\"\"\"\n if oid in self:\n return self[oid]\n simple = 'record' if relid else _simpletypes[pgtype]\n typ = DbType(regtype if self._regtypes else simple)\n typ.oid = oid\n typ.simple = simple\n typ.pgtype = pgtype\n typ.regtype = regtype\n typ.typlen = typlen\n typ.typtype = typtype\n typ.category = category\n typ.delim = delim\n typ.relid = relid\n typ._get_attnames = self.get_attnames\n return typ\n\n def __missing__(self, key):\n \"\"\"Get the type info from the database if it is not cached.\"\"\"\n try:\n q = self._query_pg_type % (_quote_if_unqualified('$1', key),)\n res = self._db.query(q, (key,)).getresult()\n except ProgrammingError:\n res = None\n if not res:\n raise KeyError('Type %s could not be found' % (key,))\n res = res[0]\n typ = self.add(*res)\n self[typ.oid] = self[typ.pgtype] = typ\n return typ\n\n def get(self, key, default=None):\n \"\"\"Get the type even if it is not cached.\"\"\"\n try:\n return self[key]\n except KeyError:\n return default\n\n def get_attnames(self, typ):\n \"\"\"Get names and types of the fields of a composite type.\"\"\"\n if not isinstance(typ, DbType):\n typ = self.get(typ)\n if not typ:\n return None\n if not typ.relid:\n return None\n return self._db.get_attnames(typ.relid, with_oid=False)\n\n def get_typecast(self, typ):\n \"\"\"Get the typecast function for the given database type.\"\"\"\n return self._typecasts.get(typ)\n\n def set_typecast(self, typ, cast):\n \"\"\"Set a typecast function for the specified database type(s).\"\"\"\n self._typecasts.set(typ, cast)\n\n def reset_typecast(self, typ=None):\n \"\"\"Reset the typecast function for the specified database type(s).\"\"\"\n self._typecasts.reset(typ)\n\n def typecast(self, value, typ):\n \"\"\"Cast the given value according to the given database type.\"\"\"\n if value is None:\n # for NULL values, no typecast is necessary\n return None\n if not isinstance(typ, DbType):\n typ = self.get(typ)\n if typ:\n typ = typ.pgtype\n cast = self.get_typecast(typ) if typ else None\n if not cast or cast is str:\n # no typecast is necessary\n return value\n return cast(value)\n\n\n_re_fieldname = regex('^[A-Za-z][_a-zA-Z0-9]*$')\n\n\n# The result rows for database operations are returned as named tuples\n# by default. 
Since creating namedtuple classes is a somewhat expensive\n# operation, we cache up to 1024 of these classes by default.\n\n# noinspection PyUnresolvedReferences\n@lru_cache(maxsize=1024)\ndef _row_factory(names):\n \"\"\"Get a namedtuple factory for row results with the given names.\"\"\"\n try:\n return namedtuple('Row', names, rename=True)._make\n except ValueError: # there is still a problem with the field names\n names = ['column_%d' % (n,) for n in range(len(names))]\n return namedtuple('Row', names)._make\n\n\ndef set_row_factory_size(maxsize):\n \"\"\"Change the size of the namedtuple factory cache.\n\n If maxsize is set to None, the cache can grow without bound.\n \"\"\"\n # noinspection PyGlobalUndefined\n global _row_factory\n _row_factory = lru_cache(maxsize)(_row_factory.__wrapped__)\n\n\n# Helper functions used by the query object\n\ndef _dictiter(q):\n \"\"\"Get query result as an iterator of dictionaries.\"\"\"\n fields = q.listfields()\n for r in q:\n yield dict(zip(fields, r))\n\n\ndef _namediter(q):\n \"\"\"Get query result as an iterator of named tuples.\"\"\"\n row = _row_factory(q.listfields())\n for r in q:\n yield row(r)\n\n\ndef _namednext(q):\n \"\"\"Get next row from query result as a named tuple.\"\"\"\n return _row_factory(q.listfields())(next(q))\n\n\ndef _scalariter(q):\n \"\"\"Get query result as an iterator of scalar values.\"\"\"\n for r in q:\n yield r[0]\n\n\nclass _MemoryQuery:\n \"\"\"Class that embodies a given query result.\"\"\"\n\n def __init__(self, result, fields):\n \"\"\"Create query from given result rows and field names.\"\"\"\n self.result = result\n self.fields = tuple(fields)\n\n def listfields(self):\n \"\"\"Return the stored field names of this query.\"\"\"\n return self.fields\n\n def getresult(self):\n \"\"\"Return the stored result of this query.\"\"\"\n return self.result\n\n def __iter__(self):\n return iter(self.result)\n\n\ndef _db_error(msg, cls=DatabaseError):\n \"\"\"Return DatabaseError with empty sqlstate attribute.\"\"\"\n error = cls(msg)\n error.sqlstate = None\n return error\n\n\ndef _int_error(msg):\n \"\"\"Return InternalError.\"\"\"\n return _db_error(msg, InternalError)\n\n\ndef _prg_error(msg):\n \"\"\"Return ProgrammingError.\"\"\"\n return _db_error(msg, ProgrammingError)\n\n\n# Initialize the C module\n\nset_decimal(Decimal)\nset_jsondecode(jsondecode)\nset_query_helpers(_dictiter, _namediter, _namednext, _scalariter)\n\n\n# The notification handler\n\nclass NotificationHandler(object):\n \"\"\"A PostgreSQL client-side asynchronous notification handler.\"\"\"\n\n def __init__(self, db, event, callback=None,\n arg_dict=None, timeout=None, stop_event=None):\n \"\"\"Initialize the notification handler.\n\n You must pass a PyGreSQL database connection, the name of an\n event (notification channel) to listen for and a callback function.\n\n You can also specify a dictionary arg_dict that will be passed as\n the single argument to the callback function, and a timeout value\n in seconds (a floating point number denotes fractions of seconds).\n If it is absent or None, the callers will never time out. If the\n timeout is reached, the callback function will be called with a\n single argument that is None. If you set the timeout to zero,\n the handler will poll notifications synchronously and return.\n\n You can specify the name of the event that will be used to signal\n the handler to stop listening as stop_event. 
By default, it will\n be the event name prefixed with 'stop_'.\n \"\"\"\n self.db = db\n self.event = event\n self.stop_event = stop_event or 'stop_%s' % event\n self.listening = False\n self.callback = callback\n if arg_dict is None:\n arg_dict = {}\n self.arg_dict = arg_dict\n self.timeout = timeout\n\n def __del__(self):\n self.unlisten()\n\n def close(self):\n \"\"\"Stop listening and close the connection.\"\"\"\n if self.db:\n self.unlisten()\n self.db.close()\n self.db = None\n\n def listen(self):\n \"\"\"Start listening for the event and the stop event.\"\"\"\n if not self.listening:\n self.db.query('listen \"%s\"' % self.event)\n self.db.query('listen \"%s\"' % self.stop_event)\n self.listening = True\n\n def unlisten(self):\n \"\"\"Stop listening for the event and the stop event.\"\"\"\n if self.listening:\n self.db.query('unlisten \"%s\"' % self.event)\n self.db.query('unlisten \"%s\"' % self.stop_event)\n self.listening = False\n\n def notify(self, db=None, stop=False, payload=None):\n \"\"\"Generate a notification.\n\n Optionally, you can pass a payload with the notification.\n\n If you set the stop flag, a stop notification will be sent that\n will cause the handler to stop listening.\n\n Note: If the notification handler is running in another thread, you\n must pass a different database connection since PyGreSQL database\n connections are not thread-safe.\n \"\"\"\n if self.listening:\n if not db:\n db = self.db\n q = 'notify \"%s\"' % (self.stop_event if stop else self.event)\n if payload:\n q += \", '%s'\" % payload\n return db.query(q)\n\n def __call__(self):\n \"\"\"Invoke the notification handler.\n\n The handler is a loop that listens for notifications on the event\n and stop event channels. When either of these notifications are\n received, its associated 'pid', 'event' and 'extra' (the payload\n passed with the notification) are inserted into its arg_dict\n dictionary and the callback is invoked with this dictionary as\n a single argument. 
When the handler receives a stop event, it\n        stops listening to both events and returns.\n\n        In the special case that the timeout of the handler has been set\n        to zero, the handler will poll all events synchronously and return.\n        It will keep listening until it receives a stop event.\n\n        Note: If you run this loop in another thread, don't use the same\n        database connection for database operations in the main thread.\n        \"\"\"\n        self.listen()\n        poll = self.timeout == 0\n        if not poll:\n            rlist = [self.db.fileno()]\n        while self.listening:\n            # noinspection PyUnboundLocalVariable\n            if poll or select.select(rlist, [], [], self.timeout)[0]:\n                while self.listening:\n                    notice = self.db.getnotify()\n                    if not notice:  # no more messages\n                        break\n                    event, pid, extra = notice\n                    if event not in (self.event, self.stop_event):\n                        self.unlisten()\n                        raise _db_error(\n                            'Listening for \"%s\" and \"%s\", but notified of \"%s\"'\n                            % (self.event, self.stop_event, event))\n                    if event == self.stop_event:\n                        self.unlisten()\n                    self.arg_dict.update(pid=pid, event=event, extra=extra)\n                    self.callback(self.arg_dict)\n                if poll:\n                    break\n            else:  # we timed out\n                self.unlisten()\n                self.callback(None)\n\n\ndef pgnotify(*args, **kw):\n    \"\"\"Same as NotificationHandler, under the traditional name.\"\"\"\n    warnings.warn(\"pgnotify is deprecated, use NotificationHandler instead\",\n                  DeprecationWarning, stacklevel=2)\n    return NotificationHandler(*args, **kw)
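# A minimal usage sketch for the notification handler above (database name,
# channel and timeout are illustrative, not part of this module):
#
#     db = DB(dbname='test')  # the DB wrapper defined below
#
#     def on_notice(arg):
#         # arg is None on timeout, otherwise the arg_dict updated with
#         # the notifying 'pid', the 'event' name and the 'extra' payload
#         print(arg)
#
#     handler = NotificationHandler(db, 'my_event', on_notice, timeout=10)
#     handler()  # blocks until a stop event arrives or the timeout fires
#
# A stop notification can come from another connection, e.g.:
#     DB(dbname='test').query('notify stop_my_event')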
\n\n\n# The actual PostgreSQL database connection interface:\n\nclass DB:\n    \"\"\"Wrapper class for the _pg connection type.\"\"\"\n\n    db = None  # invalid fallback for underlying connection\n\n    def __init__(self, *args, **kw):\n        \"\"\"Create a new connection.\n\n        You can pass either the connection parameters or an existing\n        _pg or pgdb connection. This allows you to use the methods\n        of the classic pg interface with a DB-API 2 pgdb connection.\n        \"\"\"\n        if not args and len(kw) == 1:\n            db = kw.get('db')\n        elif not kw and len(args) == 1:\n            db = args[0]\n        else:\n            db = None\n        if db:\n            if isinstance(db, DB):\n                db = db.db\n            else:\n                try:\n                    # noinspection PyUnresolvedReferences\n                    db = db._cnx\n                except AttributeError:\n                    pass\n        if not db or not hasattr(db, 'db') or not hasattr(db, 'query'):\n            db = connect(*args, **kw)\n            self._db_args = args, kw\n            self._closeable = True\n        else:\n            self._db_args = db\n            self._closeable = False\n        self.db = db\n        self.dbname = db.db\n        self._regtypes = False\n        self._attnames = {}\n        self._pkeys = {}\n        self._privileges = {}\n        self.adapter = Adapter(self)\n        self.dbtypes = DbTypes(self)\n        if db.server_version < 80400:\n            # very old remote databases (not officially supported)\n            self._query_attnames = (\n                \"SELECT a.attname, t.oid, t.typname, t.typname::text::regtype,\"\n                \" t.typlen, t.typtype, null as typcategory,\"\n                \" t.typdelim, t.typrelid\"\n                \" FROM pg_catalog.pg_attribute a\"\n                \" JOIN pg_catalog.pg_type t\"\n                \" ON t.oid OPERATOR(pg_catalog.=) a.atttypid\"\n                \" WHERE a.attrelid OPERATOR(pg_catalog.=) %s::regclass\"\n                \" AND %s AND NOT a.attisdropped ORDER BY a.attnum\")\n        else:\n            self._query_attnames = (\n                \"SELECT a.attname, t.oid, t.typname, t.typname::regtype,\"\n                \" t.typlen, t.typtype, t.typcategory, t.typdelim, t.typrelid\"\n                \" FROM pg_catalog.pg_attribute a\"\n                \" JOIN pg_catalog.pg_type t\"\n                \" ON t.oid OPERATOR(pg_catalog.=) a.atttypid\"\n                \" WHERE a.attrelid OPERATOR(pg_catalog.=) %s::regclass\"\n                \" AND %s AND NOT a.attisdropped ORDER BY a.attnum\")\n        db.set_cast_hook(self.dbtypes.typecast)\n        # For debugging scripts, self.debug can be set\n        # * to a string format specification (e.g. in CGI set to \"%s
    \"),\n # * to a file object to write debug statements or\n # * to a callable object which takes a string argument\n # * to any other true value to just print debug statements\n self.debug = None\n\n def __getattr__(self, name):\n # All undefined members are same as in underlying connection:\n if self.db:\n return getattr(self.db, name)\n else:\n raise _int_error('Connection is not valid')\n\n def __dir__(self):\n # Custom dir function including the attributes of the connection:\n attrs = set(self.__class__.__dict__)\n attrs.update(self.__dict__)\n attrs.update(dir(self.db))\n return sorted(attrs)\n\n # Context manager methods\n\n def __enter__(self):\n \"\"\"Enter the runtime context. This will start a transaction.\"\"\"\n self.begin()\n return self\n\n def __exit__(self, et, ev, tb):\n \"\"\"Exit the runtime context. This will end the transaction.\"\"\"\n if et is None and ev is None and tb is None:\n self.commit()\n else:\n self.rollback()\n\n def __del__(self):\n try:\n db = self.db\n except AttributeError:\n db = None\n if db:\n try:\n db.set_cast_hook(None)\n except TypeError:\n pass # probably already closed\n if self._closeable:\n try:\n db.close()\n except InternalError:\n pass # probably already closed\n\n # Auxiliary methods\n\n def _do_debug(self, *args):\n \"\"\"Print a debug message\"\"\"\n if self.debug:\n s = '\\n'.join(str(arg) for arg in args)\n if isinstance(self.debug, basestring):\n print(self.debug % s)\n elif hasattr(self.debug, 'write'):\n # noinspection PyCallingNonCallable\n self.debug.write(s + '\\n')\n elif callable(self.debug):\n self.debug(s)\n else:\n print(s)\n\n def _escape_qualified_name(self, s):\n \"\"\"Escape a qualified name.\n\n Escapes the name for use as an SQL identifier, unless the\n name contains a dot, in which case the name is ambiguous\n (could be a qualified name or just a name with a dot in it)\n and must be quoted manually by the caller.\n \"\"\"\n if '.' 
not in s:\n s = self.escape_identifier(s)\n return s\n\n @staticmethod\n def _make_bool(d):\n \"\"\"Get boolean value corresponding to d.\"\"\"\n return bool(d) if get_bool() else ('t' if d else 'f')\n\n @staticmethod\n def _list_params(params):\n \"\"\"Create a human readable parameter list.\"\"\"\n return ', '.join('$%d=%r' % (n, v) for n, v in enumerate(params, 1))\n\n # Public methods\n\n # escape_string and escape_bytea exist as methods,\n # so we define unescape_bytea as a method as well\n unescape_bytea = staticmethod(unescape_bytea)\n\n @staticmethod\n def decode_json(s):\n \"\"\"Decode a JSON string coming from the database.\"\"\"\n return (get_jsondecode() or jsondecode)(s)\n\n @staticmethod\n def encode_json(d):\n \"\"\"Encode a JSON string for use within SQL.\"\"\"\n return jsonencode(d)\n\n def close(self):\n \"\"\"Close the database connection.\"\"\"\n # Wraps shared library function so we can track state.\n db = self.db\n if db:\n try:\n db.set_cast_hook(None)\n except TypeError:\n pass # probably already closed\n if self._closeable:\n db.close()\n self.db = None\n else:\n raise _int_error('Connection already closed')\n\n def reset(self):\n \"\"\"Reset connection with current parameters.\n\n All derived queries and large objects derived from this connection\n will not be usable after this call.\n\n \"\"\"\n if self.db:\n self.db.reset()\n else:\n raise _int_error('Connection already closed')\n\n def reopen(self):\n \"\"\"Reopen connection to the database.\n\n Used in case we need another connection to the same database.\n Note that we can still reopen a database that we have closed.\n\n \"\"\"\n # There is no such shared library function.\n if self._closeable:\n db = connect(*self._db_args[0], **self._db_args[1])\n if self.db:\n self.db.set_cast_hook(None)\n self.db.close()\n db.set_cast_hook(self.dbtypes.typecast)\n self.db = db\n else:\n self.db = self._db_args\n\n def begin(self, mode=None):\n \"\"\"Begin a transaction.\"\"\"\n qstr = 'BEGIN'\n if mode:\n qstr += ' ' + mode\n return self.query(qstr)\n\n start = begin\n\n def commit(self):\n \"\"\"Commit the current transaction.\"\"\"\n return self.query('COMMIT')\n\n end = commit\n\n def rollback(self, name=None):\n \"\"\"Roll back the current transaction.\"\"\"\n qstr = 'ROLLBACK'\n if name:\n qstr += ' TO ' + name\n return self.query(qstr)\n\n abort = rollback\n\n def savepoint(self, name):\n \"\"\"Define a new savepoint within the current transaction.\"\"\"\n return self.query('SAVEPOINT ' + name)\n\n def release(self, name):\n \"\"\"Destroy a previously defined savepoint.\"\"\"\n return self.query('RELEASE ' + name)\n\n def get_parameter(self, parameter):\n \"\"\"Get the value of a run-time parameter.\n\n If the parameter is a string, the return value will also be a string\n that is the current setting of the run-time parameter with that name.\n\n You can get several parameters at once by passing a list, set or dict.\n When passing a list of parameter names, the return value will be a\n corresponding list of parameter settings. When passing a set of\n parameter names, a new dict will be returned, mapping these parameter\n names to their settings. 
Finally, if you pass a dict as parameter,\n its values will be set to the current parameter settings corresponding\n to its keys.\n\n By passing the special name 'all' as the parameter, you can get a dict\n of all existing configuration parameters.\n \"\"\"\n if isinstance(parameter, basestring):\n parameter = [parameter]\n values = None\n elif isinstance(parameter, (list, tuple)):\n values = []\n elif isinstance(parameter, (set, frozenset)):\n values = {}\n elif isinstance(parameter, dict):\n values = parameter\n else:\n raise TypeError(\n 'The parameter must be a string, list, set or dict')\n if not parameter:\n raise TypeError('No parameter has been specified')\n params = {} if isinstance(values, dict) else []\n for key in parameter:\n param = key.strip().lower() if isinstance(\n key, basestring) else None\n if not param:\n raise TypeError('Invalid parameter')\n if param == 'all':\n q = 'SHOW ALL'\n values = self.db.query(q).getresult()\n values = {value[0]: value[1] for value in values}\n break\n if isinstance(values, dict):\n params[param] = key\n else:\n params.append(param)\n else:\n for param in params:\n q = 'SHOW %s' % (param,)\n value = self.db.query(q).singlescalar()\n if values is None:\n values = value\n elif isinstance(values, list):\n values.append(value)\n else:\n values[params[param]] = value\n return values\n\n def set_parameter(self, parameter, value=None, local=False):\n \"\"\"Set the value of a run-time parameter.\n\n If the parameter and the value are strings, the run-time parameter\n will be set to that value. If no value or None is passed as a value,\n then the run-time parameter will be restored to its default value.\n\n You can set several parameters at once by passing a list of parameter\n names, together with a single value that all parameters should be\n set to or with a corresponding list of values. You can also pass\n the parameters as a set if you only provide a single value.\n Finally, you can pass a dict with parameter names as keys. In this\n case, you should not pass a value, since the values for the parameters\n will be taken from the dict.\n\n By passing the special name 'all' as the parameter, you can reset\n all existing settable run-time parameters to their default values.\n\n If you set local to True, then the command takes effect for only the\n current transaction. After commit() or rollback(), the session-level\n setting takes effect again. 
Setting local to True will appear to\n have no effect if it is executed outside a transaction, since the\n transaction will end immediately.\n \"\"\"\n if isinstance(parameter, basestring):\n parameter = {parameter: value}\n elif isinstance(parameter, (list, tuple)):\n if isinstance(value, (list, tuple)):\n parameter = dict(zip(parameter, value))\n else:\n parameter = dict.fromkeys(parameter, value)\n elif isinstance(parameter, (set, frozenset)):\n if isinstance(value, (list, tuple, set, frozenset)):\n value = set(value)\n if len(value) == 1:\n value = value.pop()\n if not (value is None or isinstance(value, basestring)):\n raise ValueError(\n 'A single value must be specified'\n ' when parameter is a set')\n parameter = dict.fromkeys(parameter, value)\n elif isinstance(parameter, dict):\n if value is not None:\n raise ValueError(\n 'A value must not be specified'\n ' when parameter is a dictionary')\n else:\n raise TypeError(\n 'The parameter must be a string, list, set or dict')\n if not parameter:\n raise TypeError('No parameter has been specified')\n params = {}\n for key, value in parameter.items():\n param = key.strip().lower() if isinstance(\n key, basestring) else None\n if not param:\n raise TypeError('Invalid parameter')\n if param == 'all':\n if value is not None:\n raise ValueError(\n 'A value must not be specified'\n \" when parameter is 'all'\")\n params = {'all': None}\n break\n params[param] = value\n local = ' LOCAL' if local else ''\n for param, value in params.items():\n if value is None:\n q = 'RESET%s %s' % (local, param)\n else:\n q = 'SET%s %s TO %s' % (local, param, value)\n self._do_debug(q)\n self.db.query(q)\n\n def query(self, command, *args):\n \"\"\"Execute a SQL command string.\n\n This method simply sends a SQL query to the database. If the query is\n an insert statement that inserted exactly one row into a table that\n has OIDs, the return value is the OID of the newly inserted row.\n If the query is an update or delete statement, or an insert statement\n that did not insert exactly one row in a table with OIDs, then the\n number of rows affected is returned as a string. If it is a statement\n that returns rows as a result (usually a select statement, but maybe\n also an \"insert/update ... returning\" statement), this method returns\n a Query object that can be accessed via getresult() or dictresult()\n or simply printed. Otherwise, it returns `None`.\n\n The query can contain numbered parameters of the form $1 in place\n of any data constant. Arguments given after the query string will\n be substituted for the corresponding numbered parameter. Parameter\n values can also be given as a single list or tuple argument.\n \"\"\"\n # Wraps shared library function for debugging.\n if not self.db:\n raise _int_error('Connection is not valid')\n if args:\n self._do_debug(command, args)\n return self.db.query(command, args)\n self._do_debug(command)\n return self.db.query(command)\n\n def query_formatted(self, command,\n parameters=None, types=None, inline=False):\n \"\"\"Execute a formatted SQL command string.\n\n Similar to query, but using Python format placeholders of the form\n %s or %(names)s instead of PostgreSQL placeholders of the form $1.\n The parameters must be passed as a tuple, list or dict.
You can\n also pass a corresponding tuple, list or dict of database types in\n order to format the parameters properly in case there is ambiguity.\n\n If you set inline to True, the parameters will be sent to the database\n embedded in the SQL command, otherwise they will be sent separately.\n \"\"\"\n return self.query(*self.adapter.format_query(\n command, parameters, types, inline))\n\n def query_prepared(self, name, *args):\n \"\"\"Execute a prepared SQL statement.\n\n This works like the query() method, except that instead of passing\n the SQL command, you pass the name of a prepared statement. If you\n pass an empty name, the unnamed statement will be executed.\n \"\"\"\n if not self.db:\n raise _int_error('Connection is not valid')\n if name is None:\n name = ''\n if args:\n self._do_debug('EXECUTE', name, args)\n return self.db.query_prepared(name, args)\n self._do_debug('EXECUTE', name)\n return self.db.query_prepared(name)\n\n def prepare(self, name, command):\n \"\"\"Create a prepared SQL statement.\n\n This creates a prepared statement for the given command with the\n given name for later execution with the query_prepared() method.\n\n The name can be empty to create an unnamed statement, in which case\n any pre-existing unnamed statement is automatically replaced;\n otherwise it is an error if the statement name is already\n defined in the current database session. We recommend always using\n named queries, since unnamed queries have a limited lifetime and\n can be automatically replaced or destroyed by various operations.\n \"\"\"\n if not self.db:\n raise _int_error('Connection is not valid')\n if name is None:\n name = ''\n self._do_debug('prepare', name, command)\n return self.db.prepare(name, command)\n\n def describe_prepared(self, name=None):\n \"\"\"Describe a prepared SQL statement.\n\n This method returns a Query object describing the result columns of\n the prepared statement with the given name. If you omit the name,\n the unnamed statement will be described if you created one before.\n \"\"\"\n if name is None:\n name = ''\n return self.db.describe_prepared(name)\n\n def delete_prepared(self, name=None):\n \"\"\"Delete a prepared SQL statement\n\n This deallocates a previously prepared SQL statement with the given\n name, or deallocates all prepared statements if you do not specify a\n name. Note that prepared statements are also deallocated automatically\n when the current session ends.\n \"\"\"\n q = \"DEALLOCATE %s\" % (name or 'ALL',)\n self._do_debug(q)\n return self.db.query(q)\n\n def pkey(self, table, composite=False, flush=False):\n \"\"\"Get or set the primary key of a table.\n\n Single primary keys are returned as strings unless you\n set the composite flag. Composite primary keys are always\n represented as tuples. Note that this raises a KeyError\n if the table does not have a primary key.\n\n If flush is set then the internal cache for primary keys will\n be flushed. 
This may be necessary after the database schema or\n the search path has been changed.\n \"\"\"\n pkeys = self._pkeys\n if flush:\n pkeys.clear()\n self._do_debug('The pkey cache has been flushed')\n try: # cache lookup\n pkey = pkeys[table]\n except KeyError: # cache miss, check the database\n q = (\"SELECT a.attname, a.attnum, i.indkey\"\n \" FROM pg_catalog.pg_index i\"\n \" JOIN pg_catalog.pg_attribute a\"\n \" ON a.attrelid OPERATOR(pg_catalog.=) i.indrelid\"\n \" AND a.attnum OPERATOR(pg_catalog.=) ANY(i.indkey)\"\n \" AND NOT a.attisdropped\"\n \" WHERE i.indrelid OPERATOR(pg_catalog.=) %s::regclass\"\n \" AND i.indisprimary ORDER BY a.attnum\") % (\n _quote_if_unqualified('$1', table),)\n pkey = self.db.query(q, (table,)).getresult()\n if not pkey:\n raise KeyError('Table %s has no primary key' % table)\n # we want to use the order defined in the primary key index here,\n # not the order as defined by the columns in the table\n if len(pkey) > 1:\n indkey = pkey[0][2]\n pkey = sorted(pkey, key=lambda row: indkey.index(row[1]))\n pkey = tuple(row[0] for row in pkey)\n else:\n pkey = pkey[0][0]\n pkeys[table] = pkey # cache it\n if composite and not isinstance(pkey, tuple):\n pkey = (pkey,)\n return pkey\n\n def get_databases(self):\n \"\"\"Get list of databases in the system.\"\"\"\n return [s[0] for s in self.db.query(\n 'SELECT datname FROM pg_catalog.pg_database').getresult()]\n\n def get_relations(self, kinds=None, system=False):\n \"\"\"Get list of relations in connected database of specified kinds.\n\n If kinds is None or empty, all kinds of relations are returned.\n Otherwise kinds can be a string or sequence of type letters\n specifying which kind of relations you want to list.\n\n Set the system flag if you want to get the system relations as well.\n \"\"\"\n where = []\n if kinds:\n where.append(\"r.relkind IN (%s)\" %\n ','.join(\"'%s'\" % k for k in kinds))\n if not system:\n where.append(\"s.nspname NOT SIMILAR\"\n \" TO 'pg/_%|information/_schema' ESCAPE '/'\")\n where = \" WHERE %s\" % ' AND '.join(where) if where else ''\n q = (\"SELECT pg_catalog.quote_ident(s.nspname) OPERATOR(pg_catalog.||)\"\n \" '.' OPERATOR(pg_catalog.||) pg_catalog.quote_ident(r.relname)\"\n \" FROM pg_catalog.pg_class r\"\n \" JOIN pg_catalog.pg_namespace s\"\n \" ON s.oid OPERATOR(pg_catalog.=) r.relnamespace%s\"\n \" ORDER BY s.nspname, r.relname\") % where\n return [r[0] for r in self.db.query(q).getresult()]\n\n def get_tables(self, system=False):\n \"\"\"Return list of tables in connected database.\n\n Set the system flag if you want to get the system tables as well.\n \"\"\"\n return self.get_relations('r', system)\n\n def get_attnames(self, table, with_oid=True, flush=False):\n \"\"\"Given the name of a table, dig out the set of attribute names.\n\n Returns a read-only dictionary of attribute names (the names are\n the keys, the values are the names of the attributes' types)\n with the column names in the proper order if you iterate over it.\n\n If flush is set, then the internal cache for attribute names will\n be flushed. 
This may be necessary after the database schema or\n the search path has been changed.\n\n By default, only a limited number of simple types will be returned.\n You can get the registered types after calling use_regtypes(True).\n \"\"\"\n attnames = self._attnames\n if flush:\n attnames.clear()\n self._do_debug('The attnames cache has been flushed')\n try: # cache lookup\n names = attnames[table]\n except KeyError: # cache miss, check the database\n q = \"a.attnum OPERATOR(pg_catalog.>) 0\"\n if with_oid:\n q = \"(%s OR a.attname OPERATOR(pg_catalog.=) 'oid')\" % q\n q = self._query_attnames % (_quote_if_unqualified('$1', table), q)\n names = self.db.query(q, (table,)).getresult()\n types = self.dbtypes\n names = ((name[0], types.add(*name[1:])) for name in names)\n names = AttrDict(names)\n attnames[table] = names # cache it\n return names\n\n def use_regtypes(self, regtypes=None):\n \"\"\"Use registered type names instead of simplified type names.\"\"\"\n if regtypes is None:\n return self.dbtypes._regtypes\n else:\n regtypes = bool(regtypes)\n if regtypes != self.dbtypes._regtypes:\n self.dbtypes._regtypes = regtypes\n self._attnames.clear()\n self.dbtypes.clear()\n return regtypes\n\n def has_table_privilege(self, table, privilege='select', flush=False):\n \"\"\"Check whether current user has specified table privilege.\n\n If flush is set, then the internal cache for table privileges will\n be flushed. This may be necessary after privileges have been changed.\n \"\"\"\n privileges = self._privileges\n if flush:\n privileges.clear()\n self._do_debug('The privileges cache has been flushed')\n privilege = privilege.lower()\n try: # ask cache\n ret = privileges[table, privilege]\n except KeyError: # cache miss, ask the database\n q = \"SELECT pg_catalog.has_table_privilege(%s, $2)\" % (\n _quote_if_unqualified('$1', table),)\n q = self.db.query(q, (table, privilege))\n ret = q.singlescalar() == self._make_bool(True)\n privileges[table, privilege] = ret # cache it\n return ret\n\n def get(self, table, row, keyname=None):\n \"\"\"Get a row from a database table or view.\n\n This method is the basic mechanism to get a single row. It assumes\n that the keyname specifies a unique row. It must be the name of a\n single column or a tuple of column names. If the keyname is not\n specified, then the primary key for the table is used.\n\n If row is a dictionary, then the value for the key is taken from it.\n Otherwise, the row must be a single value or a tuple of values\n corresponding to the passed keyname or primary key. 
The fetched row\n from the table will be returned as a new dictionary or used to replace\n the existing values when row was passed as a dictionary.\n\n The OID is also put into the dictionary if the table has one, but\n in order to allow the caller to work with multiple tables, it is\n munged as \"oid(table)\" using the actual name of the table.\n \"\"\"\n if table.endswith('*'): # hint for descendant tables can be ignored\n table = table[:-1].rstrip()\n attnames = self.get_attnames(table)\n qoid = _oid_key(table) if 'oid' in attnames else None\n if keyname and isinstance(keyname, basestring):\n keyname = (keyname,)\n if qoid and isinstance(row, dict) and qoid in row and 'oid' not in row:\n row['oid'] = row[qoid]\n if not keyname:\n try: # if keyname is not specified, try using the primary key\n keyname = self.pkey(table, True)\n except KeyError: # the table has no primary key\n # try using the oid instead\n if qoid and isinstance(row, dict) and 'oid' in row:\n keyname = ('oid',)\n else:\n raise _prg_error('Table %s has no primary key' % table)\n else: # the table has a primary key\n # check whether all key columns have values\n if isinstance(row, dict) and not set(keyname).issubset(row):\n # try using the oid instead\n if qoid and 'oid' in row:\n keyname = ('oid',)\n else:\n raise KeyError(\n 'Missing value in row for specified keyname')\n if not isinstance(row, dict):\n if not isinstance(row, (tuple, list)):\n row = [row]\n if len(keyname) != len(row):\n raise KeyError(\n 'Differing number of items in keyname and row')\n row = dict(zip(keyname, row))\n params = self.adapter.parameter_list()\n adapt = params.add\n col = self.escape_identifier\n what = 'oid, *' if qoid else '*'\n where = ' AND '.join('%s OPERATOR(pg_catalog.=) %s' % (\n col(k), adapt(row[k], attnames[k])) for k in keyname)\n if 'oid' in row:\n if qoid:\n row[qoid] = row['oid']\n del row['oid']\n q = 'SELECT %s FROM %s WHERE %s LIMIT 1' % (\n what, self._escape_qualified_name(table), where)\n self._do_debug(q, params)\n q = self.db.query(q, params)\n res = q.dictresult()\n if not res:\n # make where clause in error message better readable\n where = where.replace('OPERATOR(pg_catalog.=)', '=')\n raise _db_error('No such record in %s\\nwhere %s\\nwith %s' % (\n table, where, self._list_params(params)))\n for n, value in res[0].items():\n if qoid and n == 'oid':\n n = qoid\n row[n] = value\n return row\n\n def insert(self, table, row=None, **kw):\n \"\"\"Insert a row into a database table.\n\n This method inserts a row into a table. The name of the table must\n be passed as the first parameter. The other parameters are used for\n providing the data of the row that shall be inserted into the table.\n If a dictionary is supplied as the second parameter, it starts with\n that. Otherwise it uses a blank dictionary. 
Either way the dictionary\n is updated from the keywords.\n\n The dictionary is then reloaded with the values actually inserted in\n order to pick up values modified by rules, triggers, etc.\n \"\"\"\n if table.endswith('*'): # hint for descendant tables can be ignored\n table = table[:-1].rstrip()\n if row is None:\n row = {}\n row.update(kw)\n if 'oid' in row:\n del row['oid'] # do not insert oid\n attnames = self.get_attnames(table)\n qoid = _oid_key(table) if 'oid' in attnames else None\n params = self.adapter.parameter_list()\n adapt = params.add\n col = self.escape_identifier\n names, values = [], []\n for n in attnames:\n if n in row:\n names.append(col(n))\n values.append(adapt(row[n], attnames[n]))\n if not names:\n raise _prg_error('No column found that can be inserted')\n names, values = ', '.join(names), ', '.join(values)\n ret = 'oid, *' if qoid else '*'\n q = 'INSERT INTO %s (%s) VALUES (%s) RETURNING %s' % (\n self._escape_qualified_name(table), names, values, ret)\n self._do_debug(q, params)\n q = self.db.query(q, params)\n res = q.dictresult()\n if res: # this should always be true\n for n, value in res[0].items():\n if qoid and n == 'oid':\n n = qoid\n row[n] = value\n return row\n\n def update(self, table, row=None, **kw):\n \"\"\"Update an existing row in a database table.\n\n Similar to insert, but updates an existing row. The update is based\n on the primary key of the table or the OID value as munged by get()\n or passed as keyword. The OID will take precedence if provided, so\n that it is possible to update the primary key itself.\n\n The dictionary is then modified to reflect any changes caused by the\n update due to triggers, rules, default values, etc.\n \"\"\"\n if table.endswith('*'):\n table = table[:-1].rstrip() # need parent table name\n attnames = self.get_attnames(table)\n qoid = _oid_key(table) if 'oid' in attnames else None\n if row is None:\n row = {}\n elif 'oid' in row:\n del row['oid'] # only accept oid key from named args for safety\n row.update(kw)\n if qoid and qoid in row and 'oid' not in row:\n row['oid'] = row[qoid]\n if qoid and 'oid' in row: # try using the oid\n keyname = ('oid',)\n else: # try using the primary key\n try:\n keyname = self.pkey(table, True)\n except KeyError: # the table has no primary key\n raise _prg_error('Table %s has no primary key' % table)\n # check whether all key columns have values\n if not set(keyname).issubset(row):\n raise KeyError('Missing value for primary key in row')\n params = self.adapter.parameter_list()\n adapt = params.add\n col = self.escape_identifier\n where = ' AND '.join('%s OPERATOR(pg_catalog.=) %s' % (\n col(k), adapt(row[k], attnames[k])) for k in keyname)\n if 'oid' in row:\n if qoid:\n row[qoid] = row['oid']\n del row['oid']\n values = []\n keyname = set(keyname)\n for n in attnames:\n if n in row and n not in keyname:\n values.append('%s = %s' % (col(n), adapt(row[n], attnames[n])))\n if not values:\n return row\n values = ', '.join(values)\n ret = 'oid, *' if qoid else '*'\n q = 'UPDATE %s SET %s WHERE %s RETURNING %s' % (\n self._escape_qualified_name(table), values, where, ret)\n self._do_debug(q, params)\n q = self.db.query(q, params)\n res = q.dictresult()\n if res: # may be empty when row does not exist\n for n, value in res[0].items():\n if qoid and n == 'oid':\n n = qoid\n row[n] = value\n return row\n\n def upsert(self, table, row=None, **kw):\n \"\"\"Insert a row into a database table with conflict resolution\n\n This method inserts a row into a table, but instead of raising a\n 
ProgrammingError exception in case a row with the same primary key\n already exists, an update will be executed instead. This will be\n performed as a single atomic operation on the database, so race\n conditions can be avoided.\n\n Like the insert method, the first parameter is the name of the\n table and the second parameter can be used to pass the values to\n be inserted as a dictionary.\n\n Unlike the insert und update statement, keyword parameters are not\n used to modify the dictionary, but to specify which columns shall\n be updated in case of a conflict, and in which way:\n\n A value of False or None means the column shall not be updated,\n a value of True means the column shall be updated with the value\n that has been proposed for insertion, i.e. has been passed as value\n in the dictionary. Columns that are not specified by keywords but\n appear as keys in the dictionary are also updated like in the case\n keywords had been passed with the value True.\n\n So if in the case of a conflict you want to update every column\n that has been passed in the dictionary row, you would call\n upsert(table, row). If you don't want to do anything in case\n of a conflict, i.e. leave the existing row as it is, call\n upsert(table, row, **dict.fromkeys(row)).\n\n If you need more fine-grained control of what gets updated, you can\n also pass strings in the keyword parameters. These strings will\n be used as SQL expressions for the update columns. In these\n expressions you can refer to the value that already exists in\n the table by prefixing the column name with \"included.\", and to\n the value that has been proposed for insertion by prefixing the\n column name with the \"excluded.\"\n\n The dictionary is modified in any case to reflect the values in\n the database after the operation has completed.\n\n Note: The method uses the PostgreSQL \"upsert\" feature which is\n only available since PostgreSQL 9.5.\n \"\"\"\n if table.endswith('*'): # hint for descendant tables can be ignored\n table = table[:-1].rstrip()\n if row is None:\n row = {}\n if 'oid' in row:\n del row['oid'] # do not insert oid\n if 'oid' in kw:\n del kw['oid'] # do not update oid\n attnames = self.get_attnames(table)\n qoid = _oid_key(table) if 'oid' in attnames else None\n params = self.adapter.parameter_list()\n adapt = params.add\n col = self.escape_identifier\n names, values = [], []\n for n in attnames:\n if n in row:\n names.append(col(n))\n values.append(adapt(row[n], attnames[n]))\n names, values = ', '.join(names), ', '.join(values)\n try:\n keyname = self.pkey(table, True)\n except KeyError:\n raise _prg_error('Table %s has no primary key' % table)\n target = ', '.join(col(k) for k in keyname)\n update = []\n keyname = set(keyname)\n keyname.add('oid')\n for n in attnames:\n if n not in keyname:\n value = kw.get(n, True)\n if value:\n if not isinstance(value, basestring):\n value = 'excluded.%s' % col(n)\n update.append('%s = %s' % (col(n), value))\n if not values:\n return row\n do = 'update set %s' % ', '.join(update) if update else 'nothing'\n ret = 'oid, *' if qoid else '*'\n q = ('INSERT INTO %s AS included (%s) VALUES (%s)'\n ' ON CONFLICT (%s) DO %s RETURNING %s') % (\n self._escape_qualified_name(table), names, values, target, do, ret)\n self._do_debug(q, params)\n try:\n q = self.db.query(q, params)\n except ProgrammingError:\n if self.server_version < 90500:\n raise _prg_error(\n 'Upsert operation is not supported by PostgreSQL version')\n raise # re-raise original error\n res = q.dictresult()\n if res: # 
may be empty with \"do nothing\"\n for n, value in res[0].items():\n if qoid and n == 'oid':\n n = qoid\n row[n] = value\n else:\n self.get(table, row)\n return row\n\n def clear(self, table, row=None):\n \"\"\"Clear all the attributes to values determined by the types.\n\n Numeric types are set to 0, Booleans are set to false, and everything\n else is set to the empty string. If the row argument is present,\n it is used as the row dictionary and any entries matching attribute\n names are cleared with everything else left unchanged.\n \"\"\"\n # At some point we will need a way to get defaults from a table.\n if row is None:\n row = {} # empty if argument is not present\n attnames = self.get_attnames(table)\n for n, t in attnames.items():\n if n == 'oid':\n continue\n t = t.simple\n if t in DbTypes._num_types:\n row[n] = 0\n elif t == 'bool':\n row[n] = self._make_bool(False)\n else:\n row[n] = ''\n return row\n\n def delete(self, table, row=None, **kw):\n \"\"\"Delete an existing row in a database table.\n\n This method deletes the row from a table. It deletes based on the\n primary key of the table or the OID value as munged by get() or\n passed as keyword. The OID will take precedence if provided.\n\n The return value is the number of deleted rows (i.e. 0 if the row\n did not exist and 1 if the row was deleted).\n\n Note that if the row cannot be deleted because e.g. it is still\n referenced by another table, this method raises a ProgrammingError.\n \"\"\"\n if table.endswith('*'): # hint for descendant tables can be ignored\n table = table[:-1].rstrip()\n attnames = self.get_attnames(table)\n qoid = _oid_key(table) if 'oid' in attnames else None\n if row is None:\n row = {}\n elif 'oid' in row:\n del row['oid'] # only accept oid key from named args for safety\n row.update(kw)\n if qoid and qoid in row and 'oid' not in row:\n row['oid'] = row[qoid]\n if qoid and 'oid' in row: # try using the oid\n keyname = ('oid',)\n else: # try using the primary key\n try:\n keyname = self.pkey(table, True)\n except KeyError: # the table has no primary key\n raise _prg_error('Table %s has no primary key' % table)\n # check whether all key columns have values\n if not set(keyname).issubset(row):\n raise KeyError('Missing value for primary key in row')\n params = self.adapter.parameter_list()\n adapt = params.add\n col = self.escape_identifier\n where = ' AND '.join('%s OPERATOR(pg_catalog.=) %s' % (\n col(k), adapt(row[k], attnames[k])) for k in keyname)\n if 'oid' in row:\n if qoid:\n row[qoid] = row['oid']\n del row['oid']\n q = 'DELETE FROM %s WHERE %s' % (\n self._escape_qualified_name(table), where)\n self._do_debug(q, params)\n res = self.db.query(q, params)\n return int(res)\n\n def truncate(self, table, restart=False, cascade=False, only=False):\n \"\"\"Empty a table or set of tables.\n\n This method quickly removes all rows from the given table or set\n of tables. It has the same effect as an unqualified DELETE on each\n table, but since it does not actually scan the tables it is faster.\n Furthermore, it reclaims disk space immediately, rather than requiring\n a subsequent VACUUM operation. This is most useful on large tables.\n\n If restart is set to True, sequences owned by columns of the truncated\n table(s) are automatically restarted. If cascade is set to True, it\n also truncates all tables that have foreign-key references to any of\n the named tables. If the parameter only is not set to True, all the\n descendant tables (if any) will also be truncated. 
Optionally, a '*'\n can be specified after the table name to explicitly indicate that\n descendant tables are included.\n \"\"\"\n if isinstance(table, basestring):\n only = {table: only}\n table = [table]\n elif isinstance(table, (list, tuple)):\n if isinstance(only, (list, tuple)):\n only = dict(zip(table, only))\n else:\n only = dict.fromkeys(table, only)\n elif isinstance(table, (set, frozenset)):\n only = dict.fromkeys(table, only)\n else:\n raise TypeError('The table must be a string, list or set')\n if not (restart is None or isinstance(restart, (bool, int))):\n raise TypeError('Invalid type for the restart option')\n if not (cascade is None or isinstance(cascade, (bool, int))):\n raise TypeError('Invalid type for the cascade option')\n tables = []\n for t in table:\n u = only.get(t)\n if not (u is None or isinstance(u, (bool, int))):\n raise TypeError('Invalid type for the only option')\n if t.endswith('*'):\n if u:\n raise ValueError(\n 'Contradictory table name and only options')\n t = t[:-1].rstrip()\n t = self._escape_qualified_name(t)\n if u:\n t = 'ONLY %s' % t\n tables.append(t)\n q = ['TRUNCATE', ', '.join(tables)]\n if restart:\n q.append('RESTART IDENTITY')\n if cascade:\n q.append('CASCADE')\n q = ' '.join(q)\n self._do_debug(q)\n return self.db.query(q)\n\n def get_as_list(self, table, what=None, where=None,\n order=None, limit=None, offset=None, scalar=False):\n \"\"\"Get a table as a list.\n\n This gets a convenient representation of the table as a list\n of named tuples in Python. You only need to pass the name of\n the table (or any other SQL expression returning rows). Note that\n by default this will return the full content of the table which\n can be huge and overflow your memory. However, you can control\n the amount of data returned using the other optional parameters.\n\n The parameter 'what' can restrict the query to only return a\n subset of the table columns. It can be a string, list or a tuple.\n The parameter 'where' can restrict the query to only return a\n subset of the table rows. It can be a string, list or a tuple\n of SQL expressions that all need to be fulfilled. The parameter\n 'order' specifies the ordering of the rows. It can also be a\n other string, list or a tuple. If no ordering is specified,\n the result will be ordered by the primary key(s) or all columns\n if no primary key exists. You can set 'order' to False if you\n don't care about the ordering. 
The parameters 'limit' and 'offset'\n can be integers specifying the maximum number of rows returned\n and a number of rows skipped over.\n\n If you set the 'scalar' option to True, then instead of the\n named tuples you will get the first items of these tuples.\n This is useful if the result has only one column anyway.\n \"\"\"\n if not table:\n raise TypeError('The table name is missing')\n if what:\n if isinstance(what, (list, tuple)):\n what = ', '.join(map(str, what))\n if order is None:\n order = what\n else:\n what = '*'\n q = ['SELECT', what, 'FROM', table]\n if where:\n if isinstance(where, (list, tuple)):\n where = ' AND '.join(map(str, where))\n q.extend(['WHERE', where])\n if order is None:\n try:\n order = self.pkey(table, True)\n except (KeyError, ProgrammingError):\n try:\n order = list(self.get_attnames(table))\n except (KeyError, ProgrammingError):\n pass\n if order:\n if isinstance(order, (list, tuple)):\n order = ', '.join(map(str, order))\n q.extend(['ORDER BY', order])\n if limit:\n q.append('LIMIT %d' % limit)\n if offset:\n q.append('OFFSET %d' % offset)\n q = ' '.join(q)\n self._do_debug(q)\n q = self.db.query(q)\n res = q.namedresult()\n if res and scalar:\n res = [row[0] for row in res]\n return res\n\n def get_as_dict(self, table, keyname=None, what=None, where=None,\n order=None, limit=None, offset=None, scalar=False):\n \"\"\"Get a table as a dictionary.\n\n This method is similar to get_as_list(), but returns the table\n as a Python dict instead of a Python list, which can be even\n more convenient. The primary key column(s) of the table will\n be used as the keys of the dictionary, while the other column(s)\n will be the corresponding values. The keys will be named tuples\n if the table has a composite primary key. The rows will be also\n named tuples unless the 'scalar' option has been set to True.\n With the optional parameter 'keyname' you can specify an alternative\n set of columns to be used as the keys of the dictionary. It must\n be set as a string, list or a tuple.\n\n If the Python version supports it, the dictionary will be an\n OrderedDict using the order specified with the 'order' parameter\n or the key column(s) if not specified. You can set 'order' to False\n if you don't care about the ordering. 
In this case the returned\n dictionary will be an ordinary one.\n \"\"\"\n if not table:\n raise TypeError('The table name is missing')\n if not keyname:\n try:\n keyname = self.pkey(table, True)\n except (KeyError, ProgrammingError):\n raise _prg_error('Table %s has no primary key' % table)\n if isinstance(keyname, basestring):\n keyname = [keyname]\n elif not isinstance(keyname, (list, tuple)):\n raise KeyError('The keyname must be a string, list or tuple')\n if what:\n if isinstance(what, (list, tuple)):\n what = ', '.join(map(str, what))\n if order is None:\n order = what\n else:\n what = '*'\n q = ['SELECT', what, 'FROM', table]\n if where:\n if isinstance(where, (list, tuple)):\n where = ' AND '.join(map(str, where))\n q.extend(['WHERE', where])\n if order is None:\n order = keyname\n if order:\n if isinstance(order, (list, tuple)):\n order = ', '.join(map(str, order))\n q.extend(['ORDER BY', order])\n if limit:\n q.append('LIMIT %d' % limit)\n if offset:\n q.append('OFFSET %d' % offset)\n q = ' '.join(q)\n self._do_debug(q)\n q = self.db.query(q)\n res = q.getresult()\n cls = OrderedDict if order else dict\n if not res:\n return cls()\n keyset = set(keyname)\n fields = q.listfields()\n if not keyset.issubset(fields):\n raise KeyError('Missing keyname in row')\n keyind, rowind = [], []\n for i, f in enumerate(fields):\n (keyind if f in keyset else rowind).append(i)\n keytuple = len(keyind) > 1\n getkey = itemgetter(*keyind)\n keys = map(getkey, res)\n if scalar:\n rowind = rowind[:1]\n rowtuple = False\n else:\n rowtuple = len(rowind) > 1\n if scalar or rowtuple:\n getrow = itemgetter(*rowind)\n else:\n rowind = rowind[0]\n\n def getrow(row):\n return row[rowind], # tuple with one item\n\n rowtuple = True\n rows = map(getrow, res)\n if keytuple or rowtuple:\n if keytuple:\n keys = _namediter(_MemoryQuery(keys, keyname))\n if rowtuple:\n fields = [f for f in fields if f not in keyset]\n rows = _namediter(_MemoryQuery(rows, fields))\n # noinspection PyArgumentList\n return cls(zip(keys, rows))\n\n def notification_handler(self, event, callback,\n arg_dict=None, timeout=None, stop_event=None):\n \"\"\"Get notification handler that will run the given callback.\"\"\"\n return NotificationHandler(self, event, callback,\n arg_dict, timeout, stop_event)\n\n\n# if run as script, print some information\n\nif __name__ == '__main__':\n print('PyGreSQL version' + version)\n print('')\n print(__doc__)\n","sub_path":"pg.py","file_name":"pg.py","file_ext":"py","file_size_in_byte":104609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"557891837","text":"from .utils import getitem_or_default\n\nclass Starbase:\n def __init__(self, id, value):\n self.id = id\n self.level = getitem_or_default(value, \"level\", None)\n\n\n\n # relationships\n\n self.owner = None\n self.system = None\n\n self.owner_id = value[\"owner\"]\n self.system_id = value[\"system\"]\n\n def __str__(self):\n return f\"Starbase({self.id})\"","sub_path":"stellaris/model/starbase.py","file_name":"starbase.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"109442327","text":"#load qutip and matplotlib\nfrom qutip import *\nfrom pylab import *\n\ndef run():\n # define parameters\n N=4 # number of basis states to consider\n kappa=1.0/0.129 # coupling to heat bath\n nth= 0.063 # temperature with =0.063\n\n # create operators and initial |1> state\n a=destroy(N) # cavity destruction 
operator\n H=a.dag()*a # harmonic oscillator Hamiltonian\n psi0=basis(N,1) # initial Fock state with one photon\n\n # collapse operators\n c_op_list = []\n # decay operator\n c_op_list.append(sqrt(kappa * (1 + nth)) * a)\n # excitation operator\n c_op_list.append(sqrt(kappa * nth) * a.dag())\n\n # run monte carlo simulation\n ntraj=[1,5,15,904] # list of number of trajectories to avg. over\n tlist=linspace(0,0.6,100)\n mc = mcsolve(H,psi0,tlist,c_op_list,[a.dag()*a],ntraj)\n # get expectation values from mc data (need extra index since ntraj is list)\n ex1=mc.expect[0][0] #for ntraj=1\n ex5=mc.expect[1][0] #for ntraj=5\n ex15=mc.expect[2][0] #for ntraj=15\n ex904=mc.expect[3][0] #for ntraj=904\n\n ## run master equation to get ensemble average expectation values ## \n me = mesolve(H,psi0,tlist,c_op_list, [a.dag()*a])\n\n # calulate final state using steadystate solver\n final_state=steadystate(H,c_op_list) # find steady-state\n fexpt=expect(a.dag()*a,final_state) # find expectation value for particle number\n\n #\n # plot results using vertically stacked plots\n #\n \n # set legend fontsize\n import matplotlib.font_manager\n leg_prop = matplotlib.font_manager.FontProperties(size=10)\n \n f = figure(figsize=(6,9))\n subplots_adjust(hspace=0.001) #no space between plots\n \n # subplot 1 (top)\n ax1 = subplot(411)\n ax1.plot(tlist,ex1,'b',lw=2)\n ax1.axhline(y=fexpt,color='k',lw=1.5)\n yticks(linspace(0,2,5))\n ylim([-0.1,1.5])\n ylabel('$\\left< N \\\\right>$',fontsize=14)\n title(\"Ensemble Averaging of Monte Carlo Trajectories\")\n legend(('Single trajectory','steady state'),prop=leg_prop)\n \n # subplot 2\n ax2=subplot(412,sharex=ax1) #share x-axis of subplot 1\n ax2.plot(tlist,ex5,'b',lw=2)\n ax2.axhline(y=fexpt,color='k',lw=1.5)\n yticks(linspace(0,2,5))\n ylim([-0.1,1.5])\n ylabel('$\\left< N \\\\right>$',fontsize=14)\n legend(('5 trajectories','steadystate'),prop=leg_prop)\n \n # subplot 3\n ax3=subplot(413,sharex=ax1) #share x-axis of subplot 1\n ax3.plot(tlist,ex15,'b',lw=2)\n ax3.plot(tlist,me.expect[0],'r--',lw=1.5)\n ax3.axhline(y=fexpt,color='k',lw=1.5)\n yticks(linspace(0,2,5))\n ylim([-0.1,1.5])\n ylabel('$\\left< N \\\\right>$',fontsize=14)\n legend(('15 trajectories','master equation','steady state'),prop=leg_prop)\n \n # subplot 4 (bottom)\n ax4=subplot(414,sharex=ax1) #share x-axis of subplot 1\n ax4.plot(tlist,ex904,'b',lw=2)\n ax4.plot(tlist,me.expect[0],'r--',lw=1.5)\n ax4.axhline(y=fexpt,color='k',lw=1.5)\n yticks(linspace(0,2,5))\n ylim([-0.1,1.5])\n ylabel('$\\left< N \\\\right>$',fontsize=14)\n legend(('904 trajectories','master equation','steady state'),prop=leg_prop)\n \n #remove x-axis tick marks from top 3 subplots\n xticklabels = ax1.get_xticklabels()+ax2.get_xticklabels()+ax3.get_xticklabels()\n setp(xticklabels, visible=False)\n \n ax1.xaxis.set_major_locator(MaxNLocator(4))\n xlabel('Time (sec)',fontsize=14)\n show()\n\n\nif __name__==\"__main__\":\n run()\n","sub_path":"doc/examples/mc/ex_32.py","file_name":"ex_32.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"556366616","text":"import tensorflow as tf\n\nclass MultilayerPerceptron:\n def __init__(self, n_input, n_hidden_1, n_hidden_2, n_classes):\n self.n_input = n_input\n self.n_hidden_1 = n_hidden_1\n self.n_hidden_2 = n_hidden_2\n self.n_classes = n_classes\n # Store layers weight & bias\n self.weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2': 
tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n }\n self.biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n self.input_ph = tf.placeholder(tf.float32, shape=(None, n_input))\n\n # Create model\n # Hidden layer with RELU activation\n self.layer_1 = tf.add(tf.matmul(self.input_ph, self.weights['h1']), self.biases['b1'])\n self.layer_1 = tf.nn.relu(self.layer_1)\n # Hidden layer with RELU activation\n self.layer_2 = tf.add(tf.matmul(self.layer_1, self.weights['h2']), self.biases['b2'])\n self.layer_2 = tf.nn.relu(self.layer_2)\n # Output layer with linear activation\n self.out_layer = tf.matmul(self.layer_2, self.weights['out']) + self.biases['out']\n","sub_path":"camera-filter/src/lib/multilayer_perceptron.py","file_name":"multilayer_perceptron.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"257899682","text":"def Sequence(n=2, initialSequence=[0, 1]):\n \"\"\"\n Create a Fibonacci-style sequence up to the nth entry, based on an initial\n two-value sequeunce, where\n\n x_n = x_{n-1} + x_{n-2}\n\n Parameter\n ---------\n n: int\n The entry up to which to generate the sequence (n > 0, i.e., n=1 is the\n first entry, etc). Defaults to 2, the length of the initial sequence.\n initialSequence: list\n A list containing the first two values of the sequence, defaulting to\n [0, 1].\n\n Returns\n -------\n sequence: list\n A list containing the full sequence.\n \"\"\"\n\n if not len(initialSequence) > 1:\n raise ValueError(\"list needs to be at least two elements long to form a sequence\")\n\n if n < 2:\n raise ValueError(\"n must be the same or larger than the length of the initial sequence\")\n\n # copy and store the initialSequence\n sequence = list(initialSequence)\n\n # extend and fill in the sequence...\n for i in range(len(initialSequence), n):\n sequence.append(sequence[i - 1] + sequence[i - 2])\n\n return sequence\n\n\nn=100\nseq = Sequence(n=n)\nprint(seq)\n","sub_path":"quiz_2_2.py","file_name":"quiz_2_2.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"406751770","text":"from django.db import models\nfrom login_signup import models as _\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.urls import reverse\nimport boto3\nfrom . 
import aws\nec2r = boto3.resource('ec2')\n# Create your models here.\n\nclass Server(models.Model):\n aws_id = models.CharField(max_length=20, blank= True)\n ip = models.CharField(max_length=20, blank= False)\n port = models.IntegerField(null=False , default= 27015)\n is_assigned = models.BooleanField(default=False)\n \n\n def __str__(self):\n return f'{self.ip}:{self.port}'\n\n \n def state(self):\n return aws.ServerState(self.aws_id)\n\n\n def Startinstance(self):\n instance = aws.StartAserver(self.aws_id)\n return instance\n\n \n def Stopinstance(self):\n instance = aws.StopAserver(self.aws_id)\n return instance\n\n\n def LaunchCS(self):\n return aws.Start_cs(self.ip)\n\n \n def StopCS(self):\n return aws.Stop_cs(self.ip)\n\n \n def RestartCs(self):\n return aws.Restart_cs(self.ip)\n\n\n def SetIp(self):\n instance = ec2r.Instance(self.aws_id)\n self.ip = instance.public_ip_address\n self.save()\n \n return self.ip\n\n\n def Csstatus(self):\n if self.state() == 'running':\n return aws.Csstatus(self.ip)\n\n\n def get_absolute_url(self):\n \n return reverse(\"server\", kwargs= {'pk': self.pk})\n\n\n def terminate(self):\n return aws.Terminate(self.aws_id)\n\n\n\nclass Team(models.Model):\n name = models.CharField(max_length=50, null=False)\n tag = models.CharField(max_length=10, blank=True, null=True)\n logo = models.ImageField(upload_to=\"logos\", blank=True, null=True)\n players = models.ManyToManyField(_.NewUser)\n team_description = models.CharField(max_length=150 , blank= True, null=True)\n player1 = models.CharField(max_length = 150, blank=True)\n player2 = models.CharField(max_length = 150, blank=True)\n player3 = models.CharField(max_length = 150, blank=True)\n player4 = models.CharField(max_length = 150, blank=True)\n player5 = models.CharField(max_length = 150, blank=True)\n\n\n\n def __str__(self):\n return self.name\n\n\n def get_absolute_url(self):\n print(reverse(\"team\", kwargs= {'pk': self.pk}))\n return reverse(\"team\", kwargs= {'pk': self.pk})\n\n\nclass Tournament(models.Model):\n name = models.CharField(max_length=50, blank=False, null=False)\n description = models.TextField(default = '')\n registration_starts = models.DateField(blank=False,null=False)\n registration_ends = models.DateField(blank=False,null=False)\n tournament_starts = models.DateField(blank=False,null=False)\n tournament_ends = models.DateField(blank=False,null=False)\n teams_left = models.ManyToManyField(Team,related_name= 'teams_left')\n teams = models.ManyToManyField(Team,related_name= 'tournament_teams')\n banner = models.ImageField(upload_to=\"tournament_banners\")\n logo = models.ImageField(upload_to=\"tournament_logo\",default='../static/images/logo.png')\n organizer = models.ForeignKey(_.NewUser,on_delete=models.CASCADE)\n prizepool = models.IntegerField(blank=False,null=False)\n game = models.CharField(max_length=10, blank=True, null=True)\n contact_email = models.EmailField(max_length=254)\n contact_no = models.IntegerField(blank=False,null=True)\n sponsored_by = models.CharField(max_length=50, blank=True, null=True)\n is_feat = models.BooleanField(default=False)\n servers = models.ManyToManyField(Server)\n winner = models.ForeignKey(Team, blank= True,null=True,on_delete=models.CASCADE,related_name='tournament_winner')\n \n\n\n def __str__(self):\n return self.name\n \n\n def get_absolute_url(self):\n print(reverse('tournament', kwargs= {'pk': self.pk}))\n return reverse('tournament', kwargs= {'pk': self.pk})\n\n\n\n\nclass Match(models.Model):\n \n\n server = models.ForeignKey(Server, 
on_delete= models.CASCADE, blank=True, null=True)\n\n \n stage = models.CharField(max_length = 50,blank=True, null=True)\n uuid = models.CharField(default='uuid4', max_length=100)\n \n team_1 = models.ForeignKey(\n Team, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"team_1\"\n )\n team_2 = models.ForeignKey(\n Team, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"team_2\"\n )\n \n winner = models.ForeignKey(\n Team, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"winner\"\n )\n status = models.CharField(max_length=50, default=\"Pending\")\n start_time = models.DateTimeField(blank=True, null=True)\n end_time = models.DateTimeField(blank=True, null=True)\n skip_veto = models.BooleanField(default=True)\n team_1_score = models.IntegerField(default=0)\n team_2_score = models.IntegerField(default=0)\n veto_mappool = models.CharField(max_length=500, blank=True, null=True)\n match_map = models.CharField(max_length=20, blank=True, default='Default Dust2')\n max_maps = models.IntegerField(default=1)\n tournament = models.ManyToManyField(Tournament)\n\n class Meta:\n verbose_name = (\"Match\")\n verbose_name_plural = (\"Matches\")\n\n def teams(self):\n t1 = self.team_1\n\n t2 = self.team_2\n return f\"{t1} vs {t2}\"\n\n \n def setwinner(self,team):\n self.winner = team\n self.status = \"DONE\"\n self.save()\n\n \n @property\n def match_config(self) -> dict:\n t1_info = self.team_1.team_information if self.team_1 else {}\n t2_info = self.team_2.team_information if self.team_2 else {}\n return {\n \"match_id\": self.uuid,\n \"num_maps\": 1,\n \"maplist\": [{\"de_dust2\": \"\"}],\n \"skip_veto\": self.skip_veto,\n \"veto_first\": \"team1\",\n \"side_type\": \"always_knife\",\n \"players_per_team\": 5,\n \"min_players_to_ready\": 1,\n \"team1\": t1_info,\n \"team2\": t2_info,\n \"cvars\": {\n \"hostname\": f\"Match - {t1_info.get('name')} vs {t2_info.get('name')}\"\n },\n }\n \n def get_absolute_url(self):\n \n \n return reverse('home')\n\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"334014995","text":"\"\"\"\n@author Wildo Monges\nThis program runs the classifier using naive bayes algorithm to classify\na group of files by topics\nNote:\n Python 3.5\n Execute from console: python classifier.py\nResult:\n K = 0 Corrects: 242 Random: 86\n K = 1 Corrects: 242 Random: 104\n K = 2 Corrects: 242 Random: 100\n K = 3 Corrects: 242 Random: 87\n K = 4 Corrects: 242 Random: 115\n K = 5 Corrects: 242 Random: 102\n K = 6 Corrects: 242 Random: 104\n K = 7 Corrects: 242 Random: 103\n K = 8 Corrects: 242 Random: 90\n K = 9 Corrects: 242 Random: 103\nConclusion:\n The classifier works better than a random classifier\n\"\"\"\nfrom os import listdir\nfrom os.path import isfile, join\nimport naive_bayes_classifier.naive_bayes as naive_bayes\nimport random\n\nLIMIT_K = 10\n\ndirs = ('comp.os.ms-windows.misc', 'rec.sport.baseball', 'talk.politics.misc')\n\n# Store the classifier model\nmodel = None\nk = 0\nmax_corrects = 0\nfor K in range(LIMIT_K):\n\n # Train phase\n # For each class (d is the directory that represent a class)\n for d in dirs:\n # Get all names from the files\n path = join('data', 'train', d)\n files = [join(path, f) for f in listdir(path) if isfile(join(path, f))]\n\n # train a model\n model = naive_bayes.train(model, files, d, K)\n\n # Cross validation phase\n corrects = 0\n random_corrects = 0\n for 
d in dirs:\n # Get file names\n path = join('data', 'validation', d)\n files = [join(path, f) for f in listdir(path) if isfile(join(path, f))]\n\n # For each file\n for fn in files:\n # Test classifier\n class_label = naive_bayes.classify(model, fn, K)\n if class_label == d:\n corrects += 1\n\n # Random test to compare\n random_class = dirs[random.randint(0, len(dirs) - 1)]\n if random_class == d:\n random_corrects += 1\n\n print('K = ', K, ' Corrects: ', corrects, ' Random: ', random_corrects)\n\n # Track the best K seen so far (keep the running maximum up to date)\n if corrects > max_corrects:\n max_corrects = corrects\n k = K\n\n","sub_path":"naive_bayes_classifier/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"392800215","text":"from math import log\nimport csv\n\n\ndef calc_entropy(data):\n samples = len(data)\n labels = {}\n for rec in data:\n label = rec[-1]\n if label not in labels.keys():\n labels[label] = 0\n labels[label] += 1\n entropy = 0.0\n for key in labels:\n prob = float(labels[key])/samples\n entropy -= prob * log(prob, 4)\n return entropy\n\n\ndef attribute_selection(data):\n features = len(data[0]) - 1\n base_entropy = calc_entropy(data)\n max_info_gain = 0.0\n best_attr = -1\n for i in range(features):\n attr_list = [rec[i] for rec in data]\n unique_vals = set(attr_list)\n new_entropy = 0.0\n attr_entropy = 0.0\n for value in unique_vals:\n new_data = dataset_split(data, i, value)\n prob = len(new_data)/float(len(data))\n new_entropy = prob * calc_entropy(new_data)\n attr_entropy += new_entropy\n info_gain = base_entropy - attr_entropy\n if (info_gain > max_info_gain):\n max_info_gain = info_gain\n best_attr = i\n entropy = attr_entropy\n return best_attr, entropy\n\n\ndef dataset_split(data, arc, val):\n split_data = []\n for rec in data:\n if rec[arc] == val:\n reduced_set = list(rec[:arc])\n reduced_set.extend(rec[arc+1:])\n split_data.append(reduced_set)\n return split_data\n\n# Function to build the decision tree\ndef main(data, labels):\n # list variable to store the class-labels (terminal nodes of decision tree)\n classList = [rec[-1] for rec in data]\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n # functional call to identify the attribute for split\n maxGainNode, entropy = attribute_selection(data)\n # variable to store the class label value\n treeLabel = labels[maxGainNode]\n # dict object to represent the nodes in the decision tree\n theTree = {treeLabel:{}}\n del(labels[maxGainNode])\n # get the unique values of the attribute identified\n nodeValues = [rec[maxGainNode] for rec in data]\n uniqueVals = set(nodeValues)\n for value in uniqueVals:\n subLabels = labels[:]\n # update the non-terminal node values of the decision tree\n data_set1 = dataset_split(data,maxGainNode,value)\n entropy1 = calc_entropy(data_set1)\n print(treeLabel,value,entropy)\n theTree[treeLabel][value] = main(dataset_split(data, maxGainNode, value),subLabels)\n #return the decision tree (dict object)\n return theTree\n\ndef readcsv():\n data = list(csv.reader(open('car.csv')))\n return data\n\nimport pprint\nlabels = ['att0','att1','att2','att3','att4','att5']\ndata = readcsv()\npprint.pprint(main(data,labels))","sub_path":"id3 decision trees/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"496117371","text":"\"\"\"\n This takes the feature engineered data in
feature_data.csv as input and\n applies a standard machine learning framework. Output is prediction.csv.\n\"\"\"\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils.np_utils import to_categorical\nfrom keras.initializers import TruncatedNormal\nfrom keras.layers import Dropout\nfrom keras.optimizers import Adam\nfrom numpy import loadtxt, savetxt, column_stack\nfrom sklearn.metrics import accuracy_score\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument('--train', dest='train_set', help='The path to the training set.')\nparser.add_argument('--test', dest='test_set', help='The path to the test set.')\nparser.add_argument('--validate', dest='validation_set',\n help='The path to the validation set.', action='store', default=None)\nparser.add_argument('--save', dest='save',\n help='Save the resulting model.', action='store', type=bool, default=False)\n\nparse = parser.parse_args()\n\n# Load and split data sets.\ntrain_set = loadtxt(parse.train_set, delimiter=',')\nX_train = train_set[:, :-1]\nY_train = train_set[:, -1]\n\ntest_set = loadtxt(parse.test_set, delimiter=',')\nX_test = test_set[:, :-1]\nY_test = test_set[:, -1]\n\nif parse.validation_set is not None:\n validation_set = loadtxt(parse.validation_set, delimiter=',')\n X_validate = validation_set[:, :-1]\n Y_validate = validation_set[:, -1]\n\n# building the model (simple neural network) in keras\nmodel = Sequential()\nmodel.add(Dense(units=24, activation='sigmoid', input_dim=X_train[0].size,\n kernel_initializer=TruncatedNormal(stddev=0.15)))\nmodel.add(Dense(units=24*3, activation='sigmoid', kernel_initializer=TruncatedNormal(stddev=0.15)))\nmodel.add(Dense(units=24*9, activation='sigmoid', kernel_initializer=TruncatedNormal(stddev=0.15)))\nmodel.add(Dropout(0.9))\nmodel.add(Dense(units=2, activation='softmax', kernel_initializer=TruncatedNormal(stddev=0.15)))\nmodel.compile(loss='categorical_crossentropy',\n optimizer=Adam(0.01),\n metrics=['accuracy'])\nmodel.fit(X_train,\n to_categorical(Y_train),\n epochs=200,\n batch_size=2000,\n validation_split=0,\n verbose=False)\n\n\ndef accuracy(X, Y):\n Y_pred = model.predict(X)\n Y_pred_round = [round(value[1]) for value in Y_pred]\n return accuracy_score(Y, Y_pred_round)\n\n\nout_of_sample = accuracy(X_test, Y_test)\nin_sample = accuracy(X_train, Y_train)\nprint('Accuracy in sample: {:04.2f}%'.format(in_sample*100))\nprint('Accuracy out of sample: {:04.2f}%'.format(out_of_sample*100))\n\nif parse.validation_set is not None:\n validate = accuracy(X_validate, Y_validate)\n print('Accuracy for validation: {:04.2f}%'.format(validate*100))\n\n\nif parse.save is True:\n test_prediction = model.predict(X_test)\n test_outcome = column_stack((test_prediction[:, 1], Y_test))\n savetxt('./predictions/tf_test_outcome.csv', test_outcome, delimiter=',')\n if parse.validation_set is not None:\n validation_prediction = model.predict(X_validate)\n validation_outcome = column_stack((validation_prediction[:, 0], Y_validate))\n savetxt('./predictions/tf_validation_outcome.csv', validation_outcome, delimiter=',')\n","sub_path":"tf_machine_learning.py","file_name":"tf_machine_learning.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"186186136","text":"import requests\nimport json\n\n\ndef getMainPharmaciesList():\n result = requests.get('https://wydfdauvw5.execute-api.sa-east-1.amazonaws.com/desafio/farmacias')\n return 
json.loads(result.content)\n\ndef getFullPharmaciesList():\n primaryList = getMainPharmaciesList()\n fullFarmacyList = []\n for pharmacy in primaryList['data']:\n url = pharmacy['links']['self']\n resultPharmacy = requests.get(url)\n fullFarmacyList.append(json.loads(resultPharmacy.content))\n return fullFarmacyList\n\n#obj = getFullPharmaciesList()\n#print(str(obj))\n","sub_path":"pharmacy-v1-back/apiproviders/getPharmacies.py","file_name":"getPharmacies.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"15849386","text":"#!/usr/bin/env python\n\ndebug = 0\nfrom Load_SOB import *\noutputDir = '/data2/users/jabeen/DATA_2/SB-All'\nresultsdir = '/data2/users/jabeen/DATA_2/SB-All'\nparser = ArgumentParser()\nparser.add_argument('--baseDirMuG', default=None, dest='baseDirMuG', required=False, help='Path to muon base directory')\nparser.add_argument('--baseDirElG', default=None, dest='baseDirElG', required=False, help='Path to electron base directory')\nparser.add_argument('--outputDir', default=None, dest='outputDir', required=False, help='Output directory to write histograms')\nparser.add_argument('--data', default=False, dest='data', required=False, help='Use data or MC')\nparser.add_argument('--batch', default=None, dest='batch', required=False, help='Suppress X11 output')\n\noptions = parser.parse_args()\n\n_TREENAME = 'UMDNTuple/EventTree'\n_FILENAME = 'tree.root'\n_XSFILE = 'cross_sections/photon17.py'\n_LUMI = 36000\n#_BASEPATH = '/home/jkunkle/usercode/Plotting/LimitSetting/'\n_SAMPCONF = 'Modules/Resonance2017.py'\n#_SAMPCONF = 'Modules/Resonance.py'\n\n\n\nif options.batch:\n ROOT.gROOT.SetBatch(True)\nif options.outputDir is not None :\n if not os.path.isdir( options.outputDir ) :\n os.makedirs( options.outputDir )\n\nROOT.gStyle.SetOptStat(0)\nROOT.gStyle.SetOptFit(1)\n\n\n#ROOT.gROOT.SetBatch(True)\n\n# if no option is given, here are the default directories to read\n#if options.baseDirMuG is None: options.baseDirMuG = \"/data2/users/kakw/Resonances2017/LepGamma_mug_2019_09_15/\"\nif options.baseDirMuG is None: options.baseDirMuG = \"/data2/users/kakw/Resonances2017/LepGamma_mug_2019_10_28/\"\n#if options.baseDirElG is None: options.baseDirElG = \"/data2/users/kakw/Resonances2017/LepGamma_elg_2019_09_15/\"\nif options.baseDirElG is None: options.baseDirElG = \"/data2/users/kakw/Resonances2017/LepGamma_elg_2019_10_28/\"\n#options.baseDirElG = \"/data/users/friccita/WGammaNtuple/LepGamma_elg_2019_04_11/\"\n#=========Provide the string for base cuts to compare the significance against in the final SOB plots\n#sigstr_BCuts = [\"selbase_el_gtmet25_phpt80_elpt40_elidTight_phidTight_invZ10\", \"selbase_el_gtmet25_phpt80_elpt40_elidTight_phidTight_invZ10\", \"selbase_el_gtmet25_phpt80_elpt40_elidTight_phidTight_invZ10\"]\nsigstr_BCuts = \"selbase_el_gtmet25_phpt80_elpt40_elidTight_phidTight_invZ10\"\n \nsignal_name = [\"MadGraphResonanceMass200_width0p01\"\n ,\"MadGraphResonanceMass250_width5\"\n ,\"MadGraphResonanceMass300_width5\"\n ,\"MadGraphResonanceMass400_width5\"\n ,\"MadGraphResonanceMass450_width5\"\n ,\"MadGraphResonanceMass500_width5\"\n ,\"MadGraphResonanceMass700_width0p01\"\n ,\"MadGraphResonanceMass800_width0p01\"\n ,\"MadGraphResonanceMass800_width5\"\n ,\"MadGraphResonanceMass900_width0p01\"\n ,\"MadGraphResonanceMass900_width5\"\n ,\"MadGraphResonanceMass1000_width0p01\"\n ,\"MadGraphResonanceMass1200_width0p01\"\n ,\"MadGraphResonanceMass1400_width0p01\"\n 
,\"MadGraphResonanceMass1400_width5\"\n ,\"MadGraphResonanceMass1800_width5\"\n ,\"MadGraphResonanceMass2000_width0p01\"\n ,\"MadGraphResonanceMass2000_width5\"\n ,\"MadGraphResonanceMass2200_width0p01\"\n ,\"MadGraphResonanceMass2200_width5\"\n ,\"MadGraphResonanceMass2600_width0p01\"\n ,\"MadGraphResonanceMass2800_width0p01\"\n ,\"MadGraphResonanceMass3500_width5\"\n ,\"MadGraphResonanceMass4000_width5\"]\n\n\n\n\n#sigstr = [\"MadGraphResonanceMass250_width5\", \"MadGraphResonanceMass1000_width0p01\", \"MadGraphResonanceMass2200_width0p01\"]\n\n\ndef main() :\n if options.outputDir: f1 = ROOT.TFile(\"%s/output.root\"%(options.outputDir),\"RECREATE\")\n\n #sampManMuG= SampleManager( options.baseDirMuG, _TREENAME, filename=_FILENAME, xsFile=_XSFILE, lumi=_LUMI )\n sampManElG= SampleManager( options.baseDirElG, _TREENAME, filename=_FILENAME, xsFile=_XSFILE, lumi=_LUMI )\n \n #sampManMuG.ReadSamples( _SAMPCONF )\n sampManElG.ReadSamples( _SAMPCONF )\n \n cut_phpt = [(phpt70,\"phpt70\"), (phpt80,\"phpt80\"), (phpt90,\"phpt90\"), (phpt100,\"phpt100\"), (phpt105,\"phpt105\"), (phpt110,\"phpt110\"), (phpt120,\"phpt120\"), (phpt130,\"phpt130\"), (phpt140,\"phpt140\"), (phpt150,\"phpt150\"), (phpt160,\"phpt160\"), (phpt170,\"phpt170\"), (phpt180,\"phpt180\"), (phpt190,\"phpt190\"), (phpt200,\"phpt200\"), (phpt210,\"phpt210\"), (phpt220,\"phpt220\"), (phpt230,\"phpt230\"), (phpt240,\"phpt240\"), (phpt250,\"phpt250\")]\n \n cut_elid = [(elidTight,\"elidTight\"), (elidMedium,\"elidMedium\"), (elidLoose,\"elidLoose\")]\n cut_phid = [(phidTight,\"phidTight\"), (phidMedium,\"phidMedium\"), (phidLoose,\"phidLoose\")]\n cut_z = [ (invZ10, \"invZ10\"), (invZ15, \"invZ15\"), (invZ20,\"invZ20\")]\n \n cut_met = [(gtmet25,\"gtmet25\"), (gtmet30,\"gtmet30\"), (gtmet40,\"gtmet40\"), (gtmet50,\"gtmet50\"), (gtmet60,\"gtmet60\"), (gtmet70,\"gtmet70\"), (gtmet80,\"gtmet80\"), (gtmet90,\"gtmet90\"), (gtmet100,\"gtmet100\"), (gtmet110,\"gtmet110\"), (gtmet120,\"gtmet120\"), (gtmet130,\"gtmet130\"), (gtmet140,\"gtmet140\"), (gtmet150,\"gtmet150\"), (gtmet160,\"gtmet160\"), (gtmet170,\"gtmet170\"), (gtmet180,\"gtmet180\"), (gtmet190,\"gtmet190\"), (gtmet200,\"gtmet200\"),(gtmet210,\"gtmet210\")]\n \n cut_elpt = [(elpt30,\"elpt30\"), (elpt40,\"elpt40\"), (elpt50,\"elpt50\"), (elpt60,\"elpt60\"), (elpt70,\"elpt70\"), (elpt80,\"elpt80\"), (elpt90,\"elpt90\"), (elpt100,\"elpt100\"), (elpt110,\"elpt110\"), (elpt120,\"elpt120\"), (elpt130,\"elpt130\"), (elpt140,\"elpt140\"), (elpt150,\"elpt150\"), (elpt160,\"elpt160\")]\n \n #el_pt0_selbase_el_gtmet30_phpt60_elpt160_elidLoose_phidLoose_invZ20_.pdf.log\n\n #Uncomment these instead for debugging selbase_el_gtmet25_phpt80_elpt40_elidTight_phidTight_invZ10\n #cut_met = [(gtmet30,\"gtmet30\")]\n #cut_elpt = [(elpt160,\"elpt160\")]\n #cut_phpt = [(phpt60,\"phpt60\")]\n #cut_elid = [(elidLoose,\"elidLoose\")]\n #cut_phid = [(phidLoose,\"phidLoose\")]\n #cut_z = [ (invZ20, \"invZ20\")]\n \n selarray = [[(selbase_el,\"selbase_el\"),], cut_met, cut_phpt, cut_elpt, cut_elid, cut_phid, cut_z]\n #variables to be plotted\n vararray = [ #(\"el_n\", (10,0,10), \"num of electrons\"), ## variable name, x axis range, x axis label\n (\"el_pt[0]\", (50,0,500), \"p_{T}(e, leading)\"),\n (\"el_eta[0]\", (10,-5.0, 5.0), \"#eta (e, leading)\"),\n #(\"ph_n\", (10,0,10), \"num of photons\"), \n (\"ph_pt[0]\", (50,0,500), \"p_{T}(#gamma, leading)\"),\n (\"ph_eta[0]\", (10,-5.0,5.0), \"#eta (#gamma, leading)\"),\n (\"met_pt\", (50,0,500), \"MET\"),\n (\"met_phi\", (20,-pi,pi), \"MET #phi\")\n ] \n \n 
\n    # legend_config = {'legendLoc':\"Double\",\"legendTranslateX\":0.3}\n    hist_config = {\"logy\":1,\"blind\":True, \"weight\": \"PUWeight*NLOWeight\"}\n    \n    # ========steps for finding optimal cuts ================ \n\n#STEP 1 - apply cuts and save final yields for all background and signal samples\n    getyields = 0\n#STEP 2 get numbers from the above saved log files, make tables and calculate significance. Finally make SOB plots for every signal sample \n    makesob = 1\n\n#STEP 3 get the cut strings from tables saved in the above step and plot variables corresponding to those cuts\n    drawvars = 0\n\n\n    if (getyields):\n        vararray = [ (\"el_pt[0]\", (50,0,200), \"p_{T}(e, leading)\")] \n        \n        makeplots(0, sampManElG,vararray, resultsdir, selarray, hist_config, {}, \"\")\n        # first 0 means don't save the plots as we just need logfiles\n        #legend_config = {'legendLoc':\"Double\",\"legendTranslateX\":0.3}\n        #hist_config = {\"blind\":True, \"weight\": \"PUWeight*NLOWeight\"}\n        #makeplots(vararray, selarray, hist_config, legend_config) \n    \n    if(makesob):\n        vararray = [ (\"el_pt[0]\", (50,0,200), \"p_{T}(e, leading)\")] \n        \n        for j in range(len(signal_name)):\n            makesob_plots(0, sigstr_BCuts, sampManElG,resultsdir,vararray, signal_name[j], selarray, hist_config,{}, \"\" )# first param 0 to use the already existing table rather than re-reading all the log files\n\n    n = 0\n    if(drawvars):\n        \n        for j in range(len(signal_name)):\n            if (n == 0):\n                legend_config = {'legendLoc':\"Double\",\"legendTranslateX\":0.3}\n            else:\n                legend_config = {'legendLoc':\"Double\",\"legendTranslateX\":0.95}\n            n = n +1\n            hist_config = {\"logy\":1,\"blind\":True, \"weight\": \"PUWeight*NLOWeight\"}\n            \n            make_selected_plots(sigstr_BCuts,sampManElG,resultsdir, vararray, signal_name[j], selarray, hist_config, {}, \"log\")\n            \n            hist_config = {\"blind\":True, \"weight\": \"PUWeight*NLOWeight\"}\n            \n            \n            make_selected_plots(sigstr_BCuts,sampManElG,resultsdir, vararray, signal_name[j], selarray, hist_config, {}, \"\")\n            \n\n\n\n\n    if options.outputDir:\n        ## write and close root file\n        f1.Write()\n        f1.Close()\n\n    \n\nmain()\n","sub_path":"run_MakeVarDist_and_SB.py","file_name":"run_MakeVarDist_and_SB.py","file_ext":"py","file_size_in_byte":9166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
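# [Hedged usage note, not part of the records around it] With the options that
# run_MakeVarDist_and_SB.py defines above, a typical batch invocation would look
# roughly like:
#     python run_MakeVarDist_and_SB.py --batch 1 --outputDir /some/output/dir
# and the getyields/makesob/drawvars toggles inside main() select the pipeline step
# (1: dump yields to logs, 2: build tables and S/B plots, 3: draw selected variables).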
+{"seq_id":"144840234","text":"from tkinter import filedialog, messagebox, Listbox\r\nimport tkinter as tk\r\nfrom dictionary import *\r\nfrom openpyxl import *\r\nimport atexit\r\n\r\nwindow = tk.Tk()\r\nwindow.title('WAWA Scanner')\r\nwindow.geometry('300x250')\r\n\r\npicked = tk.StringVar()\r\npicked.set('----')\r\nproduct_num = []\r\n\r\ndef find_file():\r\n    global workbook\r\n    global wb\r\n    global file_name\r\n    global file_path\r\n    file = filedialog.askopenfilename(filetypes =[(\"Excel files\", \".xlsx .xls .xlsm\")])\r\n    file_name = file.split('/')[-1]\r\n    file_path = file[0:-len(file_name)]\r\n    print(file_path)\r\n    wb = load_workbook(file,data_only=True,keep_vba=True)\r\n    workbook = wb.active\r\n    print('file done')\r\n    if file_name == '':\r\n        print('Try Again')\r\n    else:\r\n        # Show file selected\r\n        show_file = tk.Label(window, text=file_name[0:30]+'...')\r\n        show_file.grid(column=4,row=2,columnspan=3,sticky= tk.W+tk.E)\r\n        if user_select.get() == 1:\r\n            options = tk.OptionMenu(window, picked, *choices)\r\n            options.grid(column=6, row=3)\r\n\r\n\r\n    scan1.focus_set()\r\n    return workbook\r\n\r\ndef run_program_remodel():\r\n    if len(scan1.get()) != 0:\r\n        find_pn_remodel()\r\n        print('Start Check')\r\n        pn_check(workbook)\r\n        print('Finished')\r\n\r\ndef run_program_new():\r\n    global product_num\r\n    if len(scan1.get()) != 0:\r\n        for item in rti_new_product.items():\r\n            if item[0] == scan1.get():\r\n                product_num = item[1]\r\n                print(product_num)\r\n        pn_check(workbook)\r\n\r\n\r\n\r\n\r\ndef find_pn_remodel():\r\n    global product_num\r\n    print(scan1.get())\r\n    for item in rti_remodel_product.items():\r\n        if item[0] == scan1.get():\r\n            product_num = item[1]\r\n            print('found')\r\n    print(product_num)\r\n    return product_num\r\n\r\ndef pn_check(workbook):\r\n    if len(product_num)==0:\r\n        tk.messagebox.showinfo(\"Can't Find\",\"I can't find this product number. Double-check that it is correct\")\r\n\r\n    else:\r\n        for row in workbook.iter_rows():\r\n            if row[0].value in exclude:\r\n                print('206/9 Exclude')\r\n                continue\r\n            for cell in row:\r\n                if cell.value == product_num:\r\n                    print(str(cell.row))\r\n                    if workbook[cell.row][3].value is None and workbook[cell.row][4].value is None:\r\n                        ok = tk.messagebox.askokcancel(\"Item Added\", \"Add Item as: \"+ str(workbook[cell.row][0].value))\r\n                        if not ok:\r\n                            continue\r\n                        workbook[cell.row][3].value = scan2.get()\r\n                        workbook[cell.row][4].value = scan3.get()\r\n                        print(workbook[cell.row][3].value)\r\n                        print('Check finished ')\r\n                        break\r\n                    else:\r\n                        continue\r\n                    break\r\n        else:\r\n            print('No More Nones')\r\n            tk.messagebox.showinfo(\"Can't Add\", \"I can't find an empty slot for this item. \")\r\n\r\nuser_select = tk.IntVar()\r\nuser_select.set(0)\r\ndef selection():\r\n    if user_select.get() ==1:\r\n        print('New Store')\r\n        if scan1.get() == '':\r\n            pn_lookup()\r\n        run_program_new()\r\n    elif user_select.get() ==2:\r\n        print('Remodel')\r\n        run_program_remodel()\r\n    else:\r\n        print('Failed at selection')\r\n\r\ndef pn_lookup():\r\n    for item in choices:\r\n        if item == picked.get():\r\n            scan1.insert(0,item)\r\n            print('PN_Found')\r\n\r\ndef continue_selection_same():\r\n    scan2.delete(first=0,last=100)\r\n    scan3.delete(first=0,last=100)\r\n    print('cleared')\r\n    scan2.focus_set()\r\n\r\ndef continue_selection_diff():\r\n    scan1.delete(first=0,last=100)\r\n    scan2.delete(first=0,last=100)\r\n    scan3.delete(first=0,last=100)\r\n    print('cleared')\r\n    scan1.focus_set()\r\n\r\ndef finish_program():\r\n    print(file_name)\r\n    wb.save(filename= file_path+'Filled_' + file_name)\r\n    print('Saved')\r\n    window.destroy()\r\n\r\ndef exit_prompt():\r\n    confirm = messagebox.askyesno('Save File?','Do you want to save the file?')\r\n    if confirm:\r\n        finish_program()\r\n    else:\r\n        print('Closed')\r\n\r\n#Add selection for New Store\r\nRbtn = tk.Radiobutton(window,text='New Store',indicatoron=0 , variable=user_select, value=1,width=10)\r\nRbtn.grid(column=4,row=0)\r\n\r\n#Add selection for Remodel\r\nRbtn = tk.Radiobutton(window,text='Remodel',indicatoron=0, variable=user_select, value=2,width=10)\r\nRbtn.grid(column=6,row=0)\r\n\r\n# Add file button\r\nbtn = tk.Button(window, text='Add File', command=find_file)\r\nbtn.grid(column=5, row=1)\r\n\r\n#Text to indicate the product number field\r\nlbl1 = tk.Label(window,text='Product Number')\r\nlbl1.grid(column=4,row=3)\r\n\r\n#Entry field for product number\r\nscan1 = tk.Entry(window, width=15)\r\nscan1.grid(column=5,row=3)\r\n\r\n#Entry field for asset tags\r\nscan2 = tk.Entry(window, width=15)\r\nscan2.grid(column=5,row=4)\r\n#Text to indicate asset tag field\r\nlbl2 = tk.Label(window,text='Asset Tag')\r\nlbl2.grid(column=4,row=4)\r\n\r\n#Entry field for serial number\r\nscan3 = tk.Entry(window, width=15)\r\nscan3.grid(column=5,row=5)\r\n#Text to indicate Serial Number Field\r\nlbl3 = tk.Label(window,text='Serial Number')\r\nlbl3.grid(column=4,row=5)\r\n\r\n#product number submit button\r\nbtn1 = tk.Button(window,text='Add',command =selection,padx=40)\r\nbtn1.grid(column=5,row=6,pady=5)\r\n\r\nspace = tk.Label(window,text=' ')\r\nspace.grid(column=5,row=7)\r\n\r\n#Button to continue adding more of the same product\r\ncont_btn_same = tk.Button(window,text='Add Additional',command= continue_selection_same)\r\ncont_btn_same.grid(column=4,row=8)\r\n\r\n#Button to continue adding other products\r\ncont_btn_diff = tk.Button(window,text='Add Other',command= continue_selection_diff)\r\ncont_btn_diff.grid(column=5,row=8)\r\n\r\n#Button to quit adding and finish\r\nfin_btn = tk.Button(window,text='Finish',command=finish_program)\r\nfin_btn.grid(column=6,row=8)\r\n\r\n\r\n#atexit.register(exit_prompt)\r\n\r\n\r\nwindow.mainloop()","sub_path":"GUI_3.py","file_name":"GUI_3.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
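# [Hedged sketch, not part of the surrounding records] The core of GUI_3.py's
# pn_check() above, reduced to a standalone helper over an openpyxl worksheet
# (all names here are illustrative): find the scanned product number and fill the
# first matching row whose asset-tag/serial columns (D/E) are still empty.
def fill_first_empty_slot(worksheet, product_num, asset_tag, serial_no, exclude=()):
    for row in worksheet.iter_rows():
        if row[0].value in exclude:
            continue  # skip excluded section rows
        for cell in row:
            if cell.value == product_num:
                target = worksheet[cell.row]  # tuple of cells for that row
                if target[3].value is None and target[4].value is None:
                    target[3].value = asset_tag   # column D: asset tag
                    target[4].value = serial_no   # column E: serial number
                    return cell.row
    return None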
+{"seq_id":"123829501","text":"#\n# Wrappers on some IDA Python functions to help using them :)\n#\n# It has been heavily tested on x86/x86_64 but could possibly be modified to\n# work on other architectures.\n#\n\nfrom idc import *\nfrom idautils import *\nimport idaapi\nimport sark\nimport binascii\nimport sys\nimport ida_segment\nimport idautils\nimport idaapi\nimport ida_name\n\ndef logmsg(s, end=None):\n    if type(s) == str:\n        if end != None:\n            print(\"[ida_helper] \" + s, end=end)\n        else:\n            print(\"[ida_helper] \" + s)\n    else:\n        print(s)\n\n# Attempt to have globals we can use in all other functions without having to\n# worry about architecture :)\ninfo = idaapi.get_inf_structure()\nif info.is_64bit():\n    ERROR_MINUS_1 = 0xffffffffffffffff\n    SIZE_POINTER = 8\n    ARCHITECTURE = 64\n    Pword = get_qword\nelse:\n    ERROR_MINUS_1 = 0xffffffff\n    SIZE_POINTER = 4\n    ARCHITECTURE = 32\n    Pword = get_wide_dword\n\n# Gives us the xrefs jumping to/calling an address\ndef get_xrefs(ea = get_screen_ea()):\n    res = []\n    for e in XrefsTo(ea):\n        #logmsg(\"0x%x -> 0x%x\" % (e.frm, e.to))\n        res.append(e.frm)\n    return res\n\n# Gives the name of the function that an address is part of\ndef get_function_name(ea = get_screen_ea()):\n    func = idaapi.get_func(ea)\n    funcname = get_func_name(func.start_ea)\n    #logmsg(\"%X is in %s\" % (ea, funcname))\n    return funcname\n\n# Gives the start address of the function that an address is part of\ndef get_function_addr(ea = get_screen_ea()):\n    func = idaapi.get_func(ea)\n    if not func:\n        logmsg(\"Error: get_function_addr: Failed to find function start for 0x%x\" % ea)\n        return None\n    return func.start_ea\n\n# Renames an address with a name (and appends a digit at the end if it already\n# exists)\ndef rename_function(e, funcname):\n    currname = funcname\n    count = 1\n    if e == None:\n        logmsg(\"Error: can't rename Nonetype to %s\" % funcname)\n        return False\n    while not set_name(e, currname, SN_CHECK):\n        currname = \"%s_%d\" % (funcname, count)\n        count += 1\n        if count > 100:\n            logmsg(\"Error: rename_function looped too much for 0x%d -> %s\" % (e, funcname))\n            return False\n    return True\n\n# Remove name for a function (most likely to have sub_XXXXXXXX back after that)\ndef unname_address(e):\n    if not set_name(e, \"\", SN_CHECK):\n        logmsg(\"Error: unname_address: could not remove name for element\")\n        return False\n    return True\nunname_function = unname_address\n\n# Retrieve a list with all the IDB's segment names\ndef get_segments():\n    seg_names = []\n    for seg in 
idautils.Segments():\n st = ida_segment.getseg(seg)\n seg_names.append(idaapi.get_segm_name(st))\n return seg_names\n\n# Note this must match the list of segments in the current file\ndefault_seg_names = [\".init\", \".plt\", \".text\", \".fini\", \".rodata\", \".eh_frame_hdr\",\n \"eh_frame\", \".gcc_except_table\", \".tdata\", \".ctors\", \".dtors\",\n \".jcr\", \".got\", \".got.plt\", \".data\", \"freq_data_section\",\n \".bss\", \"extern\", \"abs\", \".rdata\"]\n# For each segment name, save start address, end address in a dictionary\n# This can be used to know if a pointer in one segment is part of another\n# segment\ndef get_segments_info(seg_names=default_seg_names):\n res = {}\n for name in seg_names:\n seg = idaapi.get_segm_by_name(name)\n if not seg:\n continue\n res[name] = {}\n res[name]['start_ea'] = seg.start_ea\n for n in range(idaapi.get_segm_qty()):\n seg = idaapi.getnseg(n)\n for name,d in res.items():\n if d['start_ea'] == seg.start_ea:\n res[name]['ID'] = seg.name # this is an ID, not a name, kthx IDA :(\n res[name]['end_ea'] = seg.end_ea\n return res\n\n# Checks if an address is part of a given segment\n# seg_info = get_segments_info() is passed to this function\ndef addr_is_in_one_segment(addr, seg_info):\n for name, d in seg_info.items():\n if addr <= seg_info[name][\"end_ea\"] and addr >= seg_info[name][\"start_ea\"]:\n return True\n return False\n\ndef name_to_rva(s):\n addr = get_name_ea_simple(s)\n if addr == ERROR_MINUS_1:\n logmsg(\"Error: name_to_rva: Failed to find '%s' symbol\" % s)\n return None\n logmsg(\"image base 0x%x\" % idaapi.get_imagebase())\n return addr - idaapi.get_imagebase()\n\n# Returns the address of any name: function, label, global, etc.\ndef name_to_addr(s):\n addr = get_name_ea_simple(s)\n if addr == ERROR_MINUS_1:\n logmsg(\"Error: name_to_addr: Failed to find '%s' symbol\" % s)\n return None\n return addr\n\ndef addr_to_name(ea):\n name = get_name(ea, ida_name.GN_VISIBLE)\n if name == \"\":\n logmsg(\"Error: addr_to_name: Failed to find '0x%x' address\" % ea)\n return \"\"\n return name\n\n# Gives the first Xref\ndef first_xref(addr):\n for e in XrefsTo(addr):\n addr = e.frm\n return addr\n logmsg(\"Error: first_xref: Failed to find xref for 0x%x\" % addr)\n return None\n\n# Gives the first Xref of first Xref to an address\ndef first_xref_of_first_xref(addr):\n for e in XrefsTo(addr):\n addr = e.frm\n for e in XrefsTo(addr):\n addr = e.frm\n return addr\n logmsg(\"Error: first_xref_of_first_xref: Failed to find xref for 0x%x\" % addr)\n return None\n\n# Gives the second Xref\ndef second_xref(addr):\n i = 1\n for e in XrefsTo(addr):\n frm = e.frm\n if i == 2:\n return frm\n i += 1\n logmsg(\"Error: second_xref: Failed to find xref for 0x%x\" % addr)\n return None\n\n# Gives the third Xref\ndef third_xref(addr):\n i = 1\n for e in XrefsTo(addr):\n frm = e.frm\n if i == 3:\n return frm\n i += 1\n logmsg(\"Error: third_xref: Failed to find xref for 0x%x\" % addr)\n return None\n\n# Gives the last Xref\ndef last_xref(addr):\n frm = None\n for e in XrefsTo(addr):\n frm = e.frm\n #print(\"0x%x\" % frm)\n if frm == None:\n logmsg(\"Error: last_xref: Failed to find xref for 0x%x\" % addr)\n return frm\n\n# Rename a function\ndef rename_address(e, funcname):\n if not set_name(e, funcname, SN_CHECK):\n logmsg(\"Error: rename_address: Impossible to rename 0x%x with %s\" % (e, funcname))\n return None\n return \"OK\"\n\n# Find a series of bytes\n# e.g. 
with byteStr = JMP_ESP = '\\xff\\xe4'\ndef find_gadget(byteStr):\n seg_info = get_segments_info()\n addr = seg_info[\".text\"][\"start_ea\"]\n while addr <= seg_info[\".text\"][\"end_ea\"]:\n b = get_bytes(addr, len(byteStr))\n if b == byteStr:\n #logmsg(\"Found candidate for gadget %s in .text at 0x%x\" % (binascii.hexlify(byteStr), addr))\n return addr\n addr += 1\n if addr > seg_info[\".data\"][\"end_ea\"]:\n logmsg(\"Error: Could not find gadget in .text\")\n return None\n\n# helper for get_call_arguments()-like for when we get a register instead of a useful\n# value as an argument, so we can retrieve what the register value is.\n# e.g.\n# .text:08380F8D mov eax, offset aAdmin_quick_ha ; \"admin_quick_handoff\"\n# .text:08380F92 mov [esp+20h], edi\n# .text:08380F96 mov [esp+1Ch], ecx\n# .text:08380F9A mov [esp+18h], edx\n# .text:08380F9E mov [esp+4], eax\n# .text:08380FA2 mov dword ptr [esp], offset aUnicorn_admi_0 ; \"unicorn_admin_server.c\"\n# .text:08380FA9 call unicorn_log_impl\n# assuming we are on instruction at 08380F9E, we want to resolve what eax is i.e. 0x0921BA08\n# .rodata:0921BA08 aAdmin_quick_ha db 'admin_quick_handoff',0\ndef get_register_value(e=get_screen_ea(), register=None, count_max=20):\n\n reg = print_operand(e, 1)\n if register != reg:\n logmsg(\"Error: bad register at 0x%x\" % e)\n return None\n\n arg_instructions = [\"mov %s\",\n \"movsxd %s\",\n \"lea %s\"]\n\n e = prev_head(e)\n count = 0\n while count <= count_max:\n disasm_line = GetDisasm(e)\n #logmsg(\"'%s'\" % disasm_line)\n for i in range(len(arg_instructions)):\n ins = arg_instructions[i] % register\n if ins in disasm_line:\n #logmsg(\"0x%x - Matches '%s'\" % (e, ins))\n # First arrive, first serve\n # We suppose that the instruction closest is the\n # one giving the register value.\n # If we encounter another instruction initializing\n # the register later, we ignore it\n # XXX: if a different register is used, it may give weird result\n # mov rax, cs:off_46141C0 -> accepted\n # movsxd rax, dword ptr [rax] -> rejected\n # mov [rdx+18h], rax\n if get_operand_type(e, 1) == o_mem:\n val = get_operand_value(e, 1)\n #logmsg(\"Found register value %s: 0x%x\" % (register, val))\n return val\n e = prev_head(e)\n count += 1\n #logmsg(\"Could not find register value\")\n return None\n\n# For a given address, check instructions above looking for potential arguments\n# and save this into a dictionary.\n# It only works on x86 architecture.\n# E.g.: this can be used on some logging functions where one of the argument\n# passed to the logging function contains the caller's function name\n# This allows renaming the caller's function automatically\ndef get_call_arguments_x86_1(e=get_screen_ea(), count_max=10):\n return get_structure_offsets(e=e, count_max=count_max, reg=\"esp\")\n\n# Works on both 32-bit and 64-bit\n# depending on the reg we provide (\"rdx\", \"edx\", etc.)\n#\n# It is generally useful when reg=\"esp\" but we also support parsing from\n# other registers in case a structure is filled\ndef get_structure_offsets(e=get_screen_ea(), count_max=10, reg=\"esp\"):\n args = {}\n\n # are we a call instruction?\n mnem = print_insn_mnem(e)\n if mnem != \"call\" and mnem != \"jmp\":\n logmsg(\"Error: not a call instruction at 0x%x\" % e)\n return None\n\n # we hardcode the instructions that we are looking for i.e. we don't look\n # for anything else that +4, +8, etc.\n # i.e we don't support yet case where the offset to esp is renamed by IDA\n\n # direct offset\n # e.g. 
\"mov dword ptr [esp], offset aUnicorn_admi_0\"\n arg_instructions = [\"mov dword ptr [%s]\" % reg,\n \"mov dword ptr [%s+4]\" % reg,\n \"mov dword ptr [%s+8]\" % reg,\n \"mov dword ptr [%s+0Ch]\" % reg,\n \"mov dword ptr [%s+10h]\" % reg,\n \"mov dword ptr [%s+14h]\" % reg,\n \"mov dword ptr [%s+18h]\" % reg,\n \"mov dword ptr [%s+1Ch]\" % reg]\n arg_instructions_2 = [\"mov qword ptr [%s]\" % reg,\n \"mov qword ptr [%s+4]\" % reg,\n \"mov qword ptr [%s+8]\" % reg,\n \"mov qword ptr [%s+0Ch]\" % reg,\n \"mov qword ptr [%s+10h]\" % reg,\n \"mov qword ptr [%s+14h]\" % reg,\n \"mov qword ptr [%s+18h]\" % reg,\n \"mov qword ptr [%s+1Ch]\" % reg]\n\n # register so will need an extra step to resolve...\n # e.g. \"mov [esp+4], eax\"\n arg_instructions_3 = [\"mov [%s]\" % reg,\n \"mov [%s+4]\" % reg,\n \"mov [%s+8]\" % reg,\n \"mov [%s+0Ch]\" % reg,\n \"mov [%s+10h]\" % reg,\n \"mov [%s+14h]\" % reg,\n \"mov [%s+18h]\" % reg,\n \"mov [%s+1Ch]\" % reg]\n\n # parse arguments, parsing instructions backwards\n e = prev_head(e)\n count = 0\n # we only supports 10 instructions backwards looking for arguments\n while count <= count_max:\n disasm_line = GetDisasm(e)\n #logmsg(\"'%s'\" % disasm_line)\n for i in range(len(arg_instructions)):\n if arg_instructions[i] in disasm_line:\n #logmsg(\"0x%x - Matches '%s'\" % (e, arg_instructions[i]))\n # First arrive, first serve\n # We suppose that the instruction closest to the call is the\n # one giving the argument.\n # If we encounter another instruction with mov [esp+offset]\n # later with the same offset, we ignore it\n if i not in args.keys():\n args[i] = get_operand_value(e,1)\n #logmsg(\"Found argument %d: 0x%x\" % (i, args[i]))\n for i in range(len(arg_instructions_2)):\n if arg_instructions_2[i] in disasm_line:\n #logmsg(\"Matches '%s'\" % arg_instructions_2[i])\n if i not in args.keys():\n args[i] = get_operand_value(e,1)\n #logmsg(\"Found argument %d: 0x%x (2)\" % (i, args[i]))\n for i in range(len(arg_instructions_3)):\n if arg_instructions_3[i] in disasm_line:\n #logmsg(\"Matches '%s'\" % arg_instructions_3[i])\n if i not in args.keys():\n register = print_operand(e, 1)\n #logmsg(\"Argument %d based on register %s...\" % (i, register))\n value = get_register_value(e, register)\n if value != None:\n args[i] = value\n #logmsg(\"Found argument %d: 0x%x (3)\" % (i, args[i]))\n e = prev_head(e)\n count += 1\n return args\n\n# see get_call_arguments_x86_1\ndef get_call_arguments_x86_3(e = get_screen_ea(), count_max = 5):\n args = {}\n\n # are we a call instruction?\n mnem = print_insn_mnem(e)\n if mnem != \"call\" and mnem != \"jmp\":\n logmsg(\"Error: not a call instruction at 0x%x\" % e)\n return None\n\n # Parse something like:\n # push offset aSshPacketSocke ; \"ssh_packet_socket_callback\"\n # push 2\n # push esi\n # call log\n args_tmp = []\n # parse arguments, parsing instructions backwards\n e = prev_head(e)\n count = 0\n # we only supports 10 instructions backwards looking for arguments\n while count <= count_max:\n disasm_line = GetDisasm(e)\n #logmsg(\"'%s'\" % disasm_line)\n # arguments are pushed in reverse order so we get the last arg first\n if \"push \" in disasm_line:\n args_tmp.append(get_operand_value(e,0))\n e = prev_head(e)\n count += 1\n for i in range(len(args_tmp)):\n args[i] = args_tmp[i]\n return args\n\n# Alternative to get_call_arguments_x86_1(). 
See get_call_arguments_x86_1() for more\n# information.\ndef get_call_arguments_x86_2(e = get_screen_ea(), count_max = 10):\n args = {}\n\n # are we a call instruction?\n mnem = print_insn_mnem(e)\n if mnem != \"call\" and mnem != \"jmp\":\n logmsg(\"Error: not a call instruction at 0x%x\" % e)\n return None\n\n # we hardcode the instructions that we are looking for i.e. we don't look\n # for anything else that +4, +8, etc.\n # i.e we don't support yet case where the offset to esp is renamed by IDA\n args_offsets = [0, 4, 8, 0xC, 0x10, 0x14]\n # parse arguments, parsing instructions backwards\n e = prev_head(e)\n count = 0\n # we only supports 10 instructions backwards looking for arguments\n while count <= count_max:\n disasm_line = GetDisasm(e)\n #logmsg(\"'%s'\" % disasm_line)\n if disasm_line.startswith(\"mov [esp\"):\n # o_phrase = 3 # Memory Ref [Base Reg + Index Reg] phrase\n if get_operand_type(e,0) == o_phrase:\n # unfortunately we can't test that there is no index register\n # so we ignore for now...\n if 0 not in args.keys():\n args[0] = get_operand_value(e,1)\n # o_displ = 4 # Memory Reg [Base Reg + Index Reg + Displacement] phrase+addr\n if get_operand_type(e,0) == o_displ:\n for i in range(len(args_offsets)):\n if i == 0:\n continue # handled by above case\n if get_operand_value(e,0) == args_offsets[i]:\n # First arrive, first serve\n # We suppose that the instruction closest to the call\n # is the one giving the argument.\n # If we encounter another instruction with mov [esp+offset]\n # later with the same offset, we ignore it\n if i not in args.keys():\n args[i] = get_operand_value(e,1)\n #logmsg(\"Found argument %d: 0x%x\" % (i, args[i]))\n e = prev_head(e)\n count += 1\n return args\n\ndef get_call_arguments_x64_linux(e = get_screen_ea(), count_max = 10, debug=False):\n return get_call_arguments_x64_generic(e=e, count_max=count_max, debug=debug, linux=True)\n\ndef get_call_arguments_x64_windows(e = get_screen_ea(), count_max = 10, debug=False):\n return get_call_arguments_x64_generic(e=e, count_max=count_max, debug=debug, linux=False)\n \n# Similar to get_call_arguments_x86_1() but for x86_64. 
See get_call_arguments_x86_1()\n# for more information.\ndef get_call_arguments_x64_generic(e = get_screen_ea(), count_max = 10, debug=False, linux=True):\n args = {}\n\n # are we a call instruction?\n mnem = print_insn_mnem(e)\n if mnem != \"call\" and mnem != \"jmp\":\n logmsg(\"Error: not a call instruction at 0x%x\" % e)\n return None\n\n # we only supports 6 arguments for Linux\n if linux:\n arg_instructions_x86 = [\"mov edi\",\n \"mov esi\",\n \"mov edx\",\n \"mov ecx\",\n \"mov r8d\",\n \"mov r9d\"]\n arg_instructions_x86_lea = [\"lea edi\",\n \"lea esi\",\n \"lea edx\",\n \"lea ecx\",\n \"lea r8d\",\n \"lea r9d\"]\n arg_instructions_x64 = [\"mov rdi\",\n \"mov rsi\",\n \"mov rdx\",\n \"mov rcx\",\n \"mov r8\",\n \"mov r9\"]\n arg_instructions_x64_lea = [\"lea rdi\",\n \"lea rsi\",\n \"lea rdx\",\n \"lea rcx\",\n \"lea r8\",\n \"lea r9\"]\n # we only supports 4 arguments for Windows\n else:\n arg_instructions_x86 = [\"mov ecx\",\n \"mov edx\",\n \"mov r8d\",\n \"mov r9d\"]\n arg_instructions_x86_lea = [\"lea ecx\",\n \"lea edx\",\n \"lea r8d\",\n \"lea r9d\"]\n arg_instructions_x64 = [\"mov rcx\",\n \"mov rdx\",\n \"mov r8\",\n \"mov r9\"]\n arg_instructions_x64_lea = [\"lea rcx\",\n \"lea rdx\",\n \"lea r8\",\n \"lea r9\"]\n\n # parse arguments, parsing instructions backwards\n e = prev_head(e)\n count = 0\n # we only supports 10 instructions backwards looking for arguments\n while count <= count_max:\n disasm_line = GetDisasm(e)\n if debug:\n logmsg(\"Handling '%s'\" % disasm_line)\n for i in range(len(arg_instructions_x86)):\n #if debug:\n # logmsg(\"'%s'\" % arg_instructions_x86[i])\n instruction_list = [arg_instructions_x86[i],\n arg_instructions_x86_lea[i],\n arg_instructions_x64[i],\n arg_instructions_x64_lea[i]]\n if any(instruction in disasm_line for instruction in instruction_list):\n # First arrive, first serve\n # We suppose that the instruction closest to the call is the one giving the argument.\n # If we encounter another instruction with \"mov reg\" later with the same offset, we ignore it\n if i not in args.keys():\n args[i] = get_operand_value(e,1)\n if debug:\n logmsg(\"Found argument %d: 0x%x\" % (i, args[i]))\n e = prev_head(e)\n count += 1\n return args\n\n# Similar to get_call_arguments_x64_linux() but for ARM 32-bit. 
See get_call_arguments_x86_1()\n# for more information.\ndef get_call_arguments_arm(e=get_screen_ea(), count_max=10):\n args = {}\n\n # are we a BL instruction?\n mnem = print_insn_mnem(e)\n if mnem != \"BL\" and mnem != \"SVC\" and mnem != \"BLNE\" and mnem != \"BLHI\" and mnem != \"BLEQ\":\n logmsg(\"Error: not a BL or SVC or BLNE or BLHI or BLEQ instruction at 0x%x\" % e)\n return None\n\n # we only supports 4 arguments\n arg_instructions_arm_mov = [\"MOV R0,\",\n \"MOV R1,\",\n \"MOV R2,\",\n \"MOV R3,\"]\n arg_instructions_arm_adr = [\"ADR R0,\",\n \"ADR R1,\",\n \"ADR R2,\",\n \"ADR R3,\"]\n arg_instructions_arm_ldr = [\"LDR R0,\",\n \"LDR R1,\",\n \"LDR R2,\",\n \"LDR R3,\"]\n arg_instructions_arm_adr2 = [\"ADREQ R0,\",\n \"ADREQ R1,\",\n \"ADDEQ R2,\",\n \"ADREQ R3,\"]\n arg_instructions_arm_mov2 = [\"MOVEQ R0,\",\n \"MOVEQ R1,\",\n \"MOVEQ R2,\",\n \"MOVEQ R3,\"]\n arg_instructions_arm_adr3 = [\"ADRNE R0,\",\n \"ADRNE R1,\",\n \"ADDNE R2,\",\n \"ADRNE R3,\"]\n # parse arguments, parsing instructions backwards\n e = prev_head(e)\n count = 0\n # we only supports 10 instructions backwards looking for arguments\n while count <= count_max:\n disasm_line = GetDisasm(e)\n #logmsg(\"'%s'\" % disasm_line)\n for i in range(len(arg_instructions_arm_mov)):\n #logmsg(\"'%s'\" % arg_instructions_arm_mov[i])\n #logmsg(\"Testing index %d\" % i)\n # First arrive, first serve\n # We suppose that the instruction closest to the call is the one giving the argument.\n # If we encounter another instruction with \"MOV reg\" later with the same offset, we ignore it\n instruction_list = [arg_instructions_arm_mov[i],\n arg_instructions_arm_mov2[i],\n arg_instructions_arm_adr[i],\n arg_instructions_arm_adr[i],\n arg_instructions_arm_adr3[i]]\n if any(instruction in disasm_line for instruction in instruction_list):\n if i not in args.keys():\n args[i] = get_operand_value(e,1)\n #logmsg(\"Found argument %d: 0x%x\" % (i, args[i]))\n elif arg_instructions_arm_ldr[i] in disasm_line:\n if i not in args.keys():\n addr = get_operand_value(e,1)\n args[i] = get_wide_dword(addr)\n #logmsg(\"Found argument %d: 0x%x\" % (i, args[i]))\n e = prev_head(e)\n count += 1\n return args\n\ndef get_call_arguments_x86(e = get_screen_ea(), count_max = 10):\n args = get_call_arguments_x86_1(e, count_max)\n if not args:\n args = get_call_arguments_x86_2(e, count_max)\n if not args:\n args = get_call_arguments_x86_3(e, count_max)\n return args\n\n# Wrapper to have a generic method to get arguments for a function call\n# based on internal helpers.\ndef get_call_arguments(e=get_screen_ea(), count_max=10):\n if ARCHITECTURE == 32:\n args = get_call_arguments_x86(e, count_max)\n if not args:\n args = get_call_arguments_arm(e, count_max)\n else:\n # XXX - we could determine if it is an ELF vs PE and call the right one\n args = get_call_arguments_x64_linux(e, count_max)\n #args = get_call_arguments_x64_windows(e, count_max)\n return args\n\n# find all candidates matching a given binary data\n# bytes_str needs to have spaces between each byte\n# e.g. 
\"0x%x\" % find_binary(get_screen_ea(), 1, '0d c0 a0 e1')\ndef find_all(bytes_str):\n ret = []\n ea = idc.find_binary(0, 1, bytes_str)\n while ea != idc.BADADDR:\n #print(\"ea = 0x%x\" % ea)\n # If the opcode is found in a function, skip it\n if sark.Line(ea).is_code:\n #print(\"Existing function at 0x%x\" % ea)\n pass\n else:\n ret.append(ea)\n # In ARM every instruction is aligned to 4-bytes\n ea = idc.find_binary(ea + 4, 1, bytes_str)\n return ret\n\n# similar to rename_function_by_aString_being_used()\n# but instead of assuming knowing an IDA aString label, takes\n# a sequence of characters to look for in order to find the right\n# aString\n# Note: str can be null terminated or not, or have any byte value\ndef rename_function_by_ascii_string_being_used(str, funcName, prevFunc=None, nextFunc=None, xref_func=first_xref):\n\n h = binascii.hexlify(str)\n bytes_str = \" \".join([h[i:i+2] for i in range(0, len(h), 2)])\n matches = find_all(bytes_str)\n if len(matches) != 1:\n logmsg(\"ERROR: rename_function_by_ascii_string_being_used does not support multiple strings\")\n return False\n str_addr = matches[0]\n aString = get_name(str_addr, ida_name.GN_VISIBLE)\n if not aString:\n logmsg(\"ERROR: rename_function_by_ascii_string_being_used did not find any name for aString\")\n return False\n\n return rename_function_by_aString_being_used(aString, funcName, prevFunc=prevFunc, nextFunc=nextFunc, xref_func=xref_func)\n\n# Uses an IDA string label (aString) to find a function and rename it (funcName)\n# It uses Xrefs to this string label to locate one function and optionally\n# functions surrounding the located function to rename the function\ndef rename_function_by_aString_being_used(aString, funcName, prevFunc=None, nextFunc=None, xref_func=first_xref):\n global ERROR_MINUS_1\n if name_to_addr(funcName) != None:\n logmsg(\"%s already defined\" % funcName)\n return True\n\n addr_str = name_to_addr(aString)\n if addr_str == None:\n return False\n addr_str_used = xref_func(addr_str)\n if addr_str_used == None:\n return False\n funcaddr = get_function_addr(addr_str_used)\n if funcaddr == None:\n return False\n if prevFunc != None:\n for i in range(prevFunc):\n logmsg(\"Going to previous function of 0x%x\" % funcaddr)\n funcaddr = get_prev_func(funcaddr)\n if nextFunc != None:\n for i in range(nextFunc):\n logmsg(\"Going to next function of 0x%x\" % funcaddr)\n funcaddr = get_next_func(funcaddr)\n logmsg(\"%s = 0x%x\" % (funcName, funcaddr))\n res = rename_address(funcaddr, funcName)\n if res == None:\n return False\n return True\n\n# Same as rename_function_by_aString_being_used() but with the additional\n# capability to filter that the found function does not contain any references\n# to some other IDA string labels.\ndef rename_function_by_aString_being_used_with_filter(aString, funcName, prevFunc=None, nextFunc=None, filtered_aStrings=[], override_old_name=False):\n global ERROR_MINUS_1\n\n if override_old_name:\n funcaddr = name_to_addr(funcName)\n if funcaddr != None:\n logmsg(\"Removing old: %s at 0x%x\" % (funcName, funcaddr))\n unname_function(funcaddr)\n else:\n if name_to_addr(funcName) != None:\n logmsg(\"%s already defined\" % funcName)\n return True\n\n addr_str = name_to_addr(aString)\n if addr_str == None:\n return False\n for addr_str_used in get_xrefs(addr_str):\n if addr_str_used == None:\n continue\n funcaddr = get_function_addr(addr_str_used)\n if funcaddr == None:\n continue\n if prevFunc != None:\n for i in range(prevFunc):\n logmsg(\"Going to previous function of 0x%x\" % 
funcaddr)\n funcaddr = get_prev_func(funcaddr)\n if nextFunc != None:\n for i in range(nextFunc):\n logmsg(\"Going to next function of 0x%x\" % funcaddr)\n funcaddr = get_next_func(funcaddr)\n logmsg(\"Candidate function: 0x%x == %s ?\" % (funcaddr, funcName))\n # Checking now if any filtered referenced string in the candidate function\n bFilter = False\n for aFilteredStr in filtered_aStrings:\n addr_filt_str = name_to_addr(aFilteredStr)\n if addr_filt_str == None:\n continue\n addr_filt_str_used = first_xref(addr_filt_str)\n if addr_filt_str_used == None:\n continue\n funcaddr_filt = get_function_addr(addr_filt_str_used)\n if funcaddr_filt == None:\n continue\n if funcaddr_filt == funcaddr:\n logmsg(\"This is not the right function: 0x%x == %s\" % (funcaddr, aFilteredStr))\n bFilter = True\n break\n if not bFilter:\n break\n if bFilter:\n logmsg(\"Failed to find the right function\")\n return False\n\n logmsg(\"%s = 0x%x\" % (funcName, funcaddr))\n res = rename_address(funcaddr, funcName)\n if res == None:\n return False\n return True\n\n# ARM only atm\n# similar to rename_function_by_aString_surrounding_call()\n# but instead of assuming knowing an IDA aString label, takes\n# a sequence of characters to look for in order to find the right\n# aString\n# Note: str can be null terminated or not, or have any byte value\ndef rename_function_by_ascii_surrounding_call(str, funcName, xref_func=first_xref, count_max=10, filtered_funcs=[], count_filtered_funcs=0, head_func=prev_head):\n\n h = binascii.hexlify(str)\n bytes_str = \" \".join([h[i:i+2] for i in range(0, len(h), 2)])\n matches = find_all(bytes_str)\n if len(matches) != 1:\n logmsg(\"ERROR: rename_function_by_ascii_surrounding_call does not support multiple strings\")\n return False\n str_addr = matches[0]\n aString = get_name(str_addr, ida_name.GN_VISIBLE)\n if not aString:\n logmsg(\"ERROR: rename_function_by_ascii_surrounding_call did not find any name for aString\")\n return False\n\n return rename_function_by_aString_surrounding_call(aString, funcName, xref_func=xref_func, count_max=count_max, filtered_funcs=filtered_funcs, count_filtered_funcs=count_filtered_funcs, head_func=head_func)\n\n# ARM only atm\n# Uses an IDA string label (aString) to find a function and then list all instructions\n# backwards looking for ARM Branch With Link instruction \"BL\". 
And rename the function\n# part of the BL instruction.\ndef rename_function_by_aString_surrounding_call(aString, funcName, xref_func=first_xref, count_max=10, filtered_funcs=[], count_filtered_funcs=0, head_func=prev_head):\n global ERROR_MINUS_1\n if name_to_addr(funcName) != None:\n logmsg(\"%s already defined\" % funcName)\n return True\n\n if filtered_funcs and count_filtered_funcs > 0:\n logmsg(\"ERROR: Only one argument is supported\")\n return False\n\n # required functions to locate funcName\n for filtered_name in filtered_funcs:\n if name_to_addr(filtered_name) == None:\n logmsg(\"required function: %s missing, can't locate %s\" % (filtered_name, funcName))\n return False\n\n addr_str = name_to_addr(aString)\n if addr_str == None:\n return False\n addr_str_used = xref_func(addr_str)\n if addr_str_used == None:\n return False\n try:\n sark.Function(ea=addr_str_used)\n except sark.exceptions.SarkNoFunction:\n logmsg(\"No function at 0x%x when handling %s\" % (addr_str_used, aString))\n return False\n\n count = 0\n e = addr_str_used\n bFound = False\n while count <= count_max:\n e = head_func(e)\n line = sark.Line(e)\n #print(line)\n try:\n insn = line.insn\n except sark.exceptions.SarkNoInstruction:\n logmsg(\"data in the middle of instructions at 0x%x, not supported yet\" % e)\n return False\n if insn.mnem == \"BL\":\n if len(insn.operands) != 1:\n logmsg(\"Wrong number of operands for BL at 0x%x\" % e)\n return False\n curr_func_name = insn.operands[0].text\n # do we need to skip this \"BL\" or are we done?\n bFiltered = False\n if count_filtered_funcs > 0:\n logmsg(\"skipping filtered due to count: %d at 0x%x\" % (count_filtered_funcs, e))\n count_filtered_funcs -= 1\n bFiltered = True\n else:\n for filtered_name in filtered_funcs:\n if curr_func_name == filtered_name:\n logmsg(\"skipping filtered name: %s at 0x%x\" % (filtered_name, e))\n bFiltered = True\n break\n if bFiltered:\n count +=1\n continue\n func_addr = name_to_addr(curr_func_name)\n if func_addr == None:\n return False\n rename_address(func_addr, funcName)\n logmsg(\"%s = 0x%x\" % (funcName, func_addr))\n bFound = True\n break\n count += 1\n if not bFound:\n logmsg(\"ERROR: %s not found\" % funcName)\n return False\n return True\n\n# Starts from address (e) and goes backwards until it finds a pointer to another\n# segment, stopping after count_max instructions\n# seg_info = get_segments_info() is passed to this function\ndef find_first_pointer_backwards(e, seg_info, count_max=10):\n global SIZE_POINTER\n e -= SIZE_POINTER # we can't use prev_head() because we are not sure DWORDs are defined.\n # Otherwise it goes to a previous DWORD defined by IDA. That can be far away from us :(\n count = 0\n # we only supports 10 addresses backwards\n while count <= count_max:\n addr = get_wide_dword(e)\n #logmsg(\"%x\" % addr)\n if not addr_is_in_one_segment(addr, seg_info):\n break\n e -= SIZE_POINTER\n count += 1\n if count > count_max:\n logmsg(\"Error: find_first_pointer_backwards: failed to get the first pointer for: 0x%x\" % e)\n return False\n # we found a value not from a segment. 
The right values start at the next one.\n    e += SIZE_POINTER\n    return e\n\n# Returns the number of instructions of a given function\ndef function_count_instructions(ea = get_screen_ea()):\n    E = list(FuncItems(ea))\n    return len(E)\n\n# It is intended to find the basic block that returns from the function,\n# though it would break if the function had multiple returns\ndef find_ret_block(addr):\n    func = idaapi.get_func(addr)\n    # Taken from ex_gdl_qflow_chart.py\n    f = idaapi.FlowChart(func)\n    for block in f:\n        if idaapi.is_ret_block(block.type):\n            return block\n    return None\n\ndef get_bss_end():\n    return idaapi.get_segm_by_name(\".bss\").end_ea\n\n# Return the current idb name (without the .idb extension)\ndef get_idb_name():\n    idbpath = get_idb_path()\n    idbname = os.path.basename(idbpath)\n    if idbname.endswith(\".idb\"):\n        return idbname[:-4]\n    if idbname.endswith(\".i64\"):\n        return idbname[:-4]\n    return idbname\n\n# Old exported names, to be deprecated\nget_current_function = get_function_name\nMyGetFuncStartEA = get_function_addr\nuname_whatever = unname_address\nNameToRVA = name_to_rva\nMyLocByName = name_to_addr\nMyFirstXrefTo = first_xref\nMyFirstXrefOfFirstXrefTo = first_xref_of_first_xref\nMySecondXrefTo = second_xref\nMyThirdXrefTo = third_xref\nMyLastXrefTo = last_xref\nMyMakeName = rename_address\nget_call_arguments_1 = get_call_arguments_x86_1\nget_call_arguments_2 = get_call_arguments_x86_2\nget_call_arguments_3 = get_call_arguments_x86_3\nget_call_arguments_x64 = get_call_arguments_x64_generic\n\nlogmsg(\"loaded\")\n\nif __name__ == \"__main__\":\n    args = get_call_arguments(e=get_screen_ea())\n    print(args)\n","sub_path":"ida_helper.py","file_name":"ida_helper.py","file_ext":"py","file_size_in_byte":36858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"612531011","text":"import tornado.web\nimport tornado.ioloop\n\nclass IndexHandler(tornado.web.RequestHandler):\n\n    def get(self):\n        if self.get_argument('u',None) in ['kevin','alvin']:\n            # self.set_cookie('name',self.get_argument('u')) # unencrypted cookie\n            self.set_secure_cookie('user',self.get_argument('u')) # signed (secure) cookie\n        else:\n            self.write('Please log in')\n\nclass ManagerHandler(tornado.web.RequestHandler):\n\n    def get(self):\n        # if self.get_cookie('name',None) in ['kevin','alvin']:\n        #     self.write('Welcome, ' + self.get_cookie('name'))\n        print(self.get_secure_cookie('user',None))\n        if str(self.get_secure_cookie('user',None), encoding='utf-8') in ['kevin','alvin']:\n            self.write('Welcome back, ' + str(self.get_secure_cookie('user')))\n        else:\n            self.redirect('/index')\n\nsettings = {\n    'template_path': 'views',\n    'static_path': 'statics',\n    'cookie_secret': 'asdfasdfasdf',\n}\n\napplication = tornado.web.Application([\n    (r\"/index\",IndexHandler),\n    (r\"/manager\",ManagerHandler),\n], **settings)\n\nif __name__ == '__main__':\n    application.listen(8888)\n    tornado.ioloop.IOLoop.instance().start()","sub_path":"Py3Projecting/Pyscript/tornado_demo/mod06-session-验证码/app-cookie.py","file_name":"app-cookie.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
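# [Hedged note, not part of the surrounding records] The tornado record above relies
# on set_secure_cookie()/get_secure_cookie(), which sign the value with the
# application's cookie_secret; get_secure_cookie() returns bytes (or None), hence
# the str(..., encoding='utf-8') conversion. The pattern, in isolation:
#     value = handler.get_secure_cookie('user')      # bytes or None
#     user = value.decode('utf-8') if value else None
#     if user in ('kevin', 'alvin'):
#         ...  # authenticated path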
\"))\n\n for i in range(0, count):\n\n name = input(\"\\nEnter name: \")\n age = input(\"Enter age: \")\n roll_no = input(\"Enter roll no.: \")\n cls = input(\"Enter cls: \")\n\n st_list.append(Student(name, age, roll_no, cls))\n \n print(\"\\n\\nStudents list Output\\n\\n\")\n for obj in st_list:\n\n print(\"\\nEnter name: \", obj.name)\n print(\"Enter age: \", obj.age)\n print(\"Enter roll no.: \", obj.roll_no)\n print(\"Enter cls: \", obj.cls)\n print(\"\\n\\n\")\n \n","sub_path":"classes-objects/studList.py","file_name":"studList.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"589061063","text":"\"\"\"\nBuild management utilities\n\"\"\"\nfrom __future__ import absolute_import\n\nimport re\nimport os\nimport sys\nimport json\nimport shlex\nimport shutil\nimport logging\nimport tempfile\nimport platform\nimport subprocess\n\nfrom contextlib import contextmanager\n\nfrom . import log\n\nPY3 = sys.version_info[0] == 3\nSYSTEM = platform.system()\n\nif PY3:\n def _iter(it):\n return it.items()\n string_types = str\nelse:\n def _iter(it):\n return it.iteritems()\n string_types = (str, basestring)\n\n\ndef path_ancestor(path, count):\n if count <= 0:\n return path\n return path_ancestor(os.path.dirname(path), count - 1)\n\n\ndef run_(command_and_args, custom_env = {}, verbose = False, build_file = None):\n \"\"\"\n Execute the command provided\n :param command_and_args: list|str of the command and arguments we want to run\n :param custom_env: Environment values we want to utilize over our current environ\n :param verbose: Not used currently\n :param build_file: For building, this is a BuildFile instance that has additional\n environment augmentation\n :return: int exit code\n \"\"\"\n env = dict(os.environ, **custom_env)\n if build_file is not None:\n build_file.command_environment(env)\n os.environ.update(env)\n\n if not isinstance(command_and_args, (list, tuple)):\n full_command = shlex.split(command_and_args)\n else:\n full_command = list(command_and_args)\n\n for i, c in enumerate(full_command):\n if re.match(\"^\\\"[^\\\"]\\\"^\", c):\n # Strip away surrounding the quotes\n full_command[i] = c[1:-1]\n\n # Last second evaluation\n full_command = [os.path.expandvars(f) for f in full_command]\n logging.info(\"Running command: \" + \" \".join(full_command))\n\n if PY3:\n return subprocess.run(full_command).returncode\n else:\n return subprocess.check_call(full_command)\n\n\ndef local_path(package, version=None, base_only=False):\n \"\"\"\n Based on the package and the local version, build a\n path that we know will hold onto this version of the\n software suite\n :param pacakge: The package that we're localizing (str)\n :param version: The version of the pacakge (str)\n :param base_only: Should we only return the root of all apps\n :return: str\n \"\"\"\n if platform.system() == 'Windows':\n base = os.path.join(os.environ['APPDATA'], 'flux_launch', 'apps')\n elif platform.system() == 'Linux':\n base = os.path.join(\n os.path.expanduser('~'),\n '.local',\n 'share',\n 'flux_launch',\n 'apps'\n )\n\n if not os.path.isdir(base):\n os.makedirs(base)\n\n if base_only:\n return base\n\n if version is None:\n return os.path.join(base, package).replace('\\\\', '/')\n\n return os.path.join(base, package, version).replace('\\\\', '/')\n\n\ndef add_metaclass(metaclass):\n \"\"\"\n Taken from the six module. Python 2 and 3 compatible.\n \"\"\"\n def wrapper(cls):\n \"\"\"\n The actual wrapper. 
take the given class and return one that contains the proper metaclass.\n \"\"\"\n orig_vars = cls.__dict__.copy()\n orig_vars.pop('__dict__', None)\n orig_vars.pop('__weakref__', None)\n return metaclass(cls.__name__, cls.__bases__, orig_vars)\n return wrapper\n\n\ndef merge_dicts(dict1, dict2, combine_keys=None, ignore=None):\n '''\n Merge dictionaries recursively and pass back the result.\n If a conflict of types arrive, just get out with what\n we can.\n '''\n if combine_keys is None:\n combine_keys = {}\n if ignore is None:\n ignore = []\n def _merge_list_of_dicts(list1, list2, key):\n\n list1_values = [l[key] for l in list1]\n list2_values = [l[key] for l in list2]\n\n for v in set(list1_values).union(list2_values):\n if v in list2_values:\n # If the value is in the second list, we use that instead\n yield list2[list2_values.index(v)]\n else:\n yield list1[list1_values.index(v)]\n\n\n for k in set(dict1.keys()).union(dict2.keys()):\n if k in dict1 and k in dict2:\n if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):\n if k in ignore:\n yield (k, dict2[k])\n else:\n yield (k, dict(merge_dicts(dict1[k], dict2[k], combine_keys, ignore)))\n else:\n # If one of the values is not a dict, you can't continue merging it.\n # Value from second dict overrides one in first and we move on.\n\n # That is, unless, we've supplied combine keys. This is for list\n # concatinaion based on a given key.\n if k in combine_keys:\n if isinstance(dict1[k], list) and isinstance(dict2[k], list):\n yield (k, list(_merge_list_of_dicts(dict1[k], dict2[k], combine_keys[k])))\n else:\n yield (k, dict2[k])\n else:\n yield (k, dict2[k])\n elif k in dict1:\n yield (k, dict1[k])\n else:\n yield (k, dict2[k])\n\n\ndef levenshtein(s1, s2):\n \"\"\"\n Pythonic levenshtein math to quickly determine how many \"edits\" two strings are\n differently than one another.\n\n Code snippet by Halfdan Ingvarsson\n\n :param s1: String to compare\n :param s2: String to compare\n :return: int - number of edits required (higher number means more different)\n \"\"\"\n\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n # j+1 instead of j since previous_row and current_row are one character longer\n # than s2\n insertions = previous_row[j + 1] + 1\n deletions = current_row[j] + 1\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n \n return previous_row[-1]\n\n\ndef cli_name(arg, ignore_prefix = False):\n \"\"\"\n Convert an argument name into the cli equivalent\n \n Basically this just adds '--' to the front and converts '_' to '-'\n\n :param arg: str to convert\n :return: str\n \"\"\"\n prefix = '--' if (not arg.startswith('--') and not ignore_prefix) else ''\n return prefix + arg.replace('_', '-')\n\n\n@contextmanager\ndef cd(new_directory, cleanup=lambda: True):\n \"\"\"\n Quick context for working in our temp directory\n \"\"\"\n previous = os.getcwd()\n os.chdir(os.path.expanduser(new_directory))\n try:\n yield\n finally:\n os.chdir(previous)\n cleanup()\n\n\n@contextmanager\ndef temp_dir(change_dir=True):\n \"\"\"\n Quick temp directory that we move into to do our work\n :return: The new temp directory (we've also cd'd into it) \n \"\"\"\n dirpath = tempfile.mkdtemp()\n def _clean():\n shutil.rmtree(dirpath)\n if change_dir:\n with cd(dirpath, 
_clean):\n            yield dirpath\n    else:\n        yield dirpath\n\n\ndef load_from_source(filepath, name=None):\n    \"\"\"\n    Obligatory Python 2/3 source file loading mechanism\n    :param filepath: path to a loadable source file\n    :param name: name to give this module (will pick one if None)\n    :return: Python Module\n    \"\"\"\n    if name is None:\n        name = os.path.basename(filepath).replace('.py', '')\n\n    # In the event we have a repeat module name\n    count = 0\n    while name in sys.modules:\n        name += str(count)\n        count += 1\n\n    if PY3:\n        # check the minor version: importlib.util.spec_from_file_location exists from 3.5\n        if sys.version_info[1] >= 5:\n            import importlib.util\n            spec = importlib.util.spec_from_file_location(name, filepath)\n            module = importlib.util.module_from_spec(spec)\n            spec.loader.exec_module(module)\n            return module\n        else:\n            from importlib.machinery import SourceFileLoader\n            return SourceFileLoader(name, filepath).load_module()\n    else:\n        import imp\n        return imp.load_source(name, filepath)\n\n\ndefault_build_yaml = \"\"\"\\\n#\n# The {package} build.yaml\n#\n\n# The name of the package\nname: {package}\n\n# The build procedure\nbuild:\n\n    type: basic\n\"\"\"\n\ndef initialize_package(args):\n    \"\"\"\n    Initialize the current directory with a build.yaml\n    \"\"\"\n    if os.path.exists('build.yaml'):\n        logging.error('build.yaml already exists! Cannot initialize.')\n        sys.exit(1)\n\n    package_name = os.path.basename(os.getcwd())\n\n    with open('build.yaml', 'w') as f:\n        f.write(default_build_yaml.format(\n            package = package_name\n        ))\n\n    logging.info('Initialized - created build.yaml! Welcome to fbuild.')\n\n# old misspelled name kept as an alias for compatibility\ninitialize_pacakge = initialize_package\n\n\nclass SimpleRegistry(type):\n    \"\"\"\n    A metaclass that builds a registry automatically\n    \"\"\"\n    def __init__(cls, name, bases, dct):\n        if not hasattr(cls, '_registry'):\n            cls._registry = {} # Base Class\n        else:\n            cls._registry[cls.alias] = cls","sub_path":"src/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
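# [Hedged usage note, not part of the surrounding records] Two details of the
# utils.py record above worth pinning down:
#   * load_from_source() gates on the minor version (sys.version_info[1]) because
#     importlib.util.spec_from_file_location only exists from Python 3.5 on; the
#     micro version (index 2) says nothing about that API.
#   * merge_dicts() is a generator of (key, value) pairs, so callers wrap it:
#         dict(merge_dicts({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}}))
#         # -> {'a': 1, 'b': {'x': 1, 'y': 2}}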
+{"seq_id":"384025468","text":"#!/usr/bin/python\nimport numpy as nmp\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as grs\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass Graph():\n\n    def __init__(self, frame_list, lon0, lon2, epoch1):\n        self.frame_list = frame_list\n        self.lon0 = lon0\n        self.lon2 = lon2\n        self.epoch1 = epoch1\n        self.fig, self.ax, self.txt = self._setup_fig()\n        self.frame_cnt = len(self.frame_list)\n\n    def _setup_fig(self):\n        gs = grs.GridSpec(1, 2, width_ratios=[4,1])\n        fig = plt.figure()\n        fig.set_size_inches(14, 8, True)\n        ax = fig.add_subplot(gs[0], projection='3d')\n        ax2 = fig.add_subplot(gs[1])\n        ax.xaxis.pane.fill = False\n        ax.yaxis.pane.fill = False\n        ax.zaxis.pane.fill = False\n        #plt.axis('off')\n        #plt.tight_layout(pad=0)\n        plt.subplots_adjust(wspace=0.00001)\n        return fig, ax, ax2\n\n    def run(self):\n        ani = animation.FuncAnimation(\n            fig=self.fig,\n            init_func=self._init_graph,\n            func=self._update_graph,\n            frames=self.frame_cnt,\n            interval=200)\n\n        plt.show()\n        #ani.save('orb3.gif', writer='imagemagick', fps=5, dpi=128)\n\n    def _init_graph(self):\n        self.ax.set_xlabel('Latitude, $\circ$E')\n        self.ax.set_ylabel('Longitude, $\circ$E')\n        self.ax.set_zlabel('Altitude, km')\n        self.ax.set_xlim3d(-15,15)\n        self.ax.set_zlim3d(35600,36100)\n        if self.lon2 > self.lon0:\n            self.ax.set_ylim3d(self.lon0-2, self.lon2+2)\n        else:\n            self.ax.set_ylim3d(self.lon0+2, self.lon2-2)\n        #self.txt.axis('off')\n        self.txt.grid(False)\n        self.txt.set_xticks([])\n        self.txt.set_yticks([])\n\n    def _update_graph(self, f):\n        frame = self.frame_list[f]\n\n        self.ax.clear()\n        self.txt.clear()\n        self._init_graph()\n\n        self._update_sats(frame)\n        self._update_text(frame)\n\n    def _update_sats(self, frame):\n        for n in range(len(frame.lon)):\n            x, y, z, s, c, m, name = frame.get_params(n)\n\n            center = (self.lon2+self.lon0)/2\n            self.ax.scatter(x, y, z, s=s, c=c, marker=m)\n            self.ax.text(x, y, z+10, '%s'%(name), size=5, zorder=1)\n\n    def _update_text(self, frame):\n        self.txt.text(0.0,0.92,'Epoch Start:', size=8)\n        self.txt.text(0.1,0.90,'%s'%(self.epoch1), size=8)\n        self.txt.text(0.0,0.87,'Mission Time:', size=8)\n        self.txt.text(0.1,0.85,'%.2f hr'%(frame.mission_time), size=8)\n        self.txt.text(0.0,0.82,'RSGS Longitude:', size=8)\n        self.txt.text(0.1,0.8,'%.2f deg'%(frame.lon[-1]), size=8)\n        self.txt.text(0.0,0.75,'Range to Object, km:', size=8)\n\n        for n, r in enumerate(frame.range[0:10]):\n            name = frame.name_sorted[n]\n            color = frame.range_color[n]\n            self.txt.text(0.1, 0.73-n*0.015,'%.2f'%(r), size=8, color=color)\n            self.txt.text(0.4, 0.73-n*0.015,'%s'%(name), size=8, color=color)\n\nclass GraphFrame():\n\n    def __init__(self, sat_list):\n        self.sat_list = sat_list\n        self.dt = self.sat_list[0].dt\n        self.lat = []\n        self.lon = []\n        self.alt = []\n        self.size = []\n        self.name = []\n        self.color = []\n        self.range = []\n        self.marker = []\n        self.mission_time = 0\n\n    def load_data(self, epoch):\n\n        self.mission_time = epoch * self.dt / 3600 # hours, displayed as 'hr' above\n\n        for sat in self.sat_list:\n            self.name.append(sat.name[2:]) # remove leading zero\n            self.lat.append(sat.lat[epoch])\n            self.lon.append(sat.lon[epoch])\n            self.alt.append(sat.alt[epoch])\n            self.range.append(sat.range[epoch])\n\n            if sat.type == 'RB':\n                self.size.append(30)\n                self.color.append('r')\n                self.marker.append('^')\n            elif sat.type == 'RSGS':\n                self.size.append(60)\n                self.color.append('b')\n                self.marker.append('s')\n            elif sat.type == 'target':\n                #self.name[-1] = 'RSO-1'\n                self.size.append(40)\n                self.color.append('C2')\n                self.marker.append('o')\n            else:\n                self.size.append(16)\n                self.color.append('C0')\n                self.marker.append('o')\n\n        self.range, self.name_sorted = zip(*sorted(zip(self.range,self.name)))\n        self.range = self.range[1:] # Remove RSGS from the list\n        self.name_sorted = self.name_sorted[1:]\n\n        red_lim = 250\n        yel_lim = 500\n        self.range_color = ['C3' if r < red_lim else 'C4' if r < yel_lim else 'C0' for r in self.range]\n\n    def get_params(self, n):\n        return [self.lat[n],\n                self.lon[n],\n                self.alt[n],\n                self.size[n],\n                self.color[n],\n                self.marker[n],\n                self.name[n]]\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"555490225","text":"import sys\nimport torch\nimport torch_xla\nimport torch_xla.core.xla_model as xm\nimport torch_xla.distributed.xla_multiprocessing as xmp\n\n\ndef _mp_fn(index):\n    device = xm.xla_device()\n    if xm.xla_device_hw(device) != 'CPU':\n        ordinal_tensor = torch.tensor([index], dtype=torch.float).to(device)\n        result = xm.all_gather(ordinal_tensor)\n\n        cpu_result = result.cpu()\n        expected = torch.arange(0, xm.xrt_world_size(), dtype=torch.float)\n        if not cpu_result.allclose(expected):\n            print('xm.all_gather() produced wrong reductions', file=sys.stderr)\n            print('[{}] {}'.format(index, cpu_result), file=sys.stderr)\n            sys.exit(1)\n    else:\n        print(\n            'Default device {} does not support replication'.format(device),\n            file=sys.stderr)\n\n\nif __name__ == '__main__':\n    xmp.spawn(_mp_fn, 
args=())\n","sub_path":"test/test_mp_all_gather.py","file_name":"test_mp_all_gather.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"240317944","text":"from django.test import TestCase\nfrom django.test.client import Client\nfrom django.core.management import call_command\nfrom django.contrib.auth.models import User\n\nfrom create_dataset.utils import CreateDataset\nfrom public_interface.models import Genes\n\n\nclass CreateFASTADatasetTest(TestCase):\n def setUp(self):\n args = []\n opts = {'dumpfile': 'test_db_dump2.xml', 'verbosity': 0}\n cmd = 'migrate_db'\n call_command(cmd, *args, **opts)\n\n g1 = Genes.objects.get(gene_code='COI-begin')\n g2 = Genes.objects.get(gene_code='ef1a')\n self.cleaned_data = {\n 'gene_codes': [g1, g2],\n 'taxonset': None,\n 'voucher_codes': 'CP100-10\\r\\nCP100-11',\n 'geneset': None,\n 'taxon_names': ['CODE', 'GENUS', 'SPECIES'],\n 'number_genes': None,\n 'translations': False,\n 'degen_translations': 'normal',\n 'positions': ['ALL'],\n 'partition_by_positions': 'by gene',\n 'file_format': 'FASTA',\n 'aminoacids': True,\n 'outgroup': '',\n }\n\n self.user = User.objects.get(username='admin')\n self.user.set_password('pass')\n self.user.save()\n\n self.c = Client()\n self.dataset_creator = CreateDataset(self.cleaned_data)\n self.maxDiff = None\n\n def test_create_dataset_degenerated(self):\n self.c.post('/accounts/login/', {'username': 'admin', 'password': 'pass'})\n c = self.c.post('/create_dataset/results/',\n {\n 'voucher_codes': 'CP100-10',\n 'gene_codes': 3, # wingless\n 'geneset': '',\n 'taxonset': '',\n 'translations': True,\n 'introns': 'YES',\n 'file_format': 'FASTA',\n 'degen_translations': 'normal',\n 'exclude': 'YES',\n 'aminoacids': False,\n 'special': False,\n 'outgroup': '',\n 'positions': 'ALL',\n 'partition_by_positions': 'by gene',\n 'taxon_names': ['CODE', 'GENUS', 'SPECIES'],\n }\n )\n expected = 'TNGGNTTYATHGTNTGAGCNCAYCAYATHTTYACN'\n self.assertTrue(expected in str(c.content))\n\n def test_create_dataset_degenerated_warning_data_cannot_be_partitioned(self):\n self.c.post('/accounts/login/', {'username': 'admin', 'password': 'pass'})\n c = self.c.post('/create_dataset/results/',\n {\n 'voucher_codes': 'CP100-10',\n 'gene_codes': 4,\n 'geneset': '',\n 'taxonset': '',\n 'introns': 'YES',\n 'file_format': 'FASTA',\n 'translations': True,\n 'degen_translations': 'normal',\n 'exclude': 'YES',\n 'aminoacids': False,\n 'special': False,\n 'outgroup': '',\n 'positions': 'ALL',\n 'partition_by_positions': 'by codon position',\n 'taxon_names': ['CODE', 'GENUS', 'SPECIES'],\n }\n )\n expected = 'Cannot degenerate codons if they go to different partitions'\n self.assertTrue(expected in str(c.content))\n\n def test_create_dataset_degenerated_warning_data_cannot_be_of_partial_codons(self):\n self.c.post('/accounts/login/', {'username': 'admin', 'password': 'pass'})\n c = self.c.post('/create_dataset/results/',\n {\n 'voucher_codes': 'CP100-10',\n 'gene_codes': 4,\n 'geneset': '',\n 'taxonset': '',\n 'introns': 'YES',\n 'file_format': 'FASTA',\n 'translations': True,\n 'degen_translations': 'normal',\n 'exclude': 'YES',\n 'aminoacids': False,\n 'special': False,\n 'outgroup': '',\n 'positions': '1st',\n 'partition_by_positions': 'by gene',\n 'taxon_names': ['CODE', 'GENUS', 'SPECIES'],\n }\n )\n expected = 'Cannot degenerate codons if you have not selected all codon positions'\n self.assertTrue(expected in str(c.content))\n\n def 
test_fasta_as_aminoacids(self):\n self.c.post('/accounts/login/', {'username': 'admin', 'password': 'pass'})\n c = self.c.post('/create_dataset/results/',\n {\n 'voucher_codes': 'CP100-10',\n 'gene_codes': 3, # wingless\n 'geneset': '',\n 'taxonset': '',\n 'translations': True,\n 'introns': 'YES',\n 'file_format': 'FASTA',\n 'degen_translations': 'normal',\n 'exclude': 'YES',\n 'aminoacids': True,\n 'special': False,\n 'outgroup': '',\n 'positions': 'ALL',\n 'partition_by_positions': 'by gene',\n 'taxon_names': ['CODE', 'GENUS', 'SPECIES'],\n }\n )\n expected = 'IYAMLAIGLLGFIVWAHHM'\n self.assertTrue(expected in str(c.content))\n","sub_path":"voseq/create_dataset/tests/tests_create_fasta_dataset.py","file_name":"tests_create_fasta_dataset.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"644972644","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n @Time : 2020/6/15 21:54\n @Auth : 可优\n @File : urls.py\n @IDE : PyCharm\n @Motto: ABC(Always Be Coding)\n @Email: keyou100@qq.com\n @Company: 湖南省零檬信息技术有限公司\n @Copyright: 柠檬班\n-------------------------------------------------\n\"\"\"\nfrom rest_framework.routers import SimpleRouter\n\nfrom . import views\n\n\n# Define the router object and register the viewset\nrouter = SimpleRouter()\nrouter.register(r'interfaces', views.InterfacesViewSet)\n\nurlpatterns = [\n\n]\nurlpatterns += router.urls\n","sub_path":"apps/interfaces/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"185692602","text":"#!/usr/bin/env python\nimport os, sys\nimport time\nimport psutil\n\ndef get_pcpu():\n pid = int(os.popen(\"cat /export/App/elasticsearch/jiesi-22/es_jiesi-22.pid\").read())\n pcpu = psutil.Process(pid).cpu_percent(interval=1)\n #realtime = time.strftime(\"%Y%m%d%H%M%S\", time.localtime(time.time()))\n #event = 'CPU percent: ' + str(pcpu)\n #log(realtime, event)\n return pcpu\n\ndef es_dump():\n # Strip the trailing newline so the shell commands below stay on one line\n pid = os.popen(\"cat /export/App/elasticsearch/jiesi-22/es_jiesi-22.pid\").read().strip()\n realtime = time.strftime(\"%Y%m%d%H%M%S\", time.localtime(time.time()))\n heapdump_command = '/export/servers/jdk1.8.0_60/bin/jmap -dump:live,format=b,file=/export/Data/elasticsearch/dump/jiesi-22/' + realtime + '.hd ' + pid\n threaddump_command = '/export/servers/jdk1.8.0_60/bin/jstack -l ' + pid + ' > /export/Data/elasticsearch/dump/jiesi-22/' + realtime + '.td'\n os.system('mkdir -p /export/Data/elasticsearch/dump/jiesi-22')\n os.system(heapdump_command)\n os.system(threaddump_command)\n event = 'heap dump & thread dump'\n log(realtime, event)\n\ndef log(realtime, event):\n # file() was removed in Python 3; open() works on both versions\n log_file = open(\"/export/Data/elasticsearch/dump/jiesi-22/monitor.log\", \"a+\")\n log = str(realtime) + \"\\t\" + str(event) + \"\\n\"\n log_file.write(log)\n log_file.close()\n\nif __name__ == '__main__':\n count = 0\n while 1:\n pcpu = get_pcpu()\n if pcpu > 2500:\n realtime = time.strftime(\"%Y%m%d%H%M%S\", time.localtime(time.time()))\n event = 'CPU percent: ' + str(pcpu)\n log(realtime, event)\n count += 1\n if psutil.disk_usage('/export').percent > 95:\n break\n if count > 6:\n es_dump()\n count = 0\n time.sleep(10)\n","sub_path":"work/jd/linux/monitor_process.py","file_name":"monitor_process.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"651217379","text":"#-*-coding:GBK -*-\nimport 
smtplib, os\nimport sys\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom path_data import Path_data\n\n\nclass Test_mail:\n def __init__(self, title, path):\n self.msg = MIMEMultipart()\n self.user = 'zhouyang@droi.com'\n self.to_user = 'zhouyang@droi.com'\n self.msg[\"Subject\"] = title\n self.msg[\"From\"] = self.user\n self.msg[\"To\"] = self.to_user\n self.pas = 'zhou123...'\n\n # self.path=os.listdir(Path_data.get_path()+\"/test_data/test_log_report\")\n absolute_path = Path_data.get_path() + \"/test_data/test_log_report/%s.txt\" % path\n #part = MIMEApplication(open(absolute_path, 'rb').read())\n #part.add_header('Content-Disposition', 'attachment', filename=\"%s.txt\" % path)\n with open(absolute_path, 'r', encoding='utf8') as f:\n self.msg.attach(MIMEText(f.read()))\n #self.msg.attach(part)\n #Data_analysis.data_delete('test')\n def smtp_on(self):\n\n # Port 465 expects an implicit-TLS connection, so use SMTP_SSL\n s = smtplib.SMTP_SSL('smtp.263.net', 465, timeout=30)\n s.login(self.user, self.pas)\n s.sendmail(self.user, ['zhouyang@droi.com','liuyang@droi.com','chengyu@droi.com','niuyuanman@droi.com'], self.msg.as_string())\n s.close()\n#,'liuyang@droi.com','chengyu@droi.com','niuyuanman@droi.com'\nif __name__ == '__main__':\n Test_mail('test','Air_Quality_Ranking').smtp_on()\n","sub_path":"aqi_weather_location_tests/tools/test_html.py","file_name":"test_html.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"522757175","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 21 10:12:49 2018\n\n@author: manu\n\"\"\"\nfrom django import forms\nfrom mptt.forms import TreeNodeMultipleChoiceField\nfrom .models import enonce_exo, solution_exo,niveau_exo, image_exo,image_exo_simplifie,image_sol,image_sol_simplifie\n\n\n#class ExoForm(forms.ModelForm):\n#\n# class Meta:\n# model = enonce_exo\n## fields = ('enonce_latex_initial','macros_initial','exo_niveau')\n# enonce_latex_initial=forms.CharField(widget=forms.Textarea)\n# macros_initial=forms.CharField(widget=forms.Textarea)\n# exo_niveau=forms.CheckboxInput()\n#\n\n\n\n#\n#class CustomCheckboxSelectMultiple(forms.CheckboxSelectMultiple):\n# allow_multiple_selected = True\n# input_type = 'checkbox'\n# template_name = 'django/forms/widgets/checkbox_select.html'\n# template_name = 'serveur_exos/checkbox_select.html'\n## option_template_name = 'django/forms/widgets/checkbox_option.html'\n#\n# def use_required_attribute(self, initial):\n# # Don't use the 'required' attribute because browser validation would\n# # require all checkboxes to be checked instead of at least one.\n# return False\n#\n# def value_omitted_from_data(self, data, files, name):\n# # HTML checkboxes don't appear in POST data if not checked, so it's\n# # never known if the value is actually omitted.\n# return False\n#\n# def id_for_label(self, id_, index=None):\n# \"\"\"\"\n# Don't include for=\"field_0\" in
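For reference, the `SimpleRegistry` metaclass in the `src/common/utils.py` record above implements a common self-registration pattern: the first class created with the metaclass receives an empty `_registry` dict, and every later subclass is stored in it under its `alias` attribute, which is presumably how a key such as `type: basic` in the generated build.yaml selects a build implementation. A minimal, self-contained sketch of the pattern (the `BuildStep` class hierarchy here is hypothetical, not taken from the dataset records):

```python
# Sketch of the registry-metaclass pattern; BuildStep/BasicBuild/DockerBuild
# are hypothetical example classes, not part of the dataset records.

class SimpleRegistry(type):
    """A metaclass that collects subclasses into a shared registry dict."""

    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)
        if not hasattr(cls, '_registry'):
            cls._registry = {}              # created once, on the base class
        else:
            cls._registry[cls.alias] = cls  # every subclass registers itself


class BuildStep(metaclass=SimpleRegistry):
    """Base class; concrete subclasses must define an `alias` attribute."""


class BasicBuild(BuildStep):
    alias = 'basic'


class DockerBuild(BuildStep):
    alias = 'docker'


# Look up an implementation by alias, e.g. the `type` value from a build.yaml:
print(BuildStep._registry)           # {'basic': BasicBuild, 'docker': DockerBuild}
print(BuildStep._registry['basic'])  # <class '__main__.BasicBuild'>
```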